1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 
54 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
55 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
56 
57 /* Module parameters */
58 #define TX_TIMEO	5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62 
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66 
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70 
71 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
73 
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77 
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81 
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86 
87 #define	DEFAULT_BUFSIZE	1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91 
92 #define	STMMAC_RX_COPYBREAK	256
93 
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
96 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97 
98 #define STMMAC_DEFAULT_LPI_TIMER	1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103 
/* By default the driver uses ring mode to manage tx and rx descriptors,
 * but the user can force the use of chain mode instead.
 */
static int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
110 
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112 
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117 
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119 
120 /**
121  * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127 	if (unlikely(watchdog < 0))
128 		watchdog = TX_TIMEO;
129 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130 		buf_sz = DEFAULT_BUFSIZE;
131 	if (unlikely(flow_ctrl > 1))
132 		flow_ctrl = FLOW_AUTO;
133 	else if (likely(flow_ctrl < 0))
134 		flow_ctrl = FLOW_OFF;
135 	if (unlikely((pause < 0) || (pause > 0xffff)))
136 		pause = PAUSE_TIME;
137 	if (eee_timer < 0)
138 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140 
141 /**
142  * stmmac_disable_all_queues - Disable all queues
143  * @priv: driver private structure
144  */
145 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
146 {
147 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
148 	u32 queue;
149 
150 	for (queue = 0; queue < rx_queues_cnt; queue++) {
151 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
152 
153 		napi_disable(&rx_q->napi);
154 	}
155 }
156 
157 /**
158  * stmmac_enable_all_queues - Enable all queues
159  * @priv: driver private structure
160  */
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162 {
163 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164 	u32 queue;
165 
166 	for (queue = 0; queue < rx_queues_cnt; queue++) {
167 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
168 
169 		napi_enable(&rx_q->napi);
170 	}
171 }
172 
173 /**
174  * stmmac_stop_all_queues - Stop all queues
175  * @priv: driver private structure
176  */
177 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
178 {
179 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
180 	u32 queue;
181 
182 	for (queue = 0; queue < tx_queues_cnt; queue++)
183 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
184 }
185 
186 /**
187  * stmmac_start_all_queues - Start all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_start_all_queues(struct stmmac_priv *priv)
191 {
192 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193 	u32 queue;
194 
195 	for (queue = 0; queue < tx_queues_cnt; queue++)
196 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198 
199 /**
200  * stmmac_clk_csr_set - dynamically set the MDC clock
201  * @priv: driver private structure
202  * Description: this is to dynamically set the MDC clock according to the csr
203  * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
210  */
211 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
212 {
213 	u32 clk_rate;
214 
215 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
216 
	/* The platform-provided default clk_csr is assumed valid
	 * in all cases except the ones handled below.
	 * For clock rates higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider because the frequency
	 * of clk_csr_i is not known, so the default divider is left
	 * unchanged.
	 */
224 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
225 		if (clk_rate < CSR_F_35M)
226 			priv->clk_csr = STMMAC_CSR_20_35M;
227 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
228 			priv->clk_csr = STMMAC_CSR_35_60M;
229 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
230 			priv->clk_csr = STMMAC_CSR_60_100M;
231 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
232 			priv->clk_csr = STMMAC_CSR_100_150M;
233 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
234 			priv->clk_csr = STMMAC_CSR_150_250M;
235 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
236 			priv->clk_csr = STMMAC_CSR_250_300M;
237 	}
238 
239 	if (priv->plat->has_sun8i) {
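		/* The sun8i EMAC uses its own MDC divider encoding: a larger
		 * clk_csr value selects a larger divider, keeping MDC within
		 * the MDIO clock limit as the bus clock rate grows.
		 */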
240 		if (clk_rate > 160000000)
241 			priv->clk_csr = 0x03;
242 		else if (clk_rate > 80000000)
243 			priv->clk_csr = 0x02;
244 		else if (clk_rate > 40000000)
245 			priv->clk_csr = 0x01;
246 		else
247 			priv->clk_csr = 0;
248 	}
249 }
250 
251 static void print_pkt(unsigned char *buf, int len)
252 {
253 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
254 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
255 }
256 
257 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
258 {
259 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
260 	u32 avail;
261 
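	/* One descriptor is always kept unused so that cur_tx == dirty_tx
	 * can only mean an empty ring, hence the "- 1" below.
	 */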
262 	if (tx_q->dirty_tx > tx_q->cur_tx)
263 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
264 	else
265 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
266 
267 	return avail;
268 }
269 
270 /**
271  * stmmac_rx_dirty - Get RX queue dirty
272  * @priv: driver private structure
273  * @queue: RX queue index
274  */
275 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
276 {
277 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
278 	u32 dirty;
279 
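	/* Number of descriptors already consumed by the CPU and waiting to
	 * be refilled and handed back to the DMA.
	 */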
280 	if (rx_q->dirty_rx <= rx_q->cur_rx)
281 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
282 	else
283 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
284 
285 	return dirty;
286 }
287 
288 /**
289  * stmmac_hw_fix_mac_speed - callback for speed selection
290  * @priv: driver private structure
291  * Description: on some platforms (e.g. ST), some HW system configuration
292  * registers have to be set according to the link speed negotiated.
293  */
294 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
295 {
296 	struct net_device *ndev = priv->dev;
297 	struct phy_device *phydev = ndev->phydev;
298 
299 	if (likely(priv->plat->fix_mac_speed))
300 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
301 }
302 
303 /**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: verify that all TX queues are idle and, if so, enter LPI
 * mode when EEE is enabled.
308  */
309 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
310 {
311 	u32 tx_cnt = priv->plat->tx_queues_to_use;
312 	u32 queue;
313 
314 	/* check if all TX queues have the work finished */
315 	for (queue = 0; queue < tx_cnt; queue++) {
316 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
317 
318 		if (tx_q->dirty_tx != tx_q->cur_tx)
319 			return; /* still unfinished work */
320 	}
321 
322 	/* Check and enter in LPI mode */
323 	if (!priv->tx_path_in_lpi_mode)
324 		priv->hw->mac->set_eee_mode(priv->hw,
325 					    priv->plat->en_tx_lpi_clockgating);
326 }
327 
328 /**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: exit the LPI state and disable EEE if it is active.
 * This is called from the xmit path.
333  */
334 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
335 {
336 	priv->hw->mac->reset_eee_mode(priv->hw);
337 	del_timer_sync(&priv->eee_ctrl_timer);
338 	priv->tx_path_in_lpi_mode = false;
339 }
340 
341 /**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list pointer embedded in the driver private structure
 * Description:
 *  if there is no data transfer and we are not already in the LPI state,
 *  then the MAC transmitter can be moved to the LPI state.
347  */
348 static void stmmac_eee_ctrl_timer(struct timer_list *t)
349 {
350 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
351 
352 	stmmac_enable_eee_mode(priv);
353 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
354 }
355 
356 /**
357  * stmmac_eee_init - init EEE
358  * @priv: driver private structure
359  * Description:
 *  if the GMAC supports EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
363  */
364 bool stmmac_eee_init(struct stmmac_priv *priv)
365 {
366 	struct net_device *ndev = priv->dev;
367 	unsigned long flags;
368 	bool ret = false;
369 
	/* Using the PCS we cannot deal with the phy registers at this
	 * stage, so extra features such as EEE are not supported.
	 */
373 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
374 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
375 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
376 		goto out;
377 
378 	/* MAC core supports the EEE feature. */
379 	if (priv->dma_cap.eee) {
380 		int tx_lpi_timer = priv->tx_lpi_timer;
381 
382 		/* Check if the PHY supports EEE */
383 		if (phy_init_eee(ndev->phydev, 1)) {
			/* Handle the case where EEE can no longer be supported
			 * at run time (for example because the link partner
			 * capabilities have changed).
			 * In that case the driver disables its own timers.
			 */
389 			spin_lock_irqsave(&priv->lock, flags);
390 			if (priv->eee_active) {
391 				netdev_dbg(priv->dev, "disable EEE\n");
392 				del_timer_sync(&priv->eee_ctrl_timer);
393 				priv->hw->mac->set_eee_timer(priv->hw, 0,
394 							     tx_lpi_timer);
395 			}
396 			priv->eee_active = 0;
397 			spin_unlock_irqrestore(&priv->lock, flags);
398 			goto out;
399 		}
400 		/* Activate the EEE and start timers */
401 		spin_lock_irqsave(&priv->lock, flags);
402 		if (!priv->eee_active) {
403 			priv->eee_active = 1;
404 			timer_setup(&priv->eee_ctrl_timer,
405 				    stmmac_eee_ctrl_timer, 0);
406 			mod_timer(&priv->eee_ctrl_timer,
407 				  STMMAC_LPI_T(eee_timer));
408 
409 			priv->hw->mac->set_eee_timer(priv->hw,
410 						     STMMAC_DEFAULT_LIT_LS,
411 						     tx_lpi_timer);
412 		}
413 		/* Set HW EEE according to the speed */
414 		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
415 
416 		ret = true;
417 		spin_unlock_irqrestore(&priv->lock, flags);
418 
419 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
420 	}
421 out:
422 	return ret;
423 }
424 
425 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
426  * @priv: driver private structure
427  * @p : descriptor pointer
428  * @skb : the socket buffer
429  * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and passes it to the stack.
432  */
433 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
434 				   struct dma_desc *p, struct sk_buff *skb)
435 {
436 	struct skb_shared_hwtstamps shhwtstamp;
437 	u64 ns;
438 
439 	if (!priv->hwts_tx_en)
440 		return;
441 
442 	/* exit if skb doesn't support hw tstamp */
443 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
444 		return;
445 
446 	/* check tx tstamp status */
447 	if (priv->hw->desc->get_tx_timestamp_status(p)) {
448 		/* get the valid tstamp */
449 		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
450 
451 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
452 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
453 
454 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
455 		/* pass tstamp to stack */
456 		skb_tstamp_tx(skb, &shhwtstamp);
457 	}
458 
459 	return;
460 }
461 
462 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
463  * @priv: driver private structure
464  * @p : descriptor pointer
465  * @np : next descriptor pointer
466  * @skb : the socket buffer
467  * Description :
 * This function reads the received packet's timestamp from the descriptor,
 * performs some sanity checks and passes it to the stack.
470  */
471 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
472 				   struct dma_desc *np, struct sk_buff *skb)
473 {
474 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
475 	struct dma_desc *desc = p;
476 	u64 ns;
477 
478 	if (!priv->hwts_rx_en)
479 		return;
480 	/* For GMAC4, the valid timestamp is from CTX next desc. */
481 	if (priv->plat->has_gmac4)
482 		desc = np;
483 
484 	/* Check if timestamp is available */
485 	if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
486 		ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
487 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
488 		shhwtstamp = skb_hwtstamps(skb);
489 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
490 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
491 	} else  {
492 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
493 	}
494 }
495 
496 /**
497  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
498  *  @dev: device pointer.
499  *  @ifr: An IOCTL specific structure, that can contain a pointer to
500  *  a proprietary structure used to pass information to the driver.
501  *  Description:
502  *  This function configures the MAC to enable/disable both outgoing(TX)
503  *  and incoming(RX) packets time stamping based on user input.
504  *  Return Value:
 *  0 on success and an appropriate negative error code on failure.
506  */
507 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
508 {
509 	struct stmmac_priv *priv = netdev_priv(dev);
510 	struct hwtstamp_config config;
511 	struct timespec64 now;
512 	u64 temp = 0;
513 	u32 ptp_v2 = 0;
514 	u32 tstamp_all = 0;
515 	u32 ptp_over_ipv4_udp = 0;
516 	u32 ptp_over_ipv6_udp = 0;
517 	u32 ptp_over_ethernet = 0;
518 	u32 snap_type_sel = 0;
519 	u32 ts_master_en = 0;
520 	u32 ts_event_en = 0;
521 	u32 value = 0;
522 	u32 sec_inc;
523 
524 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
525 		netdev_alert(priv->dev, "No support for HW time stamping\n");
526 		priv->hwts_tx_en = 0;
527 		priv->hwts_rx_en = 0;
528 
529 		return -EOPNOTSUPP;
530 	}
531 
532 	if (copy_from_user(&config, ifr->ifr_data,
533 			   sizeof(struct hwtstamp_config)))
534 		return -EFAULT;
535 
536 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
537 		   __func__, config.flags, config.tx_type, config.rx_filter);
538 
539 	/* reserved for future extensions */
540 	if (config.flags)
541 		return -EINVAL;
542 
543 	if (config.tx_type != HWTSTAMP_TX_OFF &&
544 	    config.tx_type != HWTSTAMP_TX_ON)
545 		return -ERANGE;
546 
547 	if (priv->adv_ts) {
548 		switch (config.rx_filter) {
549 		case HWTSTAMP_FILTER_NONE:
			/* do not time stamp any incoming packet */
551 			config.rx_filter = HWTSTAMP_FILTER_NONE;
552 			break;
553 
554 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
555 			/* PTP v1, UDP, any kind of event packet */
556 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
557 			/* take time stamp for all event messages */
558 			if (priv->plat->has_gmac4)
559 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
560 			else
561 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
562 
563 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
564 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
565 			break;
566 
567 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
568 			/* PTP v1, UDP, Sync packet */
569 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
570 			/* take time stamp for SYNC messages only */
571 			ts_event_en = PTP_TCR_TSEVNTENA;
572 
573 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
574 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
575 			break;
576 
577 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
578 			/* PTP v1, UDP, Delay_req packet */
579 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
580 			/* take time stamp for Delay_Req messages only */
581 			ts_master_en = PTP_TCR_TSMSTRENA;
582 			ts_event_en = PTP_TCR_TSEVNTENA;
583 
584 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
585 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
586 			break;
587 
588 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
589 			/* PTP v2, UDP, any kind of event packet */
590 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
591 			ptp_v2 = PTP_TCR_TSVER2ENA;
592 			/* take time stamp for all event messages */
593 			if (priv->plat->has_gmac4)
594 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
595 			else
596 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
597 
598 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
599 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
600 			break;
601 
602 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
603 			/* PTP v2, UDP, Sync packet */
604 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
605 			ptp_v2 = PTP_TCR_TSVER2ENA;
606 			/* take time stamp for SYNC messages only */
607 			ts_event_en = PTP_TCR_TSEVNTENA;
608 
609 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
610 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
611 			break;
612 
613 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
614 			/* PTP v2, UDP, Delay_req packet */
615 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
616 			ptp_v2 = PTP_TCR_TSVER2ENA;
617 			/* take time stamp for Delay_Req messages only */
618 			ts_master_en = PTP_TCR_TSMSTRENA;
619 			ts_event_en = PTP_TCR_TSEVNTENA;
620 
621 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
622 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
623 			break;
624 
625 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1, any layer, any kind of event packet */
627 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
628 			ptp_v2 = PTP_TCR_TSVER2ENA;
629 			/* take time stamp for all event messages */
630 			if (priv->plat->has_gmac4)
631 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
632 			else
633 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
634 
635 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
636 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
637 			ptp_over_ethernet = PTP_TCR_TSIPENA;
638 			break;
639 
640 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
641 			/* PTP v2/802.AS1, any layer, Sync packet */
642 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
643 			ptp_v2 = PTP_TCR_TSVER2ENA;
644 			/* take time stamp for SYNC messages only */
645 			ts_event_en = PTP_TCR_TSEVNTENA;
646 
647 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
648 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
649 			ptp_over_ethernet = PTP_TCR_TSIPENA;
650 			break;
651 
652 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
653 			/* PTP v2/802.AS1, any layer, Delay_req packet */
654 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
655 			ptp_v2 = PTP_TCR_TSVER2ENA;
656 			/* take time stamp for Delay_Req messages only */
657 			ts_master_en = PTP_TCR_TSMSTRENA;
658 			ts_event_en = PTP_TCR_TSEVNTENA;
659 
660 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662 			ptp_over_ethernet = PTP_TCR_TSIPENA;
663 			break;
664 
665 		case HWTSTAMP_FILTER_NTP_ALL:
666 		case HWTSTAMP_FILTER_ALL:
667 			/* time stamp any incoming packet */
668 			config.rx_filter = HWTSTAMP_FILTER_ALL;
669 			tstamp_all = PTP_TCR_TSENALL;
670 			break;
671 
672 		default:
673 			return -ERANGE;
674 		}
675 	} else {
676 		switch (config.rx_filter) {
677 		case HWTSTAMP_FILTER_NONE:
678 			config.rx_filter = HWTSTAMP_FILTER_NONE;
679 			break;
680 		default:
681 			/* PTP v1, UDP, any kind of event packet */
682 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
683 			break;
684 		}
685 	}
686 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
687 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
688 
	if (!priv->hwts_tx_en && !priv->hwts_rx_en) {
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
	} else {
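		/* Timestamping is enabled with the fine correction method
		 * (TSCFUPDT) and digital rollover (TSCTRLSSR), i.e. the
		 * sub-second register counts in nanoseconds.
		 */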
692 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
693 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
694 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
695 			 ts_master_en | snap_type_sel);
696 		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
697 
698 		/* program Sub Second Increment reg */
699 		sec_inc = priv->hw->ptp->config_sub_second_increment(
700 			priv->ptpaddr, priv->plat->clk_ptp_rate,
701 			priv->plat->has_gmac4);
702 		temp = div_u64(1000000000ULL, sec_inc);
703 
		/* Calculate the default addend value:
		 * addend = (2^32) / freq_div_ratio,
		 * where freq_div_ratio = 1e9 ns / sec_inc.
		 */
709 		temp = (u64)(temp << 32);
710 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
711 		priv->hw->ptp->config_addend(priv->ptpaddr,
712 					     priv->default_addend);
713 
714 		/* initialize system time */
715 		ktime_get_real_ts64(&now);
716 
717 		/* lower 32 bits of tv_sec are safe until y2106 */
718 		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
719 					    now.tv_nsec);
720 	}
721 
722 	return copy_to_user(ifr->ifr_data, &config,
723 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
724 }
725 
726 /**
727  * stmmac_init_ptp - init PTP
728  * @priv: driver private structure
 * Description: this is to verify if the HW supports PTPv1 or PTPv2 by
 * looking at the HW capability register. It also registers the ptp driver.
732  */
733 static int stmmac_init_ptp(struct stmmac_priv *priv)
734 {
735 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
736 		return -EOPNOTSUPP;
737 
738 	priv->adv_ts = 0;
739 	/* Check if adv_ts can be enabled for dwmac 4.x core */
740 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
741 		priv->adv_ts = 1;
742 	/* Dwmac 3.x core with extend_desc can support adv_ts */
743 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
744 		priv->adv_ts = 1;
745 
746 	if (priv->dma_cap.time_stamp)
747 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
748 
749 	if (priv->adv_ts)
750 		netdev_info(priv->dev,
751 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
752 
753 	priv->hw->ptp = &stmmac_ptp;
754 	priv->hwts_tx_en = 0;
755 	priv->hwts_rx_en = 0;
756 
757 	stmmac_ptp_register(priv);
758 
759 	return 0;
760 }
761 
762 static void stmmac_release_ptp(struct stmmac_priv *priv)
763 {
764 	if (priv->plat->clk_ptp_ref)
765 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
766 	stmmac_ptp_unregister(priv);
767 }
768 
769 /**
770  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
771  *  @priv: driver private structure
772  *  Description: It is used for configuring the flow control in all queues
773  */
774 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
775 {
776 	u32 tx_cnt = priv->plat->tx_queues_to_use;
777 
778 	priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
779 				 priv->pause, tx_cnt);
780 }
781 
782 /**
783  * stmmac_adjust_link - adjusts the link parameters
784  * @dev: net device structure
785  * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because the link partner may change
 * when switching between different (EEE capable) networks.
790  */
791 static void stmmac_adjust_link(struct net_device *dev)
792 {
793 	struct stmmac_priv *priv = netdev_priv(dev);
794 	struct phy_device *phydev = dev->phydev;
795 	unsigned long flags;
796 	bool new_state = false;
797 
798 	if (!phydev)
799 		return;
800 
801 	spin_lock_irqsave(&priv->lock, flags);
802 
803 	if (phydev->link) {
804 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
805 
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
808 		if (phydev->duplex != priv->oldduplex) {
809 			new_state = true;
810 			if (!phydev->duplex)
811 				ctrl &= ~priv->hw->link.duplex;
812 			else
813 				ctrl |= priv->hw->link.duplex;
814 			priv->oldduplex = phydev->duplex;
815 		}
816 		/* Flow Control operation */
817 		if (phydev->pause)
818 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
819 
820 		if (phydev->speed != priv->speed) {
821 			new_state = true;
822 			ctrl &= ~priv->hw->link.speed_mask;
823 			switch (phydev->speed) {
824 			case SPEED_1000:
825 				ctrl |= priv->hw->link.speed1000;
826 				break;
827 			case SPEED_100:
828 				ctrl |= priv->hw->link.speed100;
829 				break;
830 			case SPEED_10:
831 				ctrl |= priv->hw->link.speed10;
832 				break;
833 			default:
834 				netif_warn(priv, link, priv->dev,
835 					   "broken speed: %d\n", phydev->speed);
836 				phydev->speed = SPEED_UNKNOWN;
837 				break;
838 			}
839 			if (phydev->speed != SPEED_UNKNOWN)
840 				stmmac_hw_fix_mac_speed(priv);
841 			priv->speed = phydev->speed;
842 		}
843 
844 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
845 
846 		if (!priv->oldlink) {
847 			new_state = true;
848 			priv->oldlink = true;
849 		}
850 	} else if (priv->oldlink) {
851 		new_state = true;
852 		priv->oldlink = false;
853 		priv->speed = SPEED_UNKNOWN;
854 		priv->oldduplex = DUPLEX_UNKNOWN;
855 	}
856 
857 	if (new_state && netif_msg_link(priv))
858 		phy_print_status(phydev);
859 
860 	spin_unlock_irqrestore(&priv->lock, flags);
861 
862 	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the adjust_link hook when
		 * a switch is attached to the stmmac driver.
		 */
866 		phydev->irq = PHY_IGNORE_INTERRUPT;
867 	else
868 		/* At this stage, init the EEE if supported.
869 		 * Never called in case of fixed_link.
870 		 */
871 		priv->eee_enabled = stmmac_eee_init(priv);
872 }
873 
874 /**
875  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
876  * @priv: driver private structure
 * Description: this is to verify if the HW supports the Physical Coding
 * Sublayer (PCS) interface, which can be used when the MAC is configured
 * for the TBI, RTBI, or SGMII PHY interface.
880  */
881 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
882 {
883 	int interface = priv->plat->interface;
884 
885 	if (priv->dma_cap.pcs) {
886 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
887 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
888 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
889 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
890 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
891 			priv->hw->pcs = STMMAC_PCS_RGMII;
892 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
893 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
894 			priv->hw->pcs = STMMAC_PCS_SGMII;
895 		}
896 	}
897 }
898 
899 /**
900  * stmmac_init_phy - PHY initialization
901  * @dev: net device structure
902  * Description: it initializes the driver's PHY state, and attaches the PHY
903  * to the mac driver.
904  *  Return value:
905  *  0 on success
906  */
907 static int stmmac_init_phy(struct net_device *dev)
908 {
909 	struct stmmac_priv *priv = netdev_priv(dev);
910 	struct phy_device *phydev;
911 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
912 	char bus_id[MII_BUS_ID_SIZE];
913 	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

	priv->oldlink = false;
916 	priv->speed = SPEED_UNKNOWN;
917 	priv->oldduplex = DUPLEX_UNKNOWN;
918 
919 	if (priv->plat->phy_node) {
920 		phydev = of_phy_connect(dev, priv->plat->phy_node,
921 					&stmmac_adjust_link, 0, interface);
922 	} else {
923 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
924 			 priv->plat->bus_id);
925 
926 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
927 			 priv->plat->phy_addr);
928 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
929 			   phy_id_fmt);
930 
931 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
932 				     interface);
933 	}
934 
935 	if (IS_ERR_OR_NULL(phydev)) {
936 		netdev_err(priv->dev, "Could not attach to PHY\n");
937 		if (!phydev)
938 			return -ENODEV;
939 
940 		return PTR_ERR(phydev);
941 	}
942 
943 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
944 	if ((interface == PHY_INTERFACE_MODE_MII) ||
945 	    (interface == PHY_INTERFACE_MODE_RMII) ||
946 		(max_speed < 1000 && max_speed > 0))
947 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
948 					 SUPPORTED_1000baseT_Full);
949 
950 	/*
951 	 * Broken HW is sometimes missing the pull-up resistor on the
952 	 * MDIO line, which results in reads to non-existent devices returning
953 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
954 	 * device as well.
955 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
956 	 */
957 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
958 		phy_disconnect(phydev);
959 		return -ENODEV;
960 	}
961 
962 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling; make sure we force a link transition if
	 * we have an UP/DOWN/UP transition.
965 	 */
966 	if (phydev->is_pseudo_fixed_link)
967 		phydev->irq = PHY_POLL;
968 
969 	phy_attached_info(phydev);
970 	return 0;
971 }
972 
973 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
974 {
975 	u32 rx_cnt = priv->plat->rx_queues_to_use;
976 	void *head_rx;
977 	u32 queue;
978 
979 	/* Display RX rings */
980 	for (queue = 0; queue < rx_cnt; queue++) {
981 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
982 
983 		pr_info("\tRX Queue %u rings\n", queue);
984 
985 		if (priv->extend_desc)
986 			head_rx = (void *)rx_q->dma_erx;
987 		else
988 			head_rx = (void *)rx_q->dma_rx;
989 
990 		/* Display RX ring */
991 		priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
992 	}
993 }
994 
995 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
996 {
997 	u32 tx_cnt = priv->plat->tx_queues_to_use;
998 	void *head_tx;
999 	u32 queue;
1000 
1001 	/* Display TX rings */
1002 	for (queue = 0; queue < tx_cnt; queue++) {
1003 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1004 
1005 		pr_info("\tTX Queue %d rings\n", queue);
1006 
1007 		if (priv->extend_desc)
1008 			head_tx = (void *)tx_q->dma_etx;
1009 		else
1010 			head_tx = (void *)tx_q->dma_tx;
1011 
1012 		priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1013 	}
1014 }
1015 
1016 static void stmmac_display_rings(struct stmmac_priv *priv)
1017 {
1018 	/* Display RX ring */
1019 	stmmac_display_rx_rings(priv);
1020 
1021 	/* Display TX ring */
1022 	stmmac_display_tx_rings(priv);
1023 }
1024 
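/**
 * stmmac_set_bfsize - select the DMA buffer size for a given MTU
 * @mtu: maximum transfer unit
 * @bufsize: current buffer size
 * Description: maps the MTU to one of the supported DMA buffer sizes
 * (default, 2KiB, 4KiB or 8KiB).
 */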
1025 static int stmmac_set_bfsize(int mtu, int bufsize)
1026 {
1027 	int ret = bufsize;
1028 
1029 	if (mtu >= BUF_SIZE_4KiB)
1030 		ret = BUF_SIZE_8KiB;
1031 	else if (mtu >= BUF_SIZE_2KiB)
1032 		ret = BUF_SIZE_4KiB;
1033 	else if (mtu > DEFAULT_BUFSIZE)
1034 		ret = BUF_SIZE_2KiB;
1035 	else
1036 		ret = DEFAULT_BUFSIZE;
1037 
1038 	return ret;
1039 }
1040 
1041 /**
1042  * stmmac_clear_rx_descriptors - clear RX descriptors
1043  * @priv: driver private structure
1044  * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are in use.
1047  */
1048 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1049 {
1050 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1051 	int i;
1052 
1053 	/* Clear the RX descriptors */
1054 	for (i = 0; i < DMA_RX_SIZE; i++)
1055 		if (priv->extend_desc)
1056 			priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1057 						     priv->use_riwt, priv->mode,
1058 						     (i == DMA_RX_SIZE - 1));
1059 		else
1060 			priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1061 						     priv->use_riwt, priv->mode,
1062 						     (i == DMA_RX_SIZE - 1));
1063 }
1064 
1065 /**
1066  * stmmac_clear_tx_descriptors - clear tx descriptors
1067  * @priv: driver private structure
1068  * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are in use.
1071  */
1072 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1073 {
1074 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1075 	int i;
1076 
1077 	/* Clear the TX descriptors */
1078 	for (i = 0; i < DMA_TX_SIZE; i++)
1079 		if (priv->extend_desc)
1080 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1081 						     priv->mode,
1082 						     (i == DMA_TX_SIZE - 1));
1083 		else
1084 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1085 						     priv->mode,
1086 						     (i == DMA_TX_SIZE - 1));
1087 }
1088 
1089 /**
1090  * stmmac_clear_descriptors - clear descriptors
1091  * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are in use.
1094  */
1095 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1096 {
1097 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1098 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1099 	u32 queue;
1100 
1101 	/* Clear the RX descriptors */
1102 	for (queue = 0; queue < rx_queue_cnt; queue++)
1103 		stmmac_clear_rx_descriptors(priv, queue);
1104 
1105 	/* Clear the TX descriptors */
1106 	for (queue = 0; queue < tx_queue_cnt; queue++)
1107 		stmmac_clear_tx_descriptors(priv, queue);
1108 }
1109 
1110 /**
1111  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1112  * @priv: driver private structure
1113  * @p: descriptor pointer
1114  * @i: descriptor index
1115  * @flags: gfp flag
1116  * @queue: RX queue index
1117  * Description: this function is called to allocate a receive buffer, perform
1118  * the DMA mapping and init the descriptor.
1119  */
1120 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1121 				  int i, gfp_t flags, u32 queue)
1122 {
1123 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1124 	struct sk_buff *skb;
1125 
1126 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1127 	if (!skb) {
1128 		netdev_err(priv->dev,
1129 			   "%s: Rx init fails; skb is NULL\n", __func__);
1130 		return -ENOMEM;
1131 	}
1132 	rx_q->rx_skbuff[i] = skb;
1133 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1134 						priv->dma_buf_sz,
1135 						DMA_FROM_DEVICE);
1136 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1137 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1138 		dev_kfree_skb_any(skb);
1139 		return -EINVAL;
1140 	}
1141 
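	/* DWMAC core 4.00 and newer expect the buffer address in des0,
	 * while older cores take it in des2.
	 */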
1142 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
1143 		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1144 	else
1145 		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1146 
1147 	if ((priv->hw->mode->init_desc3) &&
1148 	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
1149 		priv->hw->mode->init_desc3(p);
1150 
1151 	return 0;
1152 }
1153 
1154 /**
 * stmmac_free_rx_buffer - free an RX dma buffer
1156  * @priv: private structure
1157  * @queue: RX queue index
1158  * @i: buffer index.
1159  */
1160 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1161 {
1162 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1163 
1164 	if (rx_q->rx_skbuff[i]) {
1165 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1166 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1167 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1168 	}
1169 	rx_q->rx_skbuff[i] = NULL;
1170 }
1171 
1172 /**
 * stmmac_free_tx_buffer - free a TX dma buffer
 * @priv: private structure
 * @queue: TX queue index
1176  * @i: buffer index.
1177  */
1178 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1179 {
1180 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1181 
1182 	if (tx_q->tx_skbuff_dma[i].buf) {
1183 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1184 			dma_unmap_page(priv->device,
1185 				       tx_q->tx_skbuff_dma[i].buf,
1186 				       tx_q->tx_skbuff_dma[i].len,
1187 				       DMA_TO_DEVICE);
1188 		else
1189 			dma_unmap_single(priv->device,
1190 					 tx_q->tx_skbuff_dma[i].buf,
1191 					 tx_q->tx_skbuff_dma[i].len,
1192 					 DMA_TO_DEVICE);
1193 	}
1194 
1195 	if (tx_q->tx_skbuff[i]) {
1196 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1197 		tx_q->tx_skbuff[i] = NULL;
1198 		tx_q->tx_skbuff_dma[i].buf = 0;
1199 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1200 	}
1201 }
1202 
1203 /**
1204  * init_dma_rx_desc_rings - init the RX descriptor rings
1205  * @dev: net device structure
1206  * @flags: gfp flag.
1207  * Description: this function initializes the DMA RX descriptors
1208  * and allocates the socket buffers. It supports the chained and ring
1209  * modes.
1210  */
1211 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1212 {
1213 	struct stmmac_priv *priv = netdev_priv(dev);
1214 	u32 rx_count = priv->plat->rx_queues_to_use;
1215 	unsigned int bfsize = 0;
1216 	int ret = -ENOMEM;
1217 	int queue;
1218 	int i;
1219 
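	/* Jumbo frames may need 16KiB buffers: let the mode-specific hook
	 * decide first, otherwise derive the buffer size from the MTU.
	 */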
1220 	if (priv->hw->mode->set_16kib_bfsize)
1221 		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1222 
1223 	if (bfsize < BUF_SIZE_16KiB)
1224 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1225 
1226 	priv->dma_buf_sz = bfsize;
1227 
1228 	/* RX INITIALIZATION */
1229 	netif_dbg(priv, probe, priv->dev,
1230 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1231 
1232 	for (queue = 0; queue < rx_count; queue++) {
1233 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1234 
1235 		netif_dbg(priv, probe, priv->dev,
1236 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1237 			  (u32)rx_q->dma_rx_phy);
1238 
1239 		for (i = 0; i < DMA_RX_SIZE; i++) {
1240 			struct dma_desc *p;
1241 
1242 			if (priv->extend_desc)
1243 				p = &((rx_q->dma_erx + i)->basic);
1244 			else
1245 				p = rx_q->dma_rx + i;
1246 
1247 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1248 						     queue);
1249 			if (ret)
1250 				goto err_init_rx_buffers;
1251 
1252 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1253 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1254 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1255 		}
1256 
1257 		rx_q->cur_rx = 0;
1258 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1259 
1260 		stmmac_clear_rx_descriptors(priv, queue);
1261 
1262 		/* Setup the chained descriptor addresses */
1263 		if (priv->mode == STMMAC_CHAIN_MODE) {
1264 			if (priv->extend_desc)
1265 				priv->hw->mode->init(rx_q->dma_erx,
1266 						     rx_q->dma_rx_phy,
1267 						     DMA_RX_SIZE, 1);
1268 			else
1269 				priv->hw->mode->init(rx_q->dma_rx,
1270 						     rx_q->dma_rx_phy,
1271 						     DMA_RX_SIZE, 0);
1272 		}
1273 	}
1274 
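	/* Keep the buf_sz module parameter in sync with the size in use */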
1275 	buf_sz = bfsize;
1276 
1277 	return 0;
1278 
1279 err_init_rx_buffers:
1280 	while (queue >= 0) {
1281 		while (--i >= 0)
1282 			stmmac_free_rx_buffer(priv, queue, i);
1283 
1284 		if (queue == 0)
1285 			break;
1286 
1287 		i = DMA_RX_SIZE;
1288 		queue--;
1289 	}
1290 
1291 	return ret;
1292 }
1293 
1294 /**
1295  * init_dma_tx_desc_rings - init the TX descriptor rings
1296  * @dev: net device structure.
1297  * Description: this function initializes the DMA TX descriptors
1298  * and allocates the socket buffers. It supports the chained and ring
1299  * modes.
1300  */
1301 static int init_dma_tx_desc_rings(struct net_device *dev)
1302 {
1303 	struct stmmac_priv *priv = netdev_priv(dev);
1304 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1305 	u32 queue;
1306 	int i;
1307 
1308 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1309 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1310 
1311 		netif_dbg(priv, probe, priv->dev,
1312 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1313 			 (u32)tx_q->dma_tx_phy);
1314 
1315 		/* Setup the chained descriptor addresses */
1316 		if (priv->mode == STMMAC_CHAIN_MODE) {
1317 			if (priv->extend_desc)
1318 				priv->hw->mode->init(tx_q->dma_etx,
1319 						     tx_q->dma_tx_phy,
1320 						     DMA_TX_SIZE, 1);
1321 			else
1322 				priv->hw->mode->init(tx_q->dma_tx,
1323 						     tx_q->dma_tx_phy,
1324 						     DMA_TX_SIZE, 0);
1325 		}
1326 
1327 		for (i = 0; i < DMA_TX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
1330 				p = &((tx_q->dma_etx + i)->basic);
1331 			else
1332 				p = tx_q->dma_tx + i;
1333 
1334 			if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1335 				p->des0 = 0;
1336 				p->des1 = 0;
1337 				p->des2 = 0;
1338 				p->des3 = 0;
1339 			} else {
1340 				p->des2 = 0;
1341 			}
1342 
1343 			tx_q->tx_skbuff_dma[i].buf = 0;
1344 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1345 			tx_q->tx_skbuff_dma[i].len = 0;
1346 			tx_q->tx_skbuff_dma[i].last_segment = false;
1347 			tx_q->tx_skbuff[i] = NULL;
1348 		}
1349 
1350 		tx_q->dirty_tx = 0;
1351 		tx_q->cur_tx = 0;
1352 
1353 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1354 	}
1355 
1356 	return 0;
1357 }
1358 
1359 /**
1360  * init_dma_desc_rings - init the RX/TX descriptor rings
1361  * @dev: net device structure
1362  * @flags: gfp flag.
1363  * Description: this function initializes the DMA RX/TX descriptors
1364  * and allocates the socket buffers. It supports the chained and ring
1365  * modes.
1366  */
1367 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1368 {
1369 	struct stmmac_priv *priv = netdev_priv(dev);
1370 	int ret;
1371 
1372 	ret = init_dma_rx_desc_rings(dev, flags);
1373 	if (ret)
1374 		return ret;
1375 
1376 	ret = init_dma_tx_desc_rings(dev);
1377 
1378 	stmmac_clear_descriptors(priv);
1379 
1380 	if (netif_msg_hw(priv))
1381 		stmmac_display_rings(priv);
1382 
1383 	return ret;
1384 }
1385 
1386 /**
1387  * dma_free_rx_skbufs - free RX dma buffers
1388  * @priv: private structure
1389  * @queue: RX queue index
1390  */
1391 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1392 {
1393 	int i;
1394 
1395 	for (i = 0; i < DMA_RX_SIZE; i++)
1396 		stmmac_free_rx_buffer(priv, queue, i);
1397 }
1398 
1399 /**
1400  * dma_free_tx_skbufs - free TX dma buffers
1401  * @priv: private structure
1402  * @queue: TX queue index
1403  */
1404 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1405 {
1406 	int i;
1407 
1408 	for (i = 0; i < DMA_TX_SIZE; i++)
1409 		stmmac_free_tx_buffer(priv, queue, i);
1410 }
1411 
1412 /**
1413  * free_dma_rx_desc_resources - free RX dma desc resources
1414  * @priv: private structure
1415  */
1416 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1417 {
1418 	u32 rx_count = priv->plat->rx_queues_to_use;
1419 	u32 queue;
1420 
1421 	/* Free RX queue resources */
1422 	for (queue = 0; queue < rx_count; queue++) {
1423 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1424 
1425 		/* Release the DMA RX socket buffers */
1426 		dma_free_rx_skbufs(priv, queue);
1427 
1428 		/* Free DMA regions of consistent memory previously allocated */
1429 		if (!priv->extend_desc)
1430 			dma_free_coherent(priv->device,
1431 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1432 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1433 		else
1434 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1435 					  sizeof(struct dma_extended_desc),
1436 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1437 
1438 		kfree(rx_q->rx_skbuff_dma);
1439 		kfree(rx_q->rx_skbuff);
1440 	}
1441 }
1442 
1443 /**
1444  * free_dma_tx_desc_resources - free TX dma desc resources
1445  * @priv: private structure
1446  */
1447 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1448 {
1449 	u32 tx_count = priv->plat->tx_queues_to_use;
1450 	u32 queue;
1451 
1452 	/* Free TX queue resources */
1453 	for (queue = 0; queue < tx_count; queue++) {
1454 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1455 
1456 		/* Release the DMA TX socket buffers */
1457 		dma_free_tx_skbufs(priv, queue);
1458 
1459 		/* Free DMA regions of consistent memory previously allocated */
1460 		if (!priv->extend_desc)
1461 			dma_free_coherent(priv->device,
1462 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1463 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1464 		else
1465 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1466 					  sizeof(struct dma_extended_desc),
1467 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1468 
1469 		kfree(tx_q->tx_skbuff_dma);
1470 		kfree(tx_q->tx_skbuff);
1471 	}
1472 }
1473 
1474 /**
1475  * alloc_dma_rx_desc_resources - alloc RX resources.
1476  * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the RX path. It pre-allocates
 * the RX socket buffers in order to allow a zero-copy mechanism.
1481  */
1482 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1483 {
1484 	u32 rx_count = priv->plat->rx_queues_to_use;
1485 	int ret = -ENOMEM;
1486 	u32 queue;
1487 
1488 	/* RX queues buffers and DMA */
1489 	for (queue = 0; queue < rx_count; queue++) {
1490 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1491 
1492 		rx_q->queue_index = queue;
1493 		rx_q->priv_data = priv;
1494 
1495 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1496 						    sizeof(dma_addr_t),
1497 						    GFP_KERNEL);
1498 		if (!rx_q->rx_skbuff_dma)
1499 			goto err_dma;
1500 
1501 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1502 						sizeof(struct sk_buff *),
1503 						GFP_KERNEL);
1504 		if (!rx_q->rx_skbuff)
1505 			goto err_dma;
1506 
1507 		if (priv->extend_desc) {
1508 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1509 							    DMA_RX_SIZE *
1510 							    sizeof(struct
1511 							    dma_extended_desc),
1512 							    &rx_q->dma_rx_phy,
1513 							    GFP_KERNEL);
1514 			if (!rx_q->dma_erx)
1515 				goto err_dma;
1516 
1517 		} else {
1518 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1519 							   DMA_RX_SIZE *
1520 							   sizeof(struct
1521 							   dma_desc),
1522 							   &rx_q->dma_rx_phy,
1523 							   GFP_KERNEL);
1524 			if (!rx_q->dma_rx)
1525 				goto err_dma;
1526 		}
1527 	}
1528 
1529 	return 0;
1530 
1531 err_dma:
1532 	free_dma_rx_desc_resources(priv);
1533 
1534 	return ret;
1535 }
1536 
1537 /**
1538  * alloc_dma_tx_desc_resources - alloc TX resources.
1539  * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX path.
1544  */
1545 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1546 {
1547 	u32 tx_count = priv->plat->tx_queues_to_use;
1548 	int ret = -ENOMEM;
1549 	u32 queue;
1550 
1551 	/* TX queues buffers and DMA */
1552 	for (queue = 0; queue < tx_count; queue++) {
1553 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1554 
1555 		tx_q->queue_index = queue;
1556 		tx_q->priv_data = priv;
1557 
1558 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1559 						    sizeof(*tx_q->tx_skbuff_dma),
1560 						    GFP_KERNEL);
1561 		if (!tx_q->tx_skbuff_dma)
1562 			goto err_dma;
1563 
1564 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1565 						sizeof(struct sk_buff *),
1566 						GFP_KERNEL);
1567 		if (!tx_q->tx_skbuff)
1568 			goto err_dma;
1569 
1570 		if (priv->extend_desc) {
1571 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1572 							    DMA_TX_SIZE *
1573 							    sizeof(struct
1574 							    dma_extended_desc),
1575 							    &tx_q->dma_tx_phy,
1576 							    GFP_KERNEL);
1577 			if (!tx_q->dma_etx)
1578 				goto err_dma;
1579 		} else {
1580 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1581 							   DMA_TX_SIZE *
1582 							   sizeof(struct
1583 								  dma_desc),
1584 							   &tx_q->dma_tx_phy,
1585 							   GFP_KERNEL);
1586 			if (!tx_q->dma_tx)
1587 				goto err_dma;
1588 		}
1589 	}
1590 
1591 	return 0;
1592 
1593 err_dma:
1594 	free_dma_tx_desc_resources(priv);
1595 
1596 	return ret;
1597 }
1598 
1599 /**
1600  * alloc_dma_desc_resources - alloc TX/RX resources.
1601  * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX and RX paths. For
 * reception it pre-allocates the RX socket buffers in order to allow a
 * zero-copy mechanism.
1606  */
1607 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1608 {
1609 	/* RX Allocation */
1610 	int ret = alloc_dma_rx_desc_resources(priv);
1611 
1612 	if (ret)
1613 		return ret;
1614 
1615 	ret = alloc_dma_tx_desc_resources(priv);
1616 
1617 	return ret;
1618 }
1619 
1620 /**
1621  * free_dma_desc_resources - free dma desc resources
1622  * @priv: private structure
1623  */
1624 static void free_dma_desc_resources(struct stmmac_priv *priv)
1625 {
1626 	/* Release the DMA RX socket buffers */
1627 	free_dma_rx_desc_resources(priv);
1628 
1629 	/* Release the DMA TX socket buffers */
1630 	free_dma_tx_desc_resources(priv);
1631 }
1632 
1633 /**
1634  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1635  *  @priv: driver private structure
1636  *  Description: It is used for enabling the rx queues in the MAC
1637  */
1638 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1639 {
1640 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1641 	int queue;
1642 	u8 mode;
1643 
1644 	for (queue = 0; queue < rx_queues_count; queue++) {
1645 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1646 		priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1647 	}
1648 }
1649 
1650 /**
1651  * stmmac_start_rx_dma - start RX DMA channel
1652  * @priv: driver private structure
1653  * @chan: RX channel index
1654  * Description:
 * This starts an RX DMA channel
1656  */
1657 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1658 {
1659 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1660 	priv->hw->dma->start_rx(priv->ioaddr, chan);
1661 }
1662 
1663 /**
1664  * stmmac_start_tx_dma - start TX DMA channel
1665  * @priv: driver private structure
1666  * @chan: TX channel index
1667  * Description:
1668  * This starts a TX DMA channel
1669  */
1670 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1671 {
1672 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1673 	priv->hw->dma->start_tx(priv->ioaddr, chan);
1674 }
1675 
1676 /**
1677  * stmmac_stop_rx_dma - stop RX DMA channel
1678  * @priv: driver private structure
1679  * @chan: RX channel index
1680  * Description:
 * This stops an RX DMA channel
1682  */
1683 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1684 {
1685 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1686 	priv->hw->dma->stop_rx(priv->ioaddr, chan);
1687 }
1688 
1689 /**
1690  * stmmac_stop_tx_dma - stop TX DMA channel
1691  * @priv: driver private structure
1692  * @chan: TX channel index
1693  * Description:
1694  * This stops a TX DMA channel
1695  */
1696 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1697 {
1698 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1699 	priv->hw->dma->stop_tx(priv->ioaddr, chan);
1700 }
1701 
1702 /**
1703  * stmmac_start_all_dma - start all RX and TX DMA channels
1704  * @priv: driver private structure
1705  * Description:
1706  * This starts all the RX and TX DMA channels
1707  */
1708 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1709 {
1710 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1711 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1712 	u32 chan = 0;
1713 
1714 	for (chan = 0; chan < rx_channels_count; chan++)
1715 		stmmac_start_rx_dma(priv, chan);
1716 
1717 	for (chan = 0; chan < tx_channels_count; chan++)
1718 		stmmac_start_tx_dma(priv, chan);
1719 }
1720 
1721 /**
1722  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1723  * @priv: driver private structure
1724  * Description:
1725  * This stops the RX and TX DMA channels
1726  */
1727 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1728 {
1729 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1730 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1731 	u32 chan = 0;
1732 
1733 	for (chan = 0; chan < rx_channels_count; chan++)
1734 		stmmac_stop_rx_dma(priv, chan);
1735 
1736 	for (chan = 0; chan < tx_channels_count; chan++)
1737 		stmmac_stop_tx_dma(priv, chan);
1738 }
1739 
1740 /**
1741  *  stmmac_dma_operation_mode - HW DMA operation mode
1742  *  @priv: driver private structure
1743  *  Description: it is used for configuring the DMA operation mode register in
1744  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1745  */
1746 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1747 {
1748 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1749 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1750 	int rxfifosz = priv->plat->rx_fifo_size;
1751 	int txfifosz = priv->plat->tx_fifo_size;
1752 	u32 txmode = 0;
1753 	u32 rxmode = 0;
1754 	u32 chan = 0;
1755 	u8 qmode = 0;
1756 
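	/* Fall back to the FIFO sizes reported in the HW capabilities
	 * when the platform does not provide them.
	 */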
1757 	if (rxfifosz == 0)
1758 		rxfifosz = priv->dma_cap.rx_fifo_size;
1759 	if (txfifosz == 0)
1760 		txfifosz = priv->dma_cap.tx_fifo_size;
1761 
1762 	/* Adjust for real per queue fifo size */
1763 	rxfifosz /= rx_channels_count;
1764 	txfifosz /= tx_channels_count;
1765 
1766 	if (priv->plat->force_thresh_dma_mode) {
1767 		txmode = tc;
1768 		rxmode = tc;
1769 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1770 		/*
1771 		 * In case of GMAC, SF mode can be enabled
1772 		 * to perform the TX COE in HW. This depends on:
1773 		 * 1) TX COE being actually supported;
1774 		 * 2) there being no bugged Jumbo frame support
1775 		 *    that requires not inserting the csum in the TDES.
1776 		 */
1777 		txmode = SF_DMA_MODE;
1778 		rxmode = SF_DMA_MODE;
1779 		priv->xstats.threshold = SF_DMA_MODE;
1780 	} else {
1781 		txmode = tc;
1782 		rxmode = SF_DMA_MODE;
1783 	}
1784 
1785 	/* configure all channels */
1786 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1787 		for (chan = 0; chan < rx_channels_count; chan++) {
1788 			qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1789 
1790 			priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1791 						   rxfifosz, qmode);
1792 		}
1793 
1794 		for (chan = 0; chan < tx_channels_count; chan++) {
1795 			qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1796 
1797 			priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1798 						   txfifosz, qmode);
1799 		}
1800 	} else {
1801 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1802 					rxfifosz);
1803 	}
1804 }
1805 
1806 /**
1807  * stmmac_tx_clean - to manage the transmission completion
1808  * @priv: driver private structure
1809  * @queue: TX queue index
1810  * Description: it reclaims the transmit resources after transmission completes.
1811  */
1812 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1813 {
1814 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1815 	unsigned int bytes_compl = 0, pkts_compl = 0;
1816 	unsigned int entry;
1817 
1818 	netif_tx_lock(priv->dev);
1819 
1820 	priv->xstats.tx_clean++;
1821 
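	/* Reclaim descriptors from dirty_tx up to cur_tx, stopping as soon as
	 * one still owned by the DMA is found.
	 */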
1822 	entry = tx_q->dirty_tx;
1823 	while (entry != tx_q->cur_tx) {
1824 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1825 		struct dma_desc *p;
1826 		int status;
1827 
1828 		if (priv->extend_desc)
1829 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1830 		else
1831 			p = tx_q->dma_tx + entry;
1832 
1833 		status = priv->hw->desc->tx_status(&priv->dev->stats,
1834 						      &priv->xstats, p,
1835 						      priv->ioaddr);
1836 		/* Check if the descriptor is owned by the DMA */
1837 		if (unlikely(status & tx_dma_own))
1838 			break;
1839 
1840 		/* Just consider the last segment and ...*/
1841 		if (likely(!(status & tx_not_ls))) {
1842 			/* ... verify the status error condition */
1843 			if (unlikely(status & tx_err)) {
1844 				priv->dev->stats.tx_errors++;
1845 			} else {
1846 				priv->dev->stats.tx_packets++;
1847 				priv->xstats.tx_pkt_n++;
1848 			}
1849 			stmmac_get_tx_hwtstamp(priv, p, skb);
1850 		}
1851 
1852 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1853 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1854 				dma_unmap_page(priv->device,
1855 					       tx_q->tx_skbuff_dma[entry].buf,
1856 					       tx_q->tx_skbuff_dma[entry].len,
1857 					       DMA_TO_DEVICE);
1858 			else
1859 				dma_unmap_single(priv->device,
1860 						 tx_q->tx_skbuff_dma[entry].buf,
1861 						 tx_q->tx_skbuff_dma[entry].len,
1862 						 DMA_TO_DEVICE);
1863 			tx_q->tx_skbuff_dma[entry].buf = 0;
1864 			tx_q->tx_skbuff_dma[entry].len = 0;
1865 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1866 		}
1867 
1868 		if (priv->hw->mode->clean_desc3)
1869 			priv->hw->mode->clean_desc3(tx_q, p);
1870 
1871 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1872 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1873 
1874 		if (likely(skb != NULL)) {
1875 			pkts_compl++;
1876 			bytes_compl += skb->len;
1877 			dev_consume_skb_any(skb);
1878 			tx_q->tx_skbuff[entry] = NULL;
1879 		}
1880 
1881 		priv->hw->desc->release_tx_desc(p, priv->mode);
1882 
1883 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1884 	}
1885 	tx_q->dirty_tx = entry;
1886 
1887 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1888 				  pkts_compl, bytes_compl);
1889 
1890 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1891 								queue))) &&
1892 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1893 
1894 		netif_dbg(priv, tx_done, priv->dev,
1895 			  "%s: restart transmit\n", __func__);
1896 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1897 	}
1898 
1899 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1900 		stmmac_enable_eee_mode(priv);
1901 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1902 	}
1903 	netif_tx_unlock(priv->dev);
1904 }
1905 
1906 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1907 {
1908 	priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1909 }
1910 
1911 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1912 {
1913 	priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1914 }
1915 
1916 /**
1917  * stmmac_tx_err - to manage the tx error
1918  * @priv: driver private structure
1919  * @chan: channel index
1920  * Description: it cleans the descriptors and restarts the transmission
1921  * in case of transmission errors.
1922  */
1923 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1924 {
1925 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1926 	int i;
1927 
1928 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1929 
1930 	stmmac_stop_tx_dma(priv, chan);
1931 	dma_free_tx_skbufs(priv, chan);
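	/* Reinitialize the whole TX descriptor ring so the channel restarts
	 * from a clean state.
	 */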
1932 	for (i = 0; i < DMA_TX_SIZE; i++)
1933 		if (priv->extend_desc)
1934 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1935 						     priv->mode,
1936 						     (i == DMA_TX_SIZE - 1));
1937 		else
1938 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1939 						     priv->mode,
1940 						     (i == DMA_TX_SIZE - 1));
1941 	tx_q->dirty_tx = 0;
1942 	tx_q->cur_tx = 0;
1943 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1944 	stmmac_start_tx_dma(priv, chan);
1945 
1946 	priv->dev->stats.tx_errors++;
1947 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1948 }
1949 
1950 /**
1951  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1952  *  @priv: driver private structure
1953  *  @txmode: TX operating mode
1954  *  @rxmode: RX operating mode
1955  *  @chan: channel index
1956  *  Description: it is used for configuring the DMA operation mode at
1957  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1958  *  mode.
1959  */
1960 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1961 					  u32 rxmode, u32 chan)
1962 {
1963 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1964 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1965 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1966 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1967 	int rxfifosz = priv->plat->rx_fifo_size;
1968 	int txfifosz = priv->plat->tx_fifo_size;
1969 
1970 	if (rxfifosz == 0)
1971 		rxfifosz = priv->dma_cap.rx_fifo_size;
1972 	if (txfifosz == 0)
1973 		txfifosz = priv->dma_cap.tx_fifo_size;
1974 
1975 	/* Adjust for real per queue fifo size */
1976 	rxfifosz /= rx_channels_count;
1977 	txfifosz /= tx_channels_count;
1978 
1979 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1980 		priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1981 					   rxfifosz, rxqmode);
1982 		priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1983 					   txfifosz, txqmode);
1984 	} else {
1985 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1986 					rxfifosz);
1987 	}
1988 }
1989 
1990 /**
1991  * stmmac_dma_interrupt - DMA ISR
1992  * @priv: driver private structure
1993  * Description: this is the DMA ISR. It is called by the main ISR.
1994  * It calls the dwmac dma routine and schedules the poll method when there
1995  * is work to be done.
1996  */
1997 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1998 {
1999 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2000 	int status;
2001 	u32 chan;
2002 
2003 	for (chan = 0; chan < tx_channel_count; chan++) {
2004 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2005 
2006 		status = priv->hw->dma->dma_interrupt(priv->ioaddr,
2007 						      &priv->xstats, chan);
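		/* If the channel reports RX or TX work, mask its DMA irq and
		 * let the NAPI poll routine do the processing.
		 */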
2008 		if (likely((status & handle_rx)) || (status & handle_tx)) {
2009 			if (likely(napi_schedule_prep(&rx_q->napi))) {
2010 				stmmac_disable_dma_irq(priv, chan);
2011 				__napi_schedule(&rx_q->napi);
2012 			}
2013 		}
2014 
2015 		if (unlikely(status & tx_hard_error_bump_tc)) {
2016 			/* Try to bump up the dma threshold on this failure */
2017 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2018 			    (tc <= 256)) {
2019 				tc += 64;
2020 				if (priv->plat->force_thresh_dma_mode)
2021 					stmmac_set_dma_operation_mode(priv,
2022 								      tc,
2023 								      tc,
2024 								      chan);
2025 				else
2026 					stmmac_set_dma_operation_mode(priv,
2027 								    tc,
2028 								    SF_DMA_MODE,
2029 								    chan);
2030 				priv->xstats.threshold = tc;
2031 			}
2032 		} else if (unlikely(status == tx_hard_error)) {
2033 			stmmac_tx_err(priv, chan);
2034 		}
2035 	}
2036 }
2037 
2038 /**
2039  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2040  * @priv: driver private structure
2041  * Description: this masks the MMC irq since the counters are managed in SW.
2042  */
2043 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2044 {
2045 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2046 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2047 
2048 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2049 		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2050 		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2051 	} else {
2052 		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2053 		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2054 	}
2055 
2056 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2057 
2058 	if (priv->dma_cap.rmon) {
2059 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2060 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2061 	} else
2062 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2063 }
2064 
2065 /**
2066  * stmmac_selec_desc_mode - to select among normal/alternate/extended descriptors
2067  * @priv: driver private structure
2068  * Description: select the Enhanced/Alternate or Normal descriptors.
2069  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2070  * supported by the HW capability register.
2071  */
2072 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2073 {
2074 	if (priv->plat->enh_desc) {
2075 		dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2076 
2077 		/* GMAC older than 3.50 has no extended descriptors */
2078 		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2079 			dev_info(priv->device, "Enabled extended descriptors\n");
2080 			priv->extend_desc = 1;
2081 		} else
2082 			dev_warn(priv->device, "Extended descriptors not supported\n");
2083 
2084 		priv->hw->desc = &enh_desc_ops;
2085 	} else {
2086 		dev_info(priv->device, "Normal descriptors\n");
2087 		priv->hw->desc = &ndesc_ops;
2088 	}
2089 }
2090 
2091 /**
2092  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2093  * @priv: driver private structure
2094  * Description:
2095  *  newer GMAC chip generations have a register to indicate the
2096  *  presence of the optional features/functions.
2097  *  This can also be used to override the values passed through the
2098  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2099  */
2100 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2101 {
2102 	u32 ret = 0;
2103 
2104 	if (priv->hw->dma->get_hw_feature) {
2105 		priv->hw->dma->get_hw_feature(priv->ioaddr,
2106 					      &priv->dma_cap);
2107 		ret = 1;
2108 	}
2109 
2110 	return ret;
2111 }
2112 
2113 /**
2114  * stmmac_check_ether_addr - check if the MAC addr is valid
2115  * @priv: driver private structure
2116  * Description:
2117  * it verifies that the MAC address is valid; if not, it reads it from the
2118  * HW and, as a last resort, generates a random MAC address.
2119  */
2120 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2121 {
2122 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2123 		priv->hw->mac->get_umac_addr(priv->hw,
2124 					     priv->dev->dev_addr, 0);
2125 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2126 			eth_hw_addr_random(priv->dev);
2127 		netdev_info(priv->dev, "device MAC address %pM\n",
2128 			    priv->dev->dev_addr);
2129 	}
2130 }
2131 
2132 /**
2133  * stmmac_init_dma_engine - DMA init.
2134  * @priv: driver private structure
2135  * Description:
2136  * It inits the DMA by invoking the specific MAC/GMAC callback.
2137  * Some DMA parameters can be passed from the platform;
2138  * if they are not passed, a default is used for the MAC or GMAC.
2139  */
2140 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2141 {
2142 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2143 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2144 	struct stmmac_rx_queue *rx_q;
2145 	struct stmmac_tx_queue *tx_q;
2146 	u32 dummy_dma_rx_phy = 0;
2147 	u32 dummy_dma_tx_phy = 0;
2148 	u32 chan = 0;
2149 	int atds = 0;
2150 	int ret = 0;
2151 
2152 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2153 		dev_err(priv->device, "Invalid DMA configuration\n");
2154 		return -EINVAL;
2155 	}
2156 
2157 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2158 		atds = 1;
2159 
2160 	ret = priv->hw->dma->reset(priv->ioaddr);
2161 	if (ret) {
2162 		dev_err(priv->device, "Failed to reset the dma\n");
2163 		return ret;
2164 	}
2165 
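	/* GMAC4 and newer cores are programmed per channel and use tail
	 * pointers; older cores take a single RX/TX descriptor base address.
	 */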
2166 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2167 		/* DMA Configuration */
2168 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2169 				    dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2170 
2171 		/* DMA RX Channel Configuration */
2172 		for (chan = 0; chan < rx_channels_count; chan++) {
2173 			rx_q = &priv->rx_queue[chan];
2174 
2175 			priv->hw->dma->init_rx_chan(priv->ioaddr,
2176 						    priv->plat->dma_cfg,
2177 						    rx_q->dma_rx_phy, chan);
2178 
2179 			rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2180 				    (DMA_RX_SIZE * sizeof(struct dma_desc));
2181 			priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2182 						       rx_q->rx_tail_addr,
2183 						       chan);
2184 		}
2185 
2186 		/* DMA TX Channel Configuration */
2187 		for (chan = 0; chan < tx_channels_count; chan++) {
2188 			tx_q = &priv->tx_queue[chan];
2189 
2190 			priv->hw->dma->init_chan(priv->ioaddr,
2191 						 priv->plat->dma_cfg,
2192 						 chan);
2193 
2194 			priv->hw->dma->init_tx_chan(priv->ioaddr,
2195 						    priv->plat->dma_cfg,
2196 						    tx_q->dma_tx_phy, chan);
2197 
2198 			tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2199 				    (DMA_TX_SIZE * sizeof(struct dma_desc));
2200 			priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2201 						       tx_q->tx_tail_addr,
2202 						       chan);
2203 		}
2204 	} else {
2205 		rx_q = &priv->rx_queue[chan];
2206 		tx_q = &priv->tx_queue[chan];
2207 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2208 				    tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2209 	}
2210 
2211 	if (priv->plat->axi && priv->hw->dma->axi)
2212 		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2213 
2214 	return ret;
2215 }
2216 
2217 /**
2218  * stmmac_tx_timer - mitigation sw timer for tx.
2219  * @t: pointer to the timer_list structure
2220  * Description:
2221  * This is the timer handler that directly invokes stmmac_tx_clean.
2222  */
2223 static void stmmac_tx_timer(struct timer_list *t)
2224 {
2225 	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2226 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2227 	u32 queue;
2228 
2229 	/* let's scan all the tx queues */
2230 	for (queue = 0; queue < tx_queues_count; queue++)
2231 		stmmac_tx_clean(priv, queue);
2232 }
2233 
2234 /**
2235  * stmmac_init_tx_coalesce - init tx mitigation options.
2236  * @priv: driver private structure
2237  * Description:
2238  * This inits the transmit coalesce parameters: i.e. timer rate,
2239  * timer handler and default threshold used for enabling the
2240  * interrupt on completion bit.
2241  */
2242 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2243 {
2244 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2245 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2246 	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2247 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2248 	add_timer(&priv->txtimer);
2249 }
2250 
2251 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2252 {
2253 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2254 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2255 	u32 chan;
2256 
2257 	/* set TX ring length */
2258 	if (priv->hw->dma->set_tx_ring_len) {
2259 		for (chan = 0; chan < tx_channels_count; chan++)
2260 			priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2261 						       (DMA_TX_SIZE - 1), chan);
2262 	}
2263 
2264 	/* set RX ring length */
2265 	if (priv->hw->dma->set_rx_ring_len) {
2266 		for (chan = 0; chan < rx_channels_count; chan++)
2267 			priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2268 						       (DMA_RX_SIZE - 1), chan);
2269 	}
2270 }
2271 
2272 /**
2273  *  stmmac_set_tx_queue_weight - Set TX queue weight
2274  *  @priv: driver private structure
2275  *  Description: It is used for setting the TX queue weights
2276  */
2277 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2278 {
2279 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2280 	u32 weight;
2281 	u32 queue;
2282 
2283 	for (queue = 0; queue < tx_queues_count; queue++) {
2284 		weight = priv->plat->tx_queues_cfg[queue].weight;
2285 		priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2286 	}
2287 }
2288 
2289 /**
2290  *  stmmac_configure_cbs - Configure CBS in TX queue
2291  *  @priv: driver private structure
2292  *  Description: It is used for configuring CBS in AVB TX queues
2293  */
2294 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2295 {
2296 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2297 	u32 mode_to_use;
2298 	u32 queue;
2299 
2300 	/* queue 0 is reserved for legacy traffic */
2301 	for (queue = 1; queue < tx_queues_count; queue++) {
2302 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2303 		if (mode_to_use == MTL_QUEUE_DCB)
2304 			continue;
2305 
2306 		priv->hw->mac->config_cbs(priv->hw,
2307 				priv->plat->tx_queues_cfg[queue].send_slope,
2308 				priv->plat->tx_queues_cfg[queue].idle_slope,
2309 				priv->plat->tx_queues_cfg[queue].high_credit,
2310 				priv->plat->tx_queues_cfg[queue].low_credit,
2311 				queue);
2312 	}
2313 }
2314 
2315 /**
2316  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2317  *  @priv: driver private structure
2318  *  Description: It is used for mapping RX queues to RX dma channels
2319  */
2320 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2321 {
2322 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2323 	u32 queue;
2324 	u32 chan;
2325 
2326 	for (queue = 0; queue < rx_queues_count; queue++) {
2327 		chan = priv->plat->rx_queues_cfg[queue].chan;
2328 		priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2329 	}
2330 }
2331 
2332 /**
2333  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2334  *  @priv: driver private structure
2335  *  Description: It is used for configuring the RX Queue Priority
2336  */
2337 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2338 {
2339 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2340 	u32 queue;
2341 	u32 prio;
2342 
2343 	for (queue = 0; queue < rx_queues_count; queue++) {
2344 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2345 			continue;
2346 
2347 		prio = priv->plat->rx_queues_cfg[queue].prio;
2348 		priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2349 	}
2350 }
2351 
2352 /**
2353  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2354  *  @priv: driver private structure
2355  *  Description: It is used for configuring the TX Queue Priority
2356  */
2357 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2358 {
2359 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2360 	u32 queue;
2361 	u32 prio;
2362 
2363 	for (queue = 0; queue < tx_queues_count; queue++) {
2364 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2365 			continue;
2366 
2367 		prio = priv->plat->tx_queues_cfg[queue].prio;
2368 		priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2369 	}
2370 }
2371 
2372 /**
2373  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2374  *  @priv: driver private structure
2375  *  Description: It is used for configuring the RX queue routing
2376  */
2377 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2378 {
2379 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2380 	u32 queue;
2381 	u8 packet;
2382 
2383 	for (queue = 0; queue < rx_queues_count; queue++) {
2384 		/* no specific packet type routing specified for the queue */
2385 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2386 			continue;
2387 
2388 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2389 		priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2390 	}
2391 }
2392 
2393 /**
2394  *  stmmac_mtl_configuration - Configure MTL
2395  *  @priv: driver private structure
2396  *  Description: It is used for configuring MTL
2397  */
2398 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2399 {
2400 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2401 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2402 
2403 	if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2404 		stmmac_set_tx_queue_weight(priv);
2405 
2406 	/* Configure MTL RX algorithms */
2407 	if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2408 		priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2409 						priv->plat->rx_sched_algorithm);
2410 
2411 	/* Configure MTL TX algorithms */
2412 	if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2413 		priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2414 						priv->plat->tx_sched_algorithm);
2415 
2416 	/* Configure CBS in AVB TX queues */
2417 	if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2418 		stmmac_configure_cbs(priv);
2419 
2420 	/* Map RX MTL to DMA channels */
2421 	if (priv->hw->mac->map_mtl_to_dma)
2422 		stmmac_rx_queue_dma_chan_map(priv);
2423 
2424 	/* Enable MAC RX Queues */
2425 	if (priv->hw->mac->rx_queue_enable)
2426 		stmmac_mac_enable_rx_queues(priv);
2427 
2428 	/* Set RX priorities */
2429 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2430 		stmmac_mac_config_rx_queues_prio(priv);
2431 
2432 	/* Set TX priorities */
2433 	if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2434 		stmmac_mac_config_tx_queues_prio(priv);
2435 
2436 	/* Set RX routing */
2437 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2438 		stmmac_mac_config_rx_queues_routing(priv);
2439 }
2440 
2441 /**
2442  * stmmac_hw_setup - setup mac in a usable state.
2443  *  @dev : pointer to the device structure.
 *  @init_ptp: initialize the PTP reference clock and core if set
2444  *  Description:
2445  *  this is the main function to setup the HW in a usable state: the
2446  *  dma engine is reset, the core registers are configured (e.g. AXI,
2447  *  Checksum features, timers). The DMA is ready to start receiving and
2448  *  transmitting.
2449  *  Return value:
2450  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2451  *  file on failure.
2452  */
2453 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2454 {
2455 	struct stmmac_priv *priv = netdev_priv(dev);
2456 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2457 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2458 	u32 chan;
2459 	int ret;
2460 
2461 	/* DMA initialization and SW reset */
2462 	ret = stmmac_init_dma_engine(priv);
2463 	if (ret < 0) {
2464 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2465 			   __func__);
2466 		return ret;
2467 	}
2468 
2469 	/* Copy the MAC addr into the HW  */
2470 	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2471 
2472 	/* PS and related bits will be programmed according to the speed */
2473 	if (priv->hw->pcs) {
2474 		int speed = priv->plat->mac_port_sel_speed;
2475 
2476 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2477 		    (speed == SPEED_1000)) {
2478 			priv->hw->ps = speed;
2479 		} else {
2480 			dev_warn(priv->device, "invalid port speed\n");
2481 			priv->hw->ps = 0;
2482 		}
2483 	}
2484 
2485 	/* Initialize the MAC Core */
2486 	priv->hw->mac->core_init(priv->hw, dev->mtu);
2487 
2488 	/* Initialize MTL*/
2489 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
2490 		stmmac_mtl_configuration(priv);
2491 
2492 	ret = priv->hw->mac->rx_ipc(priv->hw);
2493 	if (!ret) {
2494 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2495 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2496 		priv->hw->rx_csum = 0;
2497 	}
2498 
2499 	/* Enable the MAC Rx/Tx */
2500 	priv->hw->mac->set_mac(priv->ioaddr, true);
2501 
2502 	/* Set the HW DMA mode and the COE */
2503 	stmmac_dma_operation_mode(priv);
2504 
2505 	stmmac_mmc_setup(priv);
2506 
2507 	if (init_ptp) {
2508 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2509 		if (ret < 0)
2510 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2511 
2512 		ret = stmmac_init_ptp(priv);
2513 		if (ret == -EOPNOTSUPP)
2514 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2515 		else if (ret)
2516 			netdev_warn(priv->dev, "PTP init failed\n");
2517 	}
2518 
2519 #ifdef CONFIG_DEBUG_FS
2520 	ret = stmmac_init_fs(dev);
2521 	if (ret < 0)
2522 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2523 			    __func__);
2524 #endif
2525 	/* Start the ball rolling... */
2526 	stmmac_start_all_dma(priv);
2527 
2528 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2529 
2530 	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2531 		priv->rx_riwt = MAX_DMA_RIWT;
2532 		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2533 	}
2534 
2535 	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2536 		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2537 
2538 	/* set TX and RX rings length */
2539 	stmmac_set_rings_length(priv);
2540 
2541 	/* Enable TSO */
2542 	if (priv->tso) {
2543 		for (chan = 0; chan < tx_cnt; chan++)
2544 			priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2545 	}
2546 
2547 	return 0;
2548 }
2549 
2550 static void stmmac_hw_teardown(struct net_device *dev)
2551 {
2552 	struct stmmac_priv *priv = netdev_priv(dev);
2553 
2554 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2555 }
2556 
2557 /**
2558  *  stmmac_open - open entry point of the driver
2559  *  @dev : pointer to the device structure.
2560  *  Description:
2561  *  This function is the open entry point of the driver.
2562  *  Return value:
2563  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2564  *  file on failure.
2565  */
2566 static int stmmac_open(struct net_device *dev)
2567 {
2568 	struct stmmac_priv *priv = netdev_priv(dev);
2569 	int ret;
2570 
2571 	stmmac_check_ether_addr(priv);
2572 
2573 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2574 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2575 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2576 		ret = stmmac_init_phy(dev);
2577 		if (ret) {
2578 			netdev_err(priv->dev,
2579 				   "%s: Cannot attach to PHY (error: %d)\n",
2580 				   __func__, ret);
2581 			return ret;
2582 		}
2583 	}
2584 
2585 	/* Extra statistics */
2586 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2587 	priv->xstats.threshold = tc;
2588 
2589 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2590 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2591 
2592 	ret = alloc_dma_desc_resources(priv);
2593 	if (ret < 0) {
2594 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2595 			   __func__);
2596 		goto dma_desc_error;
2597 	}
2598 
2599 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2600 	if (ret < 0) {
2601 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2602 			   __func__);
2603 		goto init_error;
2604 	}
2605 
2606 	ret = stmmac_hw_setup(dev, true);
2607 	if (ret < 0) {
2608 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2609 		goto init_error;
2610 	}
2611 
2612 	stmmac_init_tx_coalesce(priv);
2613 
2614 	if (dev->phydev)
2615 		phy_start(dev->phydev);
2616 
2617 	/* Request the IRQ lines */
2618 	ret = request_irq(dev->irq, stmmac_interrupt,
2619 			  IRQF_SHARED, dev->name, dev);
2620 	if (unlikely(ret < 0)) {
2621 		netdev_err(priv->dev,
2622 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2623 			   __func__, dev->irq, ret);
2624 		goto irq_error;
2625 	}
2626 
2627 	/* Request the Wake IRQ in case another line is used for WoL */
2628 	if (priv->wol_irq != dev->irq) {
2629 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2630 				  IRQF_SHARED, dev->name, dev);
2631 		if (unlikely(ret < 0)) {
2632 			netdev_err(priv->dev,
2633 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2634 				   __func__, priv->wol_irq, ret);
2635 			goto wolirq_error;
2636 		}
2637 	}
2638 
2639 	/* Request the optional LPI IRQ in case a separate line is used */
2640 	if (priv->lpi_irq > 0) {
2641 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2642 				  dev->name, dev);
2643 		if (unlikely(ret < 0)) {
2644 			netdev_err(priv->dev,
2645 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2646 				   __func__, priv->lpi_irq, ret);
2647 			goto lpiirq_error;
2648 		}
2649 	}
2650 
2651 	stmmac_enable_all_queues(priv);
2652 	stmmac_start_all_queues(priv);
2653 
2654 	return 0;
2655 
2656 lpiirq_error:
2657 	if (priv->wol_irq != dev->irq)
2658 		free_irq(priv->wol_irq, dev);
2659 wolirq_error:
2660 	free_irq(dev->irq, dev);
2661 irq_error:
2662 	if (dev->phydev)
2663 		phy_stop(dev->phydev);
2664 
2665 	del_timer_sync(&priv->txtimer);
2666 	stmmac_hw_teardown(dev);
2667 init_error:
2668 	free_dma_desc_resources(priv);
2669 dma_desc_error:
2670 	if (dev->phydev)
2671 		phy_disconnect(dev->phydev);
2672 
2673 	return ret;
2674 }
2675 
2676 /**
2677  *  stmmac_release - close entry point of the driver
2678  *  @dev : device pointer.
2679  *  Description:
2680  *  This is the stop entry point of the driver.
2681  */
2682 static int stmmac_release(struct net_device *dev)
2683 {
2684 	struct stmmac_priv *priv = netdev_priv(dev);
2685 
2686 	if (priv->eee_enabled)
2687 		del_timer_sync(&priv->eee_ctrl_timer);
2688 
2689 	/* Stop and disconnect the PHY */
2690 	if (dev->phydev) {
2691 		phy_stop(dev->phydev);
2692 		phy_disconnect(dev->phydev);
2693 	}
2694 
2695 	stmmac_stop_all_queues(priv);
2696 
2697 	stmmac_disable_all_queues(priv);
2698 
2699 	del_timer_sync(&priv->txtimer);
2700 
2701 	/* Free the IRQ lines */
2702 	free_irq(dev->irq, dev);
2703 	if (priv->wol_irq != dev->irq)
2704 		free_irq(priv->wol_irq, dev);
2705 	if (priv->lpi_irq > 0)
2706 		free_irq(priv->lpi_irq, dev);
2707 
2708 	/* Stop TX/RX DMA and clear the descriptors */
2709 	stmmac_stop_all_dma(priv);
2710 
2711 	/* Release and free the Rx/Tx resources */
2712 	free_dma_desc_resources(priv);
2713 
2714 	/* Disable the MAC Rx/Tx */
2715 	priv->hw->mac->set_mac(priv->ioaddr, false);
2716 
2717 	netif_carrier_off(dev);
2718 
2719 #ifdef CONFIG_DEBUG_FS
2720 	stmmac_exit_fs(dev);
2721 #endif
2722 
2723 	stmmac_release_ptp(priv);
2724 
2725 	return 0;
2726 }
2727 
2728 /**
2729  *  stmmac_tso_allocator - allocate and fill TSO descriptors
2730  *  @priv: driver private structure
2731  *  @des: buffer start address
2732  *  @total_len: total length to fill in descriptors
2733  *  @last_segment: condition for the last descriptor
2734  *  @queue: TX queue index
2735  *  Description:
2736  *  This function fills descriptors and requests new descriptors according
2737  *  to the buffer length to fill.
2738  */
2739 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2740 				 int total_len, bool last_segment, u32 queue)
2741 {
2742 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2743 	struct dma_desc *desc;
2744 	u32 buff_size;
2745 	int tmp_len;
2746 
2747 	tmp_len = total_len;
2748 
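	/* Split the payload across as many descriptors as needed, each one
	 * carrying at most TSO_MAX_BUFF_SIZE bytes.
	 */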
2749 	while (tmp_len > 0) {
2750 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2751 		desc = tx_q->dma_tx + tx_q->cur_tx;
2752 
2753 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2754 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2755 			    TSO_MAX_BUFF_SIZE : tmp_len;
2756 
2757 		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2758 			0, 1,
2759 			(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2760 			0, 0);
2761 
2762 		tmp_len -= TSO_MAX_BUFF_SIZE;
2763 	}
2764 }
2765 
2766 /**
2767  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2768  *  @skb : the socket buffer
2769  *  @dev : device pointer
2770  *  Description: this is the transmit function that is called on TSO frames
2771  *  (support available on GMAC4 and newer chips).
2772  *  The diagram below shows the ring programming in case of TSO frames:
2773  *
2774  *  First Descriptor
2775  *   --------
2776  *   | DES0 |---> buffer1 = L2/L3/L4 header
2777  *   | DES1 |---> TCP Payload (can continue on next descr...)
2778  *   | DES2 |---> buffer 1 and 2 len
2779  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2780  *   --------
2781  *	|
2782  *     ...
2783  *	|
2784  *   --------
2785  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2786  *   | DES1 | --|
2787  *   | DES2 | --> buffer 1 and 2 len
2788  *   | DES3 |
2789  *   --------
2790  *
2791  * The MSS is fixed while TSO is on, so the TDES3 context is set only when it changes.
2792  */
2793 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2794 {
2795 	struct dma_desc *desc, *first, *mss_desc = NULL;
2796 	struct stmmac_priv *priv = netdev_priv(dev);
2797 	int nfrags = skb_shinfo(skb)->nr_frags;
2798 	u32 queue = skb_get_queue_mapping(skb);
2799 	unsigned int first_entry, des;
2800 	struct stmmac_tx_queue *tx_q;
2801 	int tmp_pay_len = 0;
2802 	u32 pay_len, mss;
2803 	u8 proto_hdr_len;
2804 	int i;
2805 
2806 	tx_q = &priv->tx_queue[queue];
2807 
2808 	/* Compute header lengths */
2809 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2810 
2811 	/* Desc availability based on threshold should be enough safe */
2812 	if (unlikely(stmmac_tx_avail(priv, queue) <
2813 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2814 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2815 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2816 								queue));
2817 			/* This is a hard error, log it. */
2818 			netdev_err(priv->dev,
2819 				   "%s: Tx Ring full when queue awake\n",
2820 				   __func__);
2821 		}
2822 		return NETDEV_TX_BUSY;
2823 	}
2824 
2825 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2826 
2827 	mss = skb_shinfo(skb)->gso_size;
2828 
2829 	/* set new MSS value if needed */
2830 	if (mss != priv->mss) {
2831 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2832 		priv->hw->desc->set_mss(mss_desc, mss);
2833 		priv->mss = mss;
2834 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2835 	}
2836 
2837 	if (netif_msg_tx_queued(priv)) {
2838 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2839 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2840 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2841 			skb->data_len);
2842 	}
2843 
2844 	first_entry = tx_q->cur_tx;
2845 
2846 	desc = tx_q->dma_tx + first_entry;
2847 	first = desc;
2848 
2849 	/* first descriptor: fill Headers on Buf1 */
2850 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2851 			     DMA_TO_DEVICE);
2852 	if (dma_mapping_error(priv->device, des))
2853 		goto dma_map_err;
2854 
2855 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2856 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2857 
2858 	first->des0 = cpu_to_le32(des);
2859 
2860 	/* Fill start of payload in buff2 of first descriptor */
2861 	if (pay_len)
2862 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2863 
2864 	/* If needed take extra descriptors to fill the remaining payload */
2865 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2866 
2867 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2868 
2869 	/* Prepare fragments */
2870 	for (i = 0; i < nfrags; i++) {
2871 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2872 
2873 		des = skb_frag_dma_map(priv->device, frag, 0,
2874 				       skb_frag_size(frag),
2875 				       DMA_TO_DEVICE);
2876 		if (dma_mapping_error(priv->device, des))
2877 			goto dma_map_err;
2878 
2879 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2880 				     (i == nfrags - 1), queue);
2881 
2882 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2883 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2884 		tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2885 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2886 	}
2887 
2888 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2889 
2890 	/* Only the last descriptor gets to point to the skb. */
2891 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2892 
2893 	/* We've used all descriptors we need for this skb, however,
2894 	 * advance cur_tx so that it references a fresh descriptor.
2895 	 * ndo_start_xmit will fill this descriptor the next time it's
2896 	 * called and stmmac_tx_clean may clean up to this descriptor.
2897 	 */
2898 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2899 
2900 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2901 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2902 			  __func__);
2903 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2904 	}
2905 
2906 	dev->stats.tx_bytes += skb->len;
2907 	priv->xstats.tx_tso_frames++;
2908 	priv->xstats.tx_tso_nfrags += nfrags;
2909 
2910 	/* Manage tx mitigation */
2911 	priv->tx_count_frames += nfrags + 1;
2912 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2913 		mod_timer(&priv->txtimer,
2914 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2915 	} else {
2916 		priv->tx_count_frames = 0;
2917 		priv->hw->desc->set_tx_ic(desc);
2918 		priv->xstats.tx_set_ic_bit++;
2919 	}
2920 
2921 	skb_tx_timestamp(skb);
2922 
2923 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2924 		     priv->hwts_tx_en)) {
2925 		/* declare that device is doing timestamping */
2926 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2927 		priv->hw->desc->enable_tx_timestamp(first);
2928 	}
2929 
2930 	/* Complete the first descriptor before granting the DMA */
2931 	priv->hw->desc->prepare_tso_tx_desc(first, 1,
2932 			proto_hdr_len,
2933 			pay_len,
2934 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2935 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2936 
2937 	/* If context desc is used to change MSS */
2938 	if (mss_desc)
2939 		priv->hw->desc->set_tx_owner(mss_desc);
2940 
2941 	/* The own bit must be the latest setting done when prepare the
2942 	 * descriptor and then barrier is needed to make sure that
2943 	 * all is coherent before granting the DMA engine.
2944 	 */
2945 	dma_wmb();
2946 
2947 	if (netif_msg_pktdata(priv)) {
2948 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2949 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2950 			tx_q->cur_tx, first, nfrags);
2951 
2952 		priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2953 					     0);
2954 
2955 		pr_info(">>> frame to be transmitted: ");
2956 		print_pkt(skb->data, skb_headlen(skb));
2957 	}
2958 
2959 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2960 
2961 	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2962 				       queue);
2963 
2964 	return NETDEV_TX_OK;
2965 
2966 dma_map_err:
2967 	dev_err(priv->device, "Tx dma map failed\n");
2968 	dev_kfree_skb(skb);
2969 	priv->dev->stats.tx_dropped++;
2970 	return NETDEV_TX_OK;
2971 }
2972 
2973 /**
2974  *  stmmac_xmit - Tx entry point of the driver
2975  *  @skb : the socket buffer
2976  *  @dev : device pointer
2977  *  Description : this is the tx entry point of the driver.
2978  *  It programs the chain or the ring and supports oversized frames
2979  *  and SG feature.
2980  */
2981 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2982 {
2983 	struct stmmac_priv *priv = netdev_priv(dev);
2984 	unsigned int nopaged_len = skb_headlen(skb);
2985 	int i, csum_insertion = 0, is_jumbo = 0;
2986 	u32 queue = skb_get_queue_mapping(skb);
2987 	int nfrags = skb_shinfo(skb)->nr_frags;
2988 	int entry;
2989 	unsigned int first_entry;
2990 	struct dma_desc *desc, *first;
2991 	struct stmmac_tx_queue *tx_q;
2992 	unsigned int enh_desc;
2993 	unsigned int des;
2994 
2995 	tx_q = &priv->tx_queue[queue];
2996 
2997 	/* Manage oversized TCP frames for GMAC4 device */
2998 	if (skb_is_gso(skb) && priv->tso) {
2999 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3000 			return stmmac_tso_xmit(skb, dev);
3001 	}
3002 
3003 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3004 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3005 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3006 								queue));
3007 			/* This is a hard error, log it. */
3008 			netdev_err(priv->dev,
3009 				   "%s: Tx Ring full when queue awake\n",
3010 				   __func__);
3011 		}
3012 		return NETDEV_TX_BUSY;
3013 	}
3014 
3015 	if (priv->tx_path_in_lpi_mode)
3016 		stmmac_disable_eee_mode(priv);
3017 
3018 	entry = tx_q->cur_tx;
3019 	first_entry = entry;
3020 
3021 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3022 
3023 	if (likely(priv->extend_desc))
3024 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3025 	else
3026 		desc = tx_q->dma_tx + entry;
3027 
3028 	first = desc;
3029 
3030 	enh_desc = priv->plat->enh_desc;
3031 	/* To program the descriptors according to the size of the frame */
3032 	if (enh_desc)
3033 		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3034 
3035 	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3036 					 DWMAC_CORE_4_00)) {
3037 		entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3038 		if (unlikely(entry < 0))
3039 			goto dma_map_err;
3040 	}
3041 
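	/* Map each paged fragment and program one descriptor per fragment */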
3042 	for (i = 0; i < nfrags; i++) {
3043 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3044 		int len = skb_frag_size(frag);
3045 		bool last_segment = (i == (nfrags - 1));
3046 
3047 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3048 
3049 		if (likely(priv->extend_desc))
3050 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3051 		else
3052 			desc = tx_q->dma_tx + entry;
3053 
3054 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3055 				       DMA_TO_DEVICE);
3056 		if (dma_mapping_error(priv->device, des))
3057 			goto dma_map_err; /* should reuse desc w/o issues */
3058 
3059 		tx_q->tx_skbuff[entry] = NULL;
3060 
3061 		tx_q->tx_skbuff_dma[entry].buf = des;
3062 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3063 			desc->des0 = cpu_to_le32(des);
3064 		else
3065 			desc->des2 = cpu_to_le32(des);
3066 
3067 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3068 		tx_q->tx_skbuff_dma[entry].len = len;
3069 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3070 
3071 		/* Prepare the descriptor and set the own bit too */
3072 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3073 						priv->mode, 1, last_segment,
3074 						skb->len);
3075 	}
3076 
3077 	/* Only the last descriptor gets to point to the skb. */
3078 	tx_q->tx_skbuff[entry] = skb;
3079 
3080 	/* We've used all descriptors we need for this skb, however,
3081 	 * advance cur_tx so that it references a fresh descriptor.
3082 	 * ndo_start_xmit will fill this descriptor the next time it's
3083 	 * called and stmmac_tx_clean may clean up to this descriptor.
3084 	 */
3085 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3086 	tx_q->cur_tx = entry;
3087 
3088 	if (netif_msg_pktdata(priv)) {
3089 		void *tx_head;
3090 
3091 		netdev_dbg(priv->dev,
3092 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3093 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3094 			   entry, first, nfrags);
3095 
3096 		if (priv->extend_desc)
3097 			tx_head = (void *)tx_q->dma_etx;
3098 		else
3099 			tx_head = (void *)tx_q->dma_tx;
3100 
3101 		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3102 
3103 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3104 		print_pkt(skb->data, skb->len);
3105 	}
3106 
3107 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3108 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3109 			  __func__);
3110 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3111 	}
3112 
3113 	dev->stats.tx_bytes += skb->len;
3114 
3115 	/* According to the coalesce parameter the IC bit for the latest
3116 	 * segment is reset and the timer re-started to clean the tx status.
3117 	 * This approach takes care about the fragments: desc is the first
3118 	 * element in case of no SG.
3119 	 */
3120 	priv->tx_count_frames += nfrags + 1;
3121 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3122 		mod_timer(&priv->txtimer,
3123 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3124 	} else {
3125 		priv->tx_count_frames = 0;
3126 		priv->hw->desc->set_tx_ic(desc);
3127 		priv->xstats.tx_set_ic_bit++;
3128 	}
3129 
3130 	skb_tx_timestamp(skb);
3131 
3132 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3133 	 * problems because all the descriptors are actually ready to be
3134 	 * passed to the DMA engine.
3135 	 */
3136 	if (likely(!is_jumbo)) {
3137 		bool last_segment = (nfrags == 0);
3138 
3139 		des = dma_map_single(priv->device, skb->data,
3140 				     nopaged_len, DMA_TO_DEVICE);
3141 		if (dma_mapping_error(priv->device, des))
3142 			goto dma_map_err;
3143 
3144 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3145 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3146 			first->des0 = cpu_to_le32(des);
3147 		else
3148 			first->des2 = cpu_to_le32(des);
3149 
3150 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3151 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3152 
3153 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3154 			     priv->hwts_tx_en)) {
3155 			/* declare that device is doing timestamping */
3156 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3157 			priv->hw->desc->enable_tx_timestamp(first);
3158 		}
3159 
3160 		/* Prepare the first descriptor setting the OWN bit too */
3161 		priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3162 						csum_insertion, priv->mode, 1,
3163 						last_segment, skb->len);
3164 
3165 		/* The own bit must be the latest setting done when prepare the
3166 		 * descriptor and then barrier is needed to make sure that
3167 		 * all is coherent before granting the DMA engine.
3168 		 */
3169 		dma_wmb();
3170 	}
3171 
3172 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3173 
3174 	if (priv->synopsys_id < DWMAC_CORE_4_00)
3175 		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3176 	else
3177 		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3178 					       queue);
3179 
3180 	return NETDEV_TX_OK;
3181 
3182 dma_map_err:
3183 	netdev_err(priv->dev, "Tx DMA map failed\n");
3184 	dev_kfree_skb(skb);
3185 	priv->dev->stats.tx_dropped++;
3186 	return NETDEV_TX_OK;
3187 }
3188 
3189 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3190 {
3191 	struct ethhdr *ehdr;
3192 	u16 vlanid;
3193 
3194 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3195 	    NETIF_F_HW_VLAN_CTAG_RX &&
3196 	    !__vlan_get_tag(skb, &vlanid)) {
3197 		/* pop the vlan tag */
3198 		ehdr = (struct ethhdr *)skb->data;
3199 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3200 		skb_pull(skb, VLAN_HLEN);
3201 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3202 	}
3203 }
3204 
3205 
3206 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3207 {
3208 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3209 		return 0;
3210 
3211 	return 1;
3212 }
3213 
3214 /**
3215  * stmmac_rx_refill - refill used skb preallocated buffers
3216  * @priv: driver private structure
3217  * @queue: RX queue index
3218  * Description : this reallocates the skbs for the zero-copy based
3219  * reception process.
3220  */
3221 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3222 {
3223 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3224 	int dirty = stmmac_rx_dirty(priv, queue);
3225 	unsigned int entry = rx_q->dirty_rx;
3226 
3227 	int bfsize = priv->dma_buf_sz;
3228 
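	/* Attach a fresh skb to each used descriptor and hand its ownership
	 * back to the DMA.
	 */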
3229 	while (dirty-- > 0) {
3230 		struct dma_desc *p;
3231 
3232 		if (priv->extend_desc)
3233 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3234 		else
3235 			p = rx_q->dma_rx + entry;
3236 
3237 		if (likely(!rx_q->rx_skbuff[entry])) {
3238 			struct sk_buff *skb;
3239 
3240 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3241 			if (unlikely(!skb)) {
3242 				/* so for a while no zero-copy! */
3243 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3244 				if (unlikely(net_ratelimit()))
3245 					dev_err(priv->device,
3246 						"fail to alloc skb entry %d\n",
3247 						entry);
3248 				break;
3249 			}
3250 
3251 			rx_q->rx_skbuff[entry] = skb;
3252 			rx_q->rx_skbuff_dma[entry] =
3253 			    dma_map_single(priv->device, skb->data, bfsize,
3254 					   DMA_FROM_DEVICE);
3255 			if (dma_mapping_error(priv->device,
3256 					      rx_q->rx_skbuff_dma[entry])) {
3257 				netdev_err(priv->dev, "Rx DMA map failed\n");
3258 				dev_kfree_skb(skb);
3259 				break;
3260 			}
3261 
3262 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3263 				p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3264 				p->des1 = 0;
3265 			} else {
3266 				p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3267 			}
3268 			if (priv->hw->mode->refill_desc3)
3269 				priv->hw->mode->refill_desc3(rx_q, p);
3270 
3271 			if (rx_q->rx_zeroc_thresh > 0)
3272 				rx_q->rx_zeroc_thresh--;
3273 
3274 			netif_dbg(priv, rx_status, priv->dev,
3275 				  "refill entry #%d\n", entry);
3276 		}
3277 		dma_wmb();
3278 
3279 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3280 			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3281 		else
3282 			priv->hw->desc->set_rx_owner(p);
3283 
3284 		dma_wmb();
3285 
3286 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3287 	}
3288 	rx_q->dirty_rx = entry;
3289 }
3290 
3291 /**
3292  * stmmac_rx - manage the receive process
3293  * @priv: driver private structure
3294  * @limit: napi budget
3295  * @queue: RX queue index.
3296  * Description :  this is the function called by the napi poll method.
3297  * It gets all the frames inside the ring.
3298  */
3299 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3300 {
3301 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3302 	unsigned int entry = rx_q->cur_rx;
3303 	int coe = priv->hw->rx_csum;
3304 	unsigned int next_entry;
3305 	unsigned int count = 0;
3306 
3307 	if (netif_msg_rx_status(priv)) {
3308 		void *rx_head;
3309 
3310 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3311 		if (priv->extend_desc)
3312 			rx_head = (void *)rx_q->dma_erx;
3313 		else
3314 			rx_head = (void *)rx_q->dma_rx;
3315 
3316 		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3317 	}
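	/* Process up to 'limit' frames, stopping early when a descriptor
	 * still owned by the DMA is reached.
	 */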
3318 	while (count < limit) {
3319 		int status;
3320 		struct dma_desc *p;
3321 		struct dma_desc *np;
3322 
3323 		if (priv->extend_desc)
3324 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3325 		else
3326 			p = rx_q->dma_rx + entry;
3327 
3328 		/* read the status of the incoming frame */
3329 		status = priv->hw->desc->rx_status(&priv->dev->stats,
3330 						   &priv->xstats, p);
3331 		/* check if managed by the DMA otherwise go ahead */
3332 		if (unlikely(status & dma_own))
3333 			break;
3334 
3335 		count++;
3336 
3337 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3338 		next_entry = rx_q->cur_rx;
3339 
3340 		if (priv->extend_desc)
3341 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3342 		else
3343 			np = rx_q->dma_rx + next_entry;
3344 
3345 		prefetch(np);
3346 
3347 		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3348 			priv->hw->desc->rx_extended_status(&priv->dev->stats,
3349 							   &priv->xstats,
3350 							   rx_q->dma_erx +
3351 							   entry);
3352 		if (unlikely(status == discard_frame)) {
3353 			priv->dev->stats.rx_errors++;
3354 			if (priv->hwts_rx_en && !priv->extend_desc) {
3355 				/* DESC2 & DESC3 will be overwritten by device
3356 				 * with timestamp value, hence reinitialize
3357 				 * them in stmmac_rx_refill() function so that
3358 				 * device can reuse it.
3359 				 */
3360 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3361 				rx_q->rx_skbuff[entry] = NULL;
3362 				dma_unmap_single(priv->device,
3363 						 rx_q->rx_skbuff_dma[entry],
3364 						 priv->dma_buf_sz,
3365 						 DMA_FROM_DEVICE);
3366 			}
3367 		} else {
3368 			struct sk_buff *skb;
3369 			int frame_len;
3370 			unsigned int des;
3371 
3372 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3373 				des = le32_to_cpu(p->des0);
3374 			else
3375 				des = le32_to_cpu(p->des2);
3376 
3377 			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3378 
3379 			/*  If frame length is greater than skb buffer size
3380 			 *  (preallocated during init) then the packet is
3381 			 *  ignored
3382 			 */
3383 			if (frame_len > priv->dma_buf_sz) {
3384 				netdev_err(priv->dev,
3385 					   "len %d larger than size (%d)\n",
3386 					   frame_len, priv->dma_buf_sz);
3387 				priv->dev->stats.rx_length_errors++;
3388 				break;
3389 			}
3390 
3391 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3392 			 * Type frames (LLC/LLC-SNAP)
3393 			 */
3394 			if (unlikely(status != llc_snap))
3395 				frame_len -= ETH_FCS_LEN;
3396 
3397 			if (netif_msg_rx_status(priv)) {
3398 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3399 					   p, entry, des);
3400 				if (frame_len > ETH_FRAME_LEN)
3401 					netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3402 						   frame_len, status);
3403 			}
3404 
3405 			/* The zero-copy is always used for all the sizes
3406 			 * in case of GMAC4 because it needs
3407 			 * to refill the used descriptors, always.
3408 			 */
3409 			if (unlikely(!priv->plat->has_gmac4 &&
3410 				     ((frame_len < priv->rx_copybreak) ||
3411 				     stmmac_rx_threshold_count(rx_q)))) {
3412 				skb = netdev_alloc_skb_ip_align(priv->dev,
3413 								frame_len);
3414 				if (unlikely(!skb)) {
3415 					if (net_ratelimit())
3416 						dev_warn(priv->device,
3417 							 "packet dropped\n");
3418 					priv->dev->stats.rx_dropped++;
3419 					break;
3420 				}
3421 
3422 				dma_sync_single_for_cpu(priv->device,
3423 							rx_q->rx_skbuff_dma
3424 							[entry], frame_len,
3425 							DMA_FROM_DEVICE);
3426 				skb_copy_to_linear_data(skb,
3427 							rx_q->
3428 							rx_skbuff[entry]->data,
3429 							frame_len);
3430 
3431 				skb_put(skb, frame_len);
3432 				dma_sync_single_for_device(priv->device,
3433 							   rx_q->rx_skbuff_dma
3434 							   [entry], frame_len,
3435 							   DMA_FROM_DEVICE);
3436 			} else {
3437 				skb = rx_q->rx_skbuff[entry];
3438 				if (unlikely(!skb)) {
3439 					netdev_err(priv->dev,
3440 						   "%s: Inconsistent Rx chain\n",
3441 						   priv->dev->name);
3442 					priv->dev->stats.rx_dropped++;
3443 					break;
3444 				}
3445 				prefetch(skb->data - NET_IP_ALIGN);
3446 				rx_q->rx_skbuff[entry] = NULL;
3447 				rx_q->rx_zeroc_thresh++;
3448 
3449 				skb_put(skb, frame_len);
3450 				dma_unmap_single(priv->device,
3451 						 rx_q->rx_skbuff_dma[entry],
3452 						 priv->dma_buf_sz,
3453 						 DMA_FROM_DEVICE);
3454 			}
3455 
3456 			if (netif_msg_pktdata(priv)) {
3457 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3458 					   frame_len);
3459 				print_pkt(skb->data, frame_len);
3460 			}
3461 
3462 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3463 
3464 			stmmac_rx_vlan(priv->dev, skb);
3465 
3466 			skb->protocol = eth_type_trans(skb, priv->dev);
3467 
3468 			if (unlikely(!coe))
3469 				skb_checksum_none_assert(skb);
3470 			else
3471 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3472 
3473 			napi_gro_receive(&rx_q->napi, skb);
3474 
3475 			priv->dev->stats.rx_packets++;
3476 			priv->dev->stats.rx_bytes += frame_len;
3477 		}
3478 		entry = next_entry;
3479 	}
3480 
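	/* Refill the RX ring with fresh buffers for the entries just consumed */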
3481 	stmmac_rx_refill(priv, queue);
3482 
3483 	priv->xstats.rx_pkt_n += count;
3484 
3485 	return count;
3486 }
3487 
3488 /**
3489  *  stmmac_poll - stmmac poll method (NAPI)
3490  *  @napi : pointer to the napi structure.
3491  *  @budget : maximum number of packets that the current CPU can receive from
3492  *	      all interfaces.
3493  *  Description :
 *  Process the incoming frames and clean up completed Tx resources.
3495  */
3496 static int stmmac_poll(struct napi_struct *napi, int budget)
3497 {
3498 	struct stmmac_rx_queue *rx_q =
3499 		container_of(napi, struct stmmac_rx_queue, napi);
3500 	struct stmmac_priv *priv = rx_q->priv_data;
3501 	u32 tx_count = priv->plat->tx_queues_to_use;
3502 	u32 chan = rx_q->queue_index;
3503 	int work_done = 0;
3504 	u32 queue;
3505 
3506 	priv->xstats.napi_poll++;
3507 
	/* Clean completed descriptors on all the TX queues */
3509 	for (queue = 0; queue < tx_count; queue++)
3510 		stmmac_tx_clean(priv, queue);
3511 
3512 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3513 	if (work_done < budget) {
3514 		napi_complete_done(napi, work_done);
3515 		stmmac_enable_dma_irq(priv, chan);
3516 	}
3517 	return work_done;
3518 }
3519 
3520 /**
3521  *  stmmac_tx_timeout
3522  *  @dev : Pointer to net device structure
3523  *  Description: this function is called when a packet transmission fails to
3524  *   complete within a reasonable time. The driver will mark the error in the
3525  *   netdev structure and arrange for the device to be reset to a sane state
3526  *   in order to transmit a new packet.
3527  */
3528 static void stmmac_tx_timeout(struct net_device *dev)
3529 {
3530 	struct stmmac_priv *priv = netdev_priv(dev);
3531 	u32 tx_count = priv->plat->tx_queues_to_use;
3532 	u32 chan;
3533 
	/* Clear Tx resources and restart transmission */
3535 	for (chan = 0; chan < tx_count; chan++)
3536 		stmmac_tx_err(priv, chan);
3537 }
3538 
3539 /**
3540  *  stmmac_set_rx_mode - entry point for multicast addressing
3541  *  @dev : pointer to the device structure
3542  *  Description:
3543  *  This function is a driver entry point which gets called by the kernel
3544  *  whenever multicast addresses must be enabled/disabled.
3545  *  Return value:
3546  *  void.
3547  */
3548 static void stmmac_set_rx_mode(struct net_device *dev)
3549 {
3550 	struct stmmac_priv *priv = netdev_priv(dev);
3551 
3552 	priv->hw->mac->set_filter(priv->hw, dev);
3553 }
3554 
3555 /**
3556  *  stmmac_change_mtu - entry point to change MTU size for the device.
3557  *  @dev : device pointer.
3558  *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transmission Unit (MTU) is used by the network
 *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3562  *  Return value:
3563  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3564  *  file on failure.
3565  */
3566 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3567 {
3568 	struct stmmac_priv *priv = netdev_priv(dev);
3569 
3570 	if (netif_running(dev)) {
3571 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3572 		return -EBUSY;
3573 	}
3574 
3575 	dev->mtu = new_mtu;
3576 
3577 	netdev_update_features(dev);
3578 
3579 	return 0;
3580 }
3581 
3582 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3583 					     netdev_features_t features)
3584 {
3585 	struct stmmac_priv *priv = netdev_priv(dev);
3586 
3587 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3588 		features &= ~NETIF_F_RXCSUM;
3589 
3590 	if (!priv->plat->tx_coe)
3591 		features &= ~NETIF_F_CSUM_MASK;
3592 
	/* Some GMAC devices have buggy Jumbo frame support that
	 * requires the Tx COE to be disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and don't use SF.
	 */
3598 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3599 		features &= ~NETIF_F_CSUM_MASK;
3600 
	/* Enable or disable TSO as requested via ethtool */
3602 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3603 		if (features & NETIF_F_TSO)
3604 			priv->tso = true;
3605 		else
3606 			priv->tso = false;
3607 	}
3608 
3609 	return features;
3610 }
3611 
3612 static int stmmac_set_features(struct net_device *netdev,
3613 			       netdev_features_t features)
3614 {
3615 	struct stmmac_priv *priv = netdev_priv(netdev);
3616 
	/* Keep the COE Type if RX checksumming is supported */
3618 	if (features & NETIF_F_RXCSUM)
3619 		priv->hw->rx_csum = priv->plat->rx_coe;
3620 	else
3621 		priv->hw->rx_csum = 0;
	/* No check is needed because rx_coe has been set before and it will
	 * be fixed in case of issue.
	 */
3625 	priv->hw->mac->rx_ipc(priv->hw);
3626 
3627 	return 0;
3628 }
3629 
3630 /**
3631  *  stmmac_interrupt - main ISR
3632  *  @irq: interrupt number.
3633  *  @dev_id: to pass the net device pointer.
3634  *  Description: this is the main driver interrupt service routine.
3635  *  It can call:
3636  *  o DMA service routine (to manage incoming frame reception and transmission
3637  *    status)
3638  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3639  *    interrupts.
3640  */
3641 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3642 {
3643 	struct net_device *dev = (struct net_device *)dev_id;
3644 	struct stmmac_priv *priv = netdev_priv(dev);
3645 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3646 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3647 	u32 queues_count;
3648 	u32 queue;
3649 
3650 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3651 
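	/* If this IRQ woke the system up, report a wakeup event to the PM core */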
3652 	if (priv->irq_wake)
3653 		pm_wakeup_event(priv->device, 0);
3654 
3655 	if (unlikely(!dev)) {
3656 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3657 		return IRQ_NONE;
3658 	}
3659 
3660 	/* To handle GMAC own interrupts */
3661 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3662 		int status = priv->hw->mac->host_irq_status(priv->hw,
3663 							    &priv->xstats);
3664 
3665 		if (unlikely(status)) {
3666 			/* For LPI we need to save the tx status */
3667 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3668 				priv->tx_path_in_lpi_mode = true;
3669 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3670 				priv->tx_path_in_lpi_mode = false;
3671 		}
3672 
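		/* On GMAC4 and newer cores also check the per-queue MTL
		 * interrupt status; on an RX FIFO overflow, rewrite the RX
		 * tail pointer so the DMA resumes fetching descriptors.
		 */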
3673 		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3674 			for (queue = 0; queue < queues_count; queue++) {
3675 				struct stmmac_rx_queue *rx_q =
3676 				&priv->rx_queue[queue];
3677 
3678 				status |=
3679 				priv->hw->mac->host_mtl_irq_status(priv->hw,
3680 								   queue);
3681 
3682 				if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3683 				    priv->hw->dma->set_rx_tail_ptr)
3684 					priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3685 								rx_q->rx_tail_addr,
3686 								queue);
3687 			}
3688 		}
3689 
3690 		/* PCS link status */
3691 		if (priv->hw->pcs) {
3692 			if (priv->xstats.pcs_link)
3693 				netif_carrier_on(dev);
3694 			else
3695 				netif_carrier_off(dev);
3696 		}
3697 	}
3698 
3699 	/* To handle DMA interrupts */
3700 	stmmac_dma_interrupt(priv);
3701 
3702 	return IRQ_HANDLED;
3703 }
3704 
3705 #ifdef CONFIG_NET_POLL_CONTROLLER
3706 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3707  * to allow network I/O with interrupts disabled.
3708  */
3709 static void stmmac_poll_controller(struct net_device *dev)
3710 {
3711 	disable_irq(dev->irq);
3712 	stmmac_interrupt(dev->irq, dev);
3713 	enable_irq(dev->irq);
3714 }
3715 #endif
3716 
3717 /**
3718  *  stmmac_ioctl - Entry point for the Ioctl
3719  *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
3721  *  a proprietary structure used to pass information to the driver.
3722  *  @cmd: IOCTL command
3723  *  Description:
3724  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3725  */
3726 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3727 {
3728 	int ret = -EOPNOTSUPP;
3729 
3730 	if (!netif_running(dev))
3731 		return -EINVAL;
3732 
3733 	switch (cmd) {
3734 	case SIOCGMIIPHY:
3735 	case SIOCGMIIREG:
3736 	case SIOCSMIIREG:
3737 		if (!dev->phydev)
3738 			return -EINVAL;
3739 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3740 		break;
3741 	case SIOCSHWTSTAMP:
3742 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3743 		break;
3744 	default:
3745 		break;
3746 	}
3747 
3748 	return ret;
3749 }
3750 
3751 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3752 {
3753 	struct stmmac_priv *priv = netdev_priv(ndev);
3754 	int ret = 0;
3755 
3756 	ret = eth_mac_addr(ndev, addr);
3757 	if (ret)
3758 		return ret;
3759 
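	/* Program the new address into the MAC address register 0 */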
3760 	priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
3761 
3762 	return ret;
3763 }
3764 
3765 #ifdef CONFIG_DEBUG_FS
3766 static struct dentry *stmmac_fs_dir;
3767 
3768 static void sysfs_display_ring(void *head, int size, int extend_desc,
3769 			       struct seq_file *seq)
3770 {
3771 	int i;
3772 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3773 	struct dma_desc *p = (struct dma_desc *)head;
3774 
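	/* Dump each descriptor in the ring; for extended descriptors only
	 * the basic fields are printed.
	 */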
3775 	for (i = 0; i < size; i++) {
3776 		if (extend_desc) {
3777 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3778 				   i, (unsigned int)virt_to_phys(ep),
3779 				   le32_to_cpu(ep->basic.des0),
3780 				   le32_to_cpu(ep->basic.des1),
3781 				   le32_to_cpu(ep->basic.des2),
3782 				   le32_to_cpu(ep->basic.des3));
3783 			ep++;
3784 		} else {
3785 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3786 				   i, (unsigned int)virt_to_phys(p),
3787 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3788 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3789 			p++;
3790 		}
3791 		seq_printf(seq, "\n");
3792 	}
3793 }
3794 
3795 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3796 {
3797 	struct net_device *dev = seq->private;
3798 	struct stmmac_priv *priv = netdev_priv(dev);
3799 	u32 rx_count = priv->plat->rx_queues_to_use;
3800 	u32 tx_count = priv->plat->tx_queues_to_use;
3801 	u32 queue;
3802 
3803 	for (queue = 0; queue < rx_count; queue++) {
3804 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3805 
3806 		seq_printf(seq, "RX Queue %d:\n", queue);
3807 
3808 		if (priv->extend_desc) {
3809 			seq_printf(seq, "Extended descriptor ring:\n");
3810 			sysfs_display_ring((void *)rx_q->dma_erx,
3811 					   DMA_RX_SIZE, 1, seq);
3812 		} else {
3813 			seq_printf(seq, "Descriptor ring:\n");
3814 			sysfs_display_ring((void *)rx_q->dma_rx,
3815 					   DMA_RX_SIZE, 0, seq);
3816 		}
3817 	}
3818 
3819 	for (queue = 0; queue < tx_count; queue++) {
3820 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3821 
3822 		seq_printf(seq, "TX Queue %d:\n", queue);
3823 
3824 		if (priv->extend_desc) {
3825 			seq_printf(seq, "Extended descriptor ring:\n");
3826 			sysfs_display_ring((void *)tx_q->dma_etx,
3827 					   DMA_TX_SIZE, 1, seq);
3828 		} else {
3829 			seq_printf(seq, "Descriptor ring:\n");
3830 			sysfs_display_ring((void *)tx_q->dma_tx,
3831 					   DMA_TX_SIZE, 0, seq);
3832 		}
3833 	}
3834 
3835 	return 0;
3836 }
3837 
3838 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3839 {
3840 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3841 }
3842 
3843 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3844 
3845 static const struct file_operations stmmac_rings_status_fops = {
3846 	.owner = THIS_MODULE,
3847 	.open = stmmac_sysfs_ring_open,
3848 	.read = seq_read,
3849 	.llseek = seq_lseek,
3850 	.release = single_release,
3851 };
3852 
3853 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3854 {
3855 	struct net_device *dev = seq->private;
3856 	struct stmmac_priv *priv = netdev_priv(dev);
3857 
3858 	if (!priv->hw_cap_support) {
3859 		seq_printf(seq, "DMA HW features not supported\n");
3860 		return 0;
3861 	}
3862 
3863 	seq_printf(seq, "==============================\n");
3864 	seq_printf(seq, "\tDMA HW features\n");
3865 	seq_printf(seq, "==============================\n");
3866 
3867 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3868 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3869 	seq_printf(seq, "\t1000 Mbps: %s\n",
3870 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3871 	seq_printf(seq, "\tHalf duplex: %s\n",
3872 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3873 	seq_printf(seq, "\tHash Filter: %s\n",
3874 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3875 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3876 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3877 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3878 		   (priv->dma_cap.pcs) ? "Y" : "N");
3879 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3880 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3881 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3882 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3883 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3884 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3885 	seq_printf(seq, "\tRMON module: %s\n",
3886 		   (priv->dma_cap.rmon) ? "Y" : "N");
3887 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3888 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3889 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3890 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3891 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3892 		   (priv->dma_cap.eee) ? "Y" : "N");
3893 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3894 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3895 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3896 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3897 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3898 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3899 	} else {
3900 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3901 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3902 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3903 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3904 	}
3905 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3906 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3907 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3908 		   priv->dma_cap.number_rx_channel);
3909 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3910 		   priv->dma_cap.number_tx_channel);
3911 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3912 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3913 
3914 	return 0;
3915 }
3916 
3917 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3918 {
3919 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3920 }
3921 
3922 static const struct file_operations stmmac_dma_cap_fops = {
3923 	.owner = THIS_MODULE,
3924 	.open = stmmac_sysfs_dma_cap_open,
3925 	.read = seq_read,
3926 	.llseek = seq_lseek,
3927 	.release = single_release,
3928 };
3929 
3930 static int stmmac_init_fs(struct net_device *dev)
3931 {
3932 	struct stmmac_priv *priv = netdev_priv(dev);
3933 
3934 	/* Create per netdev entries */
3935 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3936 
3937 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3938 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3939 
3940 		return -ENOMEM;
3941 	}
3942 
3943 	/* Entry to report DMA RX/TX rings */
3944 	priv->dbgfs_rings_status =
3945 		debugfs_create_file("descriptors_status", S_IRUGO,
3946 				    priv->dbgfs_dir, dev,
3947 				    &stmmac_rings_status_fops);
3948 
3949 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3950 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3951 		debugfs_remove_recursive(priv->dbgfs_dir);
3952 
3953 		return -ENOMEM;
3954 	}
3955 
3956 	/* Entry to report the DMA HW features */
3957 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3958 					    priv->dbgfs_dir,
3959 					    dev, &stmmac_dma_cap_fops);
3960 
3961 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3962 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3963 		debugfs_remove_recursive(priv->dbgfs_dir);
3964 
3965 		return -ENOMEM;
3966 	}
3967 
3968 	return 0;
3969 }
3970 
3971 static void stmmac_exit_fs(struct net_device *dev)
3972 {
3973 	struct stmmac_priv *priv = netdev_priv(dev);
3974 
3975 	debugfs_remove_recursive(priv->dbgfs_dir);
3976 }
3977 #endif /* CONFIG_DEBUG_FS */
3978 
3979 static const struct net_device_ops stmmac_netdev_ops = {
3980 	.ndo_open = stmmac_open,
3981 	.ndo_start_xmit = stmmac_xmit,
3982 	.ndo_stop = stmmac_release,
3983 	.ndo_change_mtu = stmmac_change_mtu,
3984 	.ndo_fix_features = stmmac_fix_features,
3985 	.ndo_set_features = stmmac_set_features,
3986 	.ndo_set_rx_mode = stmmac_set_rx_mode,
3987 	.ndo_tx_timeout = stmmac_tx_timeout,
3988 	.ndo_do_ioctl = stmmac_ioctl,
3989 #ifdef CONFIG_NET_POLL_CONTROLLER
3990 	.ndo_poll_controller = stmmac_poll_controller,
3991 #endif
3992 	.ndo_set_mac_address = stmmac_set_mac_address,
3993 };
3994 
3995 /**
3996  *  stmmac_hw_init - Init the MAC device
3997  *  @priv: driver private structure
3998  *  Description: this function is to configure the MAC device according to
3999  *  some platform parameters or the HW capability register. It prepares the
4000  *  driver to use either ring or chain modes and to setup either enhanced or
4001  *  normal descriptors.
4002  */
4003 static int stmmac_hw_init(struct stmmac_priv *priv)
4004 {
4005 	struct mac_device_info *mac;
4006 
4007 	/* Identify the MAC HW device */
4008 	if (priv->plat->setup) {
4009 		mac = priv->plat->setup(priv);
4010 	} else if (priv->plat->has_gmac) {
4011 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
4012 		mac = dwmac1000_setup(priv->ioaddr,
4013 				      priv->plat->multicast_filter_bins,
4014 				      priv->plat->unicast_filter_entries,
4015 				      &priv->synopsys_id);
4016 	} else if (priv->plat->has_gmac4) {
4017 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
4018 		mac = dwmac4_setup(priv->ioaddr,
4019 				   priv->plat->multicast_filter_bins,
4020 				   priv->plat->unicast_filter_entries,
4021 				   &priv->synopsys_id);
4022 	} else {
4023 		mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
4024 	}
4025 	if (!mac)
4026 		return -ENOMEM;
4027 
4028 	priv->hw = mac;
4029 
	/* dwmac-sun8i only works in chain mode */
4031 	if (priv->plat->has_sun8i)
4032 		chain_mode = 1;
4033 
4034 	/* To use the chained or ring mode */
4035 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4036 		priv->hw->mode = &dwmac4_ring_mode_ops;
4037 	} else {
4038 		if (chain_mode) {
4039 			priv->hw->mode = &chain_mode_ops;
4040 			dev_info(priv->device, "Chain mode enabled\n");
4041 			priv->mode = STMMAC_CHAIN_MODE;
4042 		} else {
4043 			priv->hw->mode = &ring_mode_ops;
4044 			dev_info(priv->device, "Ring mode enabled\n");
4045 			priv->mode = STMMAC_RING_MODE;
4046 		}
4047 	}
4048 
	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
4050 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4051 	if (priv->hw_cap_support) {
4052 		dev_info(priv->device, "DMA HW capability register supported\n");
4053 
		/* We can override some gmac/dma configuration fields
		 * (e.g. enh_desc, tx_coe) that are passed through the
		 * platform data with the values from the HW capability
		 * register (if supported).
		 */
4059 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4060 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4061 		priv->hw->pmt = priv->plat->pmt;
4062 
4063 		/* TXCOE doesn't work in thresh DMA mode */
4064 		if (priv->plat->force_thresh_dma_mode)
4065 			priv->plat->tx_coe = 0;
4066 		else
4067 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4068 
		/* In case of GMAC4, rx_coe comes from the HW cap register. */
4070 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4071 
4072 		if (priv->dma_cap.rx_coe_type2)
4073 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4074 		else if (priv->dma_cap.rx_coe_type1)
4075 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4076 
4077 	} else {
4078 		dev_info(priv->device, "No HW DMA feature register supported\n");
4079 	}
4080 
4081 	/* To use alternate (extended), normal or GMAC4 descriptor structures */
4082 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
4083 		priv->hw->desc = &dwmac4_desc_ops;
4084 	else
4085 		stmmac_selec_desc_mode(priv);
4086 
4087 	if (priv->plat->rx_coe) {
4088 		priv->hw->rx_csum = priv->plat->rx_coe;
4089 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4090 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4091 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4092 	}
4093 	if (priv->plat->tx_coe)
4094 		dev_info(priv->device, "TX Checksum insertion supported\n");
4095 
4096 	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-up on LAN supported\n");
4098 		device_set_wakeup_capable(priv->device, 1);
4099 	}
4100 
4101 	if (priv->dma_cap.tsoen)
4102 		dev_info(priv->device, "TSO supported\n");
4103 
4104 	return 0;
4105 }
4106 
4107 /**
4108  * stmmac_dvr_probe
4109  * @device: device pointer
4110  * @plat_dat: platform data pointer
4111  * @res: stmmac resource pointer
 * Description: this is the main probe function used to
 * call alloc_etherdev and allocate the private structure.
4114  * Return:
4115  * returns 0 on success, otherwise errno.
4116  */
4117 int stmmac_dvr_probe(struct device *device,
4118 		     struct plat_stmmacenet_data *plat_dat,
4119 		     struct stmmac_resources *res)
4120 {
4121 	struct net_device *ndev = NULL;
4122 	struct stmmac_priv *priv;
4123 	int ret = 0;
4124 	u32 queue;
4125 
4126 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4127 				  MTL_MAX_TX_QUEUES,
4128 				  MTL_MAX_RX_QUEUES);
4129 	if (!ndev)
4130 		return -ENOMEM;
4131 
4132 	SET_NETDEV_DEV(ndev, device);
4133 
4134 	priv = netdev_priv(ndev);
4135 	priv->device = device;
4136 	priv->dev = ndev;
4137 
4138 	stmmac_set_ethtool_ops(ndev);
4139 	priv->pause = pause;
4140 	priv->plat = plat_dat;
4141 	priv->ioaddr = res->addr;
4142 	priv->dev->base_addr = (unsigned long)res->addr;
4143 
4144 	priv->dev->irq = res->irq;
4145 	priv->wol_irq = res->wol_irq;
4146 	priv->lpi_irq = res->lpi_irq;
4147 
4148 	if (res->mac)
4149 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4150 
4151 	dev_set_drvdata(device, priv->dev);
4152 
4153 	/* Verify driver arguments */
4154 	stmmac_verify_args();
4155 
4156 	/* Override with kernel parameters if supplied XXX CRS XXX
4157 	 * this needs to have multiple instances
4158 	 */
4159 	if ((phyaddr >= 0) && (phyaddr <= 31))
4160 		priv->plat->phy_addr = phyaddr;
4161 
4162 	if (priv->plat->stmmac_rst) {
4163 		ret = reset_control_assert(priv->plat->stmmac_rst);
4164 		reset_control_deassert(priv->plat->stmmac_rst);
4165 		/* Some reset controllers have only reset callback instead of
4166 		 * assert + deassert callbacks pair.
4167 		 */
4168 		if (ret == -ENOTSUPP)
4169 			reset_control_reset(priv->plat->stmmac_rst);
4170 	}
4171 
4172 	/* Init MAC and get the capabilities */
4173 	ret = stmmac_hw_init(priv);
4174 	if (ret)
4175 		goto error_hw_init;
4176 
4177 	/* Configure real RX and TX queues */
4178 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4179 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4180 
4181 	ndev->netdev_ops = &stmmac_netdev_ops;
4182 
4183 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4184 			    NETIF_F_RXCSUM;
4185 
4186 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4187 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4188 		priv->tso = true;
4189 		dev_info(priv->device, "TSO feature enabled\n");
4190 	}
4191 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4192 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4193 #ifdef STMMAC_VLAN_TAG_USED
4194 	/* Both mac100 and gmac support receive VLAN tag detection */
4195 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4196 #endif
4197 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4198 
4199 	/* MTU range: 46 - hw-specific max */
4200 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4201 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4202 		ndev->max_mtu = JUMBO_LEN;
4203 	else
4204 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4205 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu which is an invalid range.
4207 	 */
4208 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4209 	    (priv->plat->maxmtu >= ndev->min_mtu))
4210 		ndev->max_mtu = priv->plat->maxmtu;
4211 	else if (priv->plat->maxmtu < ndev->min_mtu)
4212 		dev_warn(priv->device,
4213 			 "%s: warning: maxmtu having invalid value (%d)\n",
4214 			 __func__, priv->plat->maxmtu);
4215 
4216 	if (flow_ctrl)
4217 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4218 
4219 	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some cases, for example on buggy HW, this feature
	 * has to be disabled; this can be done by passing the
	 * riwt_off field from the platform.
4223 	 */
4224 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4225 		priv->use_riwt = 1;
4226 		dev_info(priv->device,
4227 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4228 	}
4229 
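	/* Register one NAPI instance per RX queue */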
4230 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4231 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4232 
4233 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4234 			       (8 * priv->plat->rx_queues_to_use));
4235 	}
4236 
4237 	spin_lock_init(&priv->lock);
4238 
4239 	/* If a specific clk_csr value is passed from the platform
4240 	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Otherwise the driver will
	 * try to set the MDC clock dynamically according to the actual
	 * CSR clock input.
4244 	 */
4245 	if (!priv->plat->clk_csr)
4246 		stmmac_clk_csr_set(priv);
4247 	else
4248 		priv->clk_csr = priv->plat->clk_csr;
4249 
4250 	stmmac_check_pcs_mode(priv);
4251 
4252 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4253 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4254 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4255 		/* MDIO bus Registration */
4256 		ret = stmmac_mdio_register(ndev);
4257 		if (ret < 0) {
4258 			dev_err(priv->device,
4259 				"%s: MDIO bus (id: %d) registration failed",
4260 				__func__, priv->plat->bus_id);
4261 			goto error_mdio_register;
4262 		}
4263 	}
4264 
4265 	ret = register_netdev(ndev);
4266 	if (ret) {
4267 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4268 			__func__, ret);
4269 		goto error_netdev_register;
4270 	}
4271 
4272 	return ret;
4273 
4274 error_netdev_register:
4275 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4276 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4277 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4278 		stmmac_mdio_unregister(ndev);
4279 error_mdio_register:
4280 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4281 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4282 
4283 		netif_napi_del(&rx_q->napi);
4284 	}
4285 error_hw_init:
4286 	free_netdev(ndev);
4287 
4288 	return ret;
4289 }
4290 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4291 
4292 /**
4293  * stmmac_dvr_remove
4294  * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
 * changes the link status, and releases the DMA descriptor rings.
4297  */
4298 int stmmac_dvr_remove(struct device *dev)
4299 {
4300 	struct net_device *ndev = dev_get_drvdata(dev);
4301 	struct stmmac_priv *priv = netdev_priv(ndev);
4302 
	netdev_info(priv->dev, "%s: removing driver\n", __func__);
4304 
4305 	stmmac_stop_all_dma(priv);
4306 
4307 	priv->hw->mac->set_mac(priv->ioaddr, false);
4308 	netif_carrier_off(ndev);
4309 	unregister_netdev(ndev);
4310 	if (priv->plat->stmmac_rst)
4311 		reset_control_assert(priv->plat->stmmac_rst);
4312 	clk_disable_unprepare(priv->plat->pclk);
4313 	clk_disable_unprepare(priv->plat->stmmac_clk);
4314 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4315 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4316 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4317 		stmmac_mdio_unregister(ndev);
4318 	free_netdev(ndev);
4319 
4320 	return 0;
4321 }
4322 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4323 
4324 /**
4325  * stmmac_suspend - suspend callback
4326  * @dev: device pointer
 * Description: this function suspends the device. It is called by the
 * platform driver to stop the network queues, release the resources,
 * program the PMT register (for WoL), and clean up driver resources.
4330  */
4331 int stmmac_suspend(struct device *dev)
4332 {
4333 	struct net_device *ndev = dev_get_drvdata(dev);
4334 	struct stmmac_priv *priv = netdev_priv(ndev);
4335 	unsigned long flags;
4336 
4337 	if (!ndev || !netif_running(ndev))
4338 		return 0;
4339 
4340 	if (ndev->phydev)
4341 		phy_stop(ndev->phydev);
4342 
4343 	spin_lock_irqsave(&priv->lock, flags);
4344 
4345 	netif_device_detach(ndev);
4346 	stmmac_stop_all_queues(priv);
4347 
4348 	stmmac_disable_all_queues(priv);
4349 
4350 	/* Stop TX/RX DMA */
4351 	stmmac_stop_all_dma(priv);
4352 
4353 	/* Enable Power down mode by programming the PMT regs */
4354 	if (device_may_wakeup(priv->device)) {
4355 		priv->hw->mac->pmt(priv->hw, priv->wolopts);
4356 		priv->irq_wake = 1;
4357 	} else {
4358 		priv->hw->mac->set_mac(priv->ioaddr, false);
4359 		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable the clocks since wakeup is not required */
4361 		clk_disable(priv->plat->pclk);
4362 		clk_disable(priv->plat->stmmac_clk);
4363 	}
4364 	spin_unlock_irqrestore(&priv->lock, flags);
4365 
4366 	priv->oldlink = false;
4367 	priv->speed = SPEED_UNKNOWN;
4368 	priv->oldduplex = DUPLEX_UNKNOWN;
4369 	return 0;
4370 }
4371 EXPORT_SYMBOL_GPL(stmmac_suspend);
4372 
4373 /**
4374  * stmmac_reset_queues_param - reset queue parameters
4375  * @dev: device pointer
4376  */
4377 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4378 {
4379 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4380 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4381 	u32 queue;
4382 
4383 	for (queue = 0; queue < rx_cnt; queue++) {
4384 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4385 
4386 		rx_q->cur_rx = 0;
4387 		rx_q->dirty_rx = 0;
4388 	}
4389 
4390 	for (queue = 0; queue < tx_cnt; queue++) {
4391 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4392 
4393 		tx_q->cur_tx = 0;
4394 		tx_q->dirty_tx = 0;
4395 	}
4396 }
4397 
4398 /**
4399  * stmmac_resume - resume callback
4400  * @dev: device pointer
 * Description: on resume, this function is invoked to set up the DMA and the
 * core in a usable state.
4403  */
4404 int stmmac_resume(struct device *dev)
4405 {
4406 	struct net_device *ndev = dev_get_drvdata(dev);
4407 	struct stmmac_priv *priv = netdev_priv(ndev);
4408 	unsigned long flags;
4409 
4410 	if (!netif_running(ndev))
4411 		return 0;
4412 
	/* The Power Down bit in the PMT register is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Nevertheless, it's better to clear this bit
	 * manually because it can generate problems while resuming
	 * from other devices (e.g. a serial console).
4418 	 */
4419 	if (device_may_wakeup(priv->device)) {
4420 		spin_lock_irqsave(&priv->lock, flags);
4421 		priv->hw->mac->pmt(priv->hw, 0);
4422 		spin_unlock_irqrestore(&priv->lock, flags);
4423 		priv->irq_wake = 0;
4424 	} else {
4425 		pinctrl_pm_select_default_state(priv->device);
		/* enable the clocks previously disabled */
4427 		clk_enable(priv->plat->stmmac_clk);
4428 		clk_enable(priv->plat->pclk);
4429 		/* reset the phy so that it's ready */
4430 		if (priv->mii)
4431 			stmmac_mdio_reset(priv->mii);
4432 	}
4433 
4434 	netif_device_attach(ndev);
4435 
4436 	spin_lock_irqsave(&priv->lock, flags);
4437 
4438 	stmmac_reset_queues_param(priv);
4439 
	/* Reset the private MSS value to force the MSS context to be set up
	 * again on the next TSO xmit (only used on GMAC4).
	 */
4443 	priv->mss = 0;
4444 
4445 	stmmac_clear_descriptors(priv);
4446 
4447 	stmmac_hw_setup(ndev, false);
4448 	stmmac_init_tx_coalesce(priv);
4449 	stmmac_set_rx_mode(ndev);
4450 
4451 	stmmac_enable_all_queues(priv);
4452 
4453 	stmmac_start_all_queues(priv);
4454 
4455 	spin_unlock_irqrestore(&priv->lock, flags);
4456 
4457 	if (ndev->phydev)
4458 		phy_start(ndev->phydev);
4459 
4460 	return 0;
4461 }
4462 EXPORT_SYMBOL_GPL(stmmac_resume);
4463 
4464 #ifndef MODULE
4465 static int __init stmmac_cmdline_opt(char *str)
4466 {
4467 	char *opt;
4468 
4469 	if (!str || !*str)
4470 		return -EINVAL;
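	/* Parse comma-separated "name:value" options from the stmmaceth=
	 * kernel command line.
	 */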
4471 	while ((opt = strsep(&str, ",")) != NULL) {
4472 		if (!strncmp(opt, "debug:", 6)) {
4473 			if (kstrtoint(opt + 6, 0, &debug))
4474 				goto err;
4475 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4476 			if (kstrtoint(opt + 8, 0, &phyaddr))
4477 				goto err;
4478 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4479 			if (kstrtoint(opt + 7, 0, &buf_sz))
4480 				goto err;
4481 		} else if (!strncmp(opt, "tc:", 3)) {
4482 			if (kstrtoint(opt + 3, 0, &tc))
4483 				goto err;
4484 		} else if (!strncmp(opt, "watchdog:", 9)) {
4485 			if (kstrtoint(opt + 9, 0, &watchdog))
4486 				goto err;
4487 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4488 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4489 				goto err;
4490 		} else if (!strncmp(opt, "pause:", 6)) {
4491 			if (kstrtoint(opt + 6, 0, &pause))
4492 				goto err;
4493 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4494 			if (kstrtoint(opt + 10, 0, &eee_timer))
4495 				goto err;
4496 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4497 			if (kstrtoint(opt + 11, 0, &chain_mode))
4498 				goto err;
4499 		}
4500 	}
4501 	return 0;
4502 
4503 err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4505 	return -EINVAL;
4506 }
4507 
4508 __setup("stmmaceth=", stmmac_cmdline_opt);
4509 #endif /* MODULE */
4510 
4511 static int __init stmmac_init(void)
4512 {
4513 #ifdef CONFIG_DEBUG_FS
4514 	/* Create debugfs main directory if it doesn't exist yet */
4515 	if (!stmmac_fs_dir) {
4516 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4517 
4518 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4519 			pr_err("ERROR %s, debugfs create directory failed\n",
4520 			       STMMAC_RESOURCE_NAME);
4521 
4522 			return -ENOMEM;
4523 		}
4524 	}
4525 #endif
4526 
4527 	return 0;
4528 }
4529 
4530 static void __exit stmmac_exit(void)
4531 {
4532 #ifdef CONFIG_DEBUG_FS
4533 	debugfs_remove_recursive(stmmac_fs_dir);
4534 #endif
4535 }
4536 
4537 module_init(stmmac_init)
4538 module_exit(stmmac_exit)
4539 
4540 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4541 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4542 MODULE_LICENSE("GPL");
4543