xref: /openbmc/linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 9dae47aba0a055f761176d9297371d5bb24289ec)
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 
54 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
55 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
56 
57 /* Module parameters */
58 #define TX_TIMEO	5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62 
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66 
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70 
71 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
73 
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77 
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81 
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86 
87 #define	DEFAULT_BUFSIZE	1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91 
92 #define	STMMAC_RX_COPYBREAK	256
93 
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
96 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97 
98 #define STMMAC_DEFAULT_LPI_TIMER	1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103 
104 /* By default the driver will use the ring mode to manage tx and rx descriptors,
105  * but allow user to force to use the chain instead of the ring
106  */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
110 
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112 
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117 
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119 
120 /**
121  * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and set a default in case of
123  * errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127 	if (unlikely(watchdog < 0))
128 		watchdog = TX_TIMEO;
129 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130 		buf_sz = DEFAULT_BUFSIZE;
131 	if (unlikely(flow_ctrl > 1))
132 		flow_ctrl = FLOW_AUTO;
133 	else if (likely(flow_ctrl < 0))
134 		flow_ctrl = FLOW_OFF;
135 	if (unlikely((pause < 0) || (pause > 0xffff)))
136 		pause = PAUSE_TIME;
137 	if (eee_timer < 0)
138 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140 
141 /**
142  * stmmac_disable_all_queues - Disable all queues
143  * @priv: driver private structure
144  */
145 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
146 {
147 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
148 	u32 queue;
149 
150 	for (queue = 0; queue < rx_queues_cnt; queue++) {
151 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
152 
153 		napi_disable(&rx_q->napi);
154 	}
155 }
156 
157 /**
158  * stmmac_enable_all_queues - Enable all queues
159  * @priv: driver private structure
160  */
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162 {
163 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164 	u32 queue;
165 
166 	for (queue = 0; queue < rx_queues_cnt; queue++) {
167 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
168 
169 		napi_enable(&rx_q->napi);
170 	}
171 }
172 
173 /**
174  * stmmac_stop_all_queues - Stop all queues
175  * @priv: driver private structure
176  */
177 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
178 {
179 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
180 	u32 queue;
181 
182 	for (queue = 0; queue < tx_queues_cnt; queue++)
183 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
184 }
185 
186 /**
187  * stmmac_start_all_queues - Start all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_start_all_queues(struct stmmac_priv *priv)
191 {
192 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193 	u32 queue;
194 
195 	for (queue = 0; queue < tx_queues_cnt; queue++)
196 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198 
199 /**
200  * stmmac_clk_csr_set - dynamically set the MDC clock
201  * @priv: driver private structure
202  * Description: this is to dynamically set the MDC clock according to the csr
203  * clock input.
204  * Note:
205  *	If a specific clk_csr value is passed from the platform
206  *	this means that the CSR Clock Range selection cannot be
207  *	changed at run-time and it is fixed (as reported in the driver
208  *	documentation). Viceversa the driver will try to set the MDC
209  *	clock dynamically according to the actual clock input.
210  */
211 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
212 {
213 	u32 clk_rate;
214 
215 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
216 
217 	/* Platform provided default clk_csr would be assumed valid
218 	 * for all other cases except for the below mentioned ones.
219 	 * For values higher than the IEEE 802.3 specified frequency
220 	 * we can not estimate the proper divider as it is not known
221 	 * the frequency of clk_csr_i. So we do not change the default
222 	 * divider.
223 	 */
224 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
225 		if (clk_rate < CSR_F_35M)
226 			priv->clk_csr = STMMAC_CSR_20_35M;
227 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
228 			priv->clk_csr = STMMAC_CSR_35_60M;
229 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
230 			priv->clk_csr = STMMAC_CSR_60_100M;
231 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
232 			priv->clk_csr = STMMAC_CSR_100_150M;
233 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
234 			priv->clk_csr = STMMAC_CSR_150_250M;
235 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
236 			priv->clk_csr = STMMAC_CSR_250_300M;
237 	}
238 
239 	if (priv->plat->has_sun8i) {
240 		if (clk_rate > 160000000)
241 			priv->clk_csr = 0x03;
242 		else if (clk_rate > 80000000)
243 			priv->clk_csr = 0x02;
244 		else if (clk_rate > 40000000)
245 			priv->clk_csr = 0x01;
246 		else
247 			priv->clk_csr = 0;
248 	}
249 }
250 
251 static void print_pkt(unsigned char *buf, int len)
252 {
253 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
254 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
255 }
256 
257 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
258 {
259 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
260 	u32 avail;
261 
262 	if (tx_q->dirty_tx > tx_q->cur_tx)
263 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
264 	else
265 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
266 
267 	return avail;
268 }
269 
270 /**
271  * stmmac_rx_dirty - Get RX queue dirty
272  * @priv: driver private structure
273  * @queue: RX queue index
274  */
275 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
276 {
277 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
278 	u32 dirty;
279 
280 	if (rx_q->dirty_rx <= rx_q->cur_rx)
281 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
282 	else
283 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
284 
285 	return dirty;
286 }
287 
288 /**
289  * stmmac_hw_fix_mac_speed - callback for speed selection
290  * @priv: driver private structure
291  * Description: on some platforms (e.g. ST), some HW system configuration
292  * registers have to be set according to the link speed negotiated.
293  */
294 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
295 {
296 	struct net_device *ndev = priv->dev;
297 	struct phy_device *phydev = ndev->phydev;
298 
299 	if (likely(priv->plat->fix_mac_speed))
300 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
301 }
302 
303 /**
304  * stmmac_enable_eee_mode - check and enter in LPI mode
305  * @priv: driver private structure
306  * Description: this function is to verify and enter in LPI mode in case of
307  * EEE.
308  */
309 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
310 {
311 	u32 tx_cnt = priv->plat->tx_queues_to_use;
312 	u32 queue;
313 
314 	/* check if all TX queues have the work finished */
315 	for (queue = 0; queue < tx_cnt; queue++) {
316 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
317 
318 		if (tx_q->dirty_tx != tx_q->cur_tx)
319 			return; /* still unfinished work */
320 	}
321 
322 	/* Check and enter in LPI mode */
323 	if (!priv->tx_path_in_lpi_mode)
324 		priv->hw->mac->set_eee_mode(priv->hw,
325 					    priv->plat->en_tx_lpi_clockgating);
326 }
327 
328 /**
329  * stmmac_disable_eee_mode - disable and exit from LPI mode
330  * @priv: driver private structure
331  * Description: this function is to exit and disable EEE in case of
332  * LPI state is true. This is called by the xmit.
333  */
334 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
335 {
336 	priv->hw->mac->reset_eee_mode(priv->hw);
337 	del_timer_sync(&priv->eee_ctrl_timer);
338 	priv->tx_path_in_lpi_mode = false;
339 }
340 
341 /**
342  * stmmac_eee_ctrl_timer - EEE TX SW timer.
343  * @arg : data hook
344  * Description:
345  *  if there is no data transfer and if we are not in LPI state,
346  *  then MAC Transmitter can be moved to LPI state.
347  */
348 static void stmmac_eee_ctrl_timer(struct timer_list *t)
349 {
350 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
351 
352 	stmmac_enable_eee_mode(priv);
353 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
354 }
355 
356 /**
357  * stmmac_eee_init - init EEE
358  * @priv: driver private structure
359  * Description:
360  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
361  *  can also manage EEE, this function enable the LPI state and start related
362  *  timer.
363  */
364 bool stmmac_eee_init(struct stmmac_priv *priv)
365 {
366 	struct net_device *ndev = priv->dev;
367 	unsigned long flags;
368 	bool ret = false;
369 
370 	/* Using PCS we cannot dial with the phy registers at this stage
371 	 * so we do not support extra feature like EEE.
372 	 */
373 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
374 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
375 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
376 		goto out;
377 
378 	/* MAC core supports the EEE feature. */
379 	if (priv->dma_cap.eee) {
380 		int tx_lpi_timer = priv->tx_lpi_timer;
381 
382 		/* Check if the PHY supports EEE */
383 		if (phy_init_eee(ndev->phydev, 1)) {
384 			/* To manage at run-time if the EEE cannot be supported
385 			 * anymore (for example because the lp caps have been
386 			 * changed).
387 			 * In that case the driver disable own timers.
388 			 */
389 			spin_lock_irqsave(&priv->lock, flags);
390 			if (priv->eee_active) {
391 				netdev_dbg(priv->dev, "disable EEE\n");
392 				del_timer_sync(&priv->eee_ctrl_timer);
393 				priv->hw->mac->set_eee_timer(priv->hw, 0,
394 							     tx_lpi_timer);
395 			}
396 			priv->eee_active = 0;
397 			spin_unlock_irqrestore(&priv->lock, flags);
398 			goto out;
399 		}
400 		/* Activate the EEE and start timers */
401 		spin_lock_irqsave(&priv->lock, flags);
402 		if (!priv->eee_active) {
403 			priv->eee_active = 1;
404 			timer_setup(&priv->eee_ctrl_timer,
405 				    stmmac_eee_ctrl_timer, 0);
406 			mod_timer(&priv->eee_ctrl_timer,
407 				  STMMAC_LPI_T(eee_timer));
408 
409 			priv->hw->mac->set_eee_timer(priv->hw,
410 						     STMMAC_DEFAULT_LIT_LS,
411 						     tx_lpi_timer);
412 		}
413 		/* Set HW EEE according to the speed */
414 		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
415 
416 		ret = true;
417 		spin_unlock_irqrestore(&priv->lock, flags);
418 
419 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
420 	}
421 out:
422 	return ret;
423 }
424 
425 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
426  * @priv: driver private structure
427  * @p : descriptor pointer
428  * @skb : the socket buffer
429  * Description :
430  * This function will read timestamp from the descriptor & pass it to stack.
431  * and also perform some sanity checks.
432  */
433 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
434 				   struct dma_desc *p, struct sk_buff *skb)
435 {
436 	struct skb_shared_hwtstamps shhwtstamp;
437 	u64 ns;
438 
439 	if (!priv->hwts_tx_en)
440 		return;
441 
442 	/* exit if skb doesn't support hw tstamp */
443 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
444 		return;
445 
446 	/* check tx tstamp status */
447 	if (priv->hw->desc->get_tx_timestamp_status(p)) {
448 		/* get the valid tstamp */
449 		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
450 
451 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
452 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
453 
454 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
455 		/* pass tstamp to stack */
456 		skb_tstamp_tx(skb, &shhwtstamp);
457 	}
458 
459 	return;
460 }
461 
462 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
463  * @priv: driver private structure
464  * @p : descriptor pointer
465  * @np : next descriptor pointer
466  * @skb : the socket buffer
467  * Description :
468  * This function will read received packet's timestamp from the descriptor
469  * and pass it to stack. It also perform some sanity checks.
470  */
471 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
472 				   struct dma_desc *np, struct sk_buff *skb)
473 {
474 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
475 	struct dma_desc *desc = p;
476 	u64 ns;
477 
478 	if (!priv->hwts_rx_en)
479 		return;
480 	/* For GMAC4, the valid timestamp is from CTX next desc. */
481 	if (priv->plat->has_gmac4)
482 		desc = np;
483 
484 	/* Check if timestamp is available */
485 	if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
486 		ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
487 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
488 		shhwtstamp = skb_hwtstamps(skb);
489 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
490 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
491 	} else  {
492 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
493 	}
494 }
495 
496 /**
497  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
498  *  @dev: device pointer.
499  *  @ifr: An IOCTL specific structure, that can contain a pointer to
500  *  a proprietary structure used to pass information to the driver.
501  *  Description:
502  *  This function configures the MAC to enable/disable both outgoing(TX)
503  *  and incoming(RX) packets time stamping based on user input.
504  *  Return Value:
505  *  0 on success and an appropriate -ve integer on failure.
506  */
507 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
508 {
509 	struct stmmac_priv *priv = netdev_priv(dev);
510 	struct hwtstamp_config config;
511 	struct timespec64 now;
512 	u64 temp = 0;
513 	u32 ptp_v2 = 0;
514 	u32 tstamp_all = 0;
515 	u32 ptp_over_ipv4_udp = 0;
516 	u32 ptp_over_ipv6_udp = 0;
517 	u32 ptp_over_ethernet = 0;
518 	u32 snap_type_sel = 0;
519 	u32 ts_master_en = 0;
520 	u32 ts_event_en = 0;
521 	u32 value = 0;
522 	u32 sec_inc;
523 
524 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
525 		netdev_alert(priv->dev, "No support for HW time stamping\n");
526 		priv->hwts_tx_en = 0;
527 		priv->hwts_rx_en = 0;
528 
529 		return -EOPNOTSUPP;
530 	}
531 
532 	if (copy_from_user(&config, ifr->ifr_data,
533 			   sizeof(struct hwtstamp_config)))
534 		return -EFAULT;
535 
536 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
537 		   __func__, config.flags, config.tx_type, config.rx_filter);
538 
539 	/* reserved for future extensions */
540 	if (config.flags)
541 		return -EINVAL;
542 
543 	if (config.tx_type != HWTSTAMP_TX_OFF &&
544 	    config.tx_type != HWTSTAMP_TX_ON)
545 		return -ERANGE;
546 
547 	if (priv->adv_ts) {
548 		switch (config.rx_filter) {
549 		case HWTSTAMP_FILTER_NONE:
550 			/* time stamp no incoming packet at all */
551 			config.rx_filter = HWTSTAMP_FILTER_NONE;
552 			break;
553 
554 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
555 			/* PTP v1, UDP, any kind of event packet */
556 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
557 			/* take time stamp for all event messages */
558 			if (priv->plat->has_gmac4)
559 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
560 			else
561 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
562 
563 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
564 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
565 			break;
566 
567 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
568 			/* PTP v1, UDP, Sync packet */
569 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
570 			/* take time stamp for SYNC messages only */
571 			ts_event_en = PTP_TCR_TSEVNTENA;
572 
573 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
574 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
575 			break;
576 
577 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
578 			/* PTP v1, UDP, Delay_req packet */
579 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
580 			/* take time stamp for Delay_Req messages only */
581 			ts_master_en = PTP_TCR_TSMSTRENA;
582 			ts_event_en = PTP_TCR_TSEVNTENA;
583 
584 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
585 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
586 			break;
587 
588 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
589 			/* PTP v2, UDP, any kind of event packet */
590 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
591 			ptp_v2 = PTP_TCR_TSVER2ENA;
592 			/* take time stamp for all event messages */
593 			if (priv->plat->has_gmac4)
594 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
595 			else
596 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
597 
598 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
599 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
600 			break;
601 
602 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
603 			/* PTP v2, UDP, Sync packet */
604 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
605 			ptp_v2 = PTP_TCR_TSVER2ENA;
606 			/* take time stamp for SYNC messages only */
607 			ts_event_en = PTP_TCR_TSEVNTENA;
608 
609 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
610 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
611 			break;
612 
613 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
614 			/* PTP v2, UDP, Delay_req packet */
615 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
616 			ptp_v2 = PTP_TCR_TSVER2ENA;
617 			/* take time stamp for Delay_Req messages only */
618 			ts_master_en = PTP_TCR_TSMSTRENA;
619 			ts_event_en = PTP_TCR_TSEVNTENA;
620 
621 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
622 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
623 			break;
624 
625 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
626 			/* PTP v2/802.AS1 any layer, any kind of event packet */
627 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
628 			ptp_v2 = PTP_TCR_TSVER2ENA;
629 			/* take time stamp for all event messages */
630 			if (priv->plat->has_gmac4)
631 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
632 			else
633 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
634 
635 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
636 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
637 			ptp_over_ethernet = PTP_TCR_TSIPENA;
638 			break;
639 
640 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
641 			/* PTP v2/802.AS1, any layer, Sync packet */
642 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
643 			ptp_v2 = PTP_TCR_TSVER2ENA;
644 			/* take time stamp for SYNC messages only */
645 			ts_event_en = PTP_TCR_TSEVNTENA;
646 
647 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
648 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
649 			ptp_over_ethernet = PTP_TCR_TSIPENA;
650 			break;
651 
652 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
653 			/* PTP v2/802.AS1, any layer, Delay_req packet */
654 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
655 			ptp_v2 = PTP_TCR_TSVER2ENA;
656 			/* take time stamp for Delay_Req messages only */
657 			ts_master_en = PTP_TCR_TSMSTRENA;
658 			ts_event_en = PTP_TCR_TSEVNTENA;
659 
660 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662 			ptp_over_ethernet = PTP_TCR_TSIPENA;
663 			break;
664 
665 		case HWTSTAMP_FILTER_NTP_ALL:
666 		case HWTSTAMP_FILTER_ALL:
667 			/* time stamp any incoming packet */
668 			config.rx_filter = HWTSTAMP_FILTER_ALL;
669 			tstamp_all = PTP_TCR_TSENALL;
670 			break;
671 
672 		default:
673 			return -ERANGE;
674 		}
675 	} else {
676 		switch (config.rx_filter) {
677 		case HWTSTAMP_FILTER_NONE:
678 			config.rx_filter = HWTSTAMP_FILTER_NONE;
679 			break;
680 		default:
681 			/* PTP v1, UDP, any kind of event packet */
682 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
683 			break;
684 		}
685 	}
686 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
687 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
688 
689 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
690 		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
691 	else {
692 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
693 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
694 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
695 			 ts_master_en | snap_type_sel);
696 		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
697 
698 		/* program Sub Second Increment reg */
699 		sec_inc = priv->hw->ptp->config_sub_second_increment(
700 			priv->ptpaddr, priv->plat->clk_ptp_rate,
701 			priv->plat->has_gmac4);
702 		temp = div_u64(1000000000ULL, sec_inc);
703 
704 		/* calculate default added value:
705 		 * formula is :
706 		 * addend = (2^32)/freq_div_ratio;
707 		 * where, freq_div_ratio = 1e9ns/sec_inc
708 		 */
709 		temp = (u64)(temp << 32);
710 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
711 		priv->hw->ptp->config_addend(priv->ptpaddr,
712 					     priv->default_addend);
713 
714 		/* initialize system time */
715 		ktime_get_real_ts64(&now);
716 
717 		/* lower 32 bits of tv_sec are safe until y2106 */
718 		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
719 					    now.tv_nsec);
720 	}
721 
722 	return copy_to_user(ifr->ifr_data, &config,
723 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
724 }
725 
726 /**
727  * stmmac_init_ptp - init PTP
728  * @priv: driver private structure
729  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
730  * This is done by looking at the HW cap. register.
731  * This function also registers the ptp driver.
732  */
733 static int stmmac_init_ptp(struct stmmac_priv *priv)
734 {
735 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
736 		return -EOPNOTSUPP;
737 
738 	priv->adv_ts = 0;
739 	/* Check if adv_ts can be enabled for dwmac 4.x core */
740 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
741 		priv->adv_ts = 1;
742 	/* Dwmac 3.x core with extend_desc can support adv_ts */
743 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
744 		priv->adv_ts = 1;
745 
746 	if (priv->dma_cap.time_stamp)
747 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
748 
749 	if (priv->adv_ts)
750 		netdev_info(priv->dev,
751 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
752 
753 	priv->hw->ptp = &stmmac_ptp;
754 	priv->hwts_tx_en = 0;
755 	priv->hwts_rx_en = 0;
756 
757 	stmmac_ptp_register(priv);
758 
759 	return 0;
760 }
761 
762 static void stmmac_release_ptp(struct stmmac_priv *priv)
763 {
764 	if (priv->plat->clk_ptp_ref)
765 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
766 	stmmac_ptp_unregister(priv);
767 }
768 
769 /**
770  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
771  *  @priv: driver private structure
772  *  Description: It is used for configuring the flow control in all queues
773  */
774 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
775 {
776 	u32 tx_cnt = priv->plat->tx_queues_to_use;
777 
778 	priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
779 				 priv->pause, tx_cnt);
780 }
781 
782 /**
783  * stmmac_adjust_link - adjusts the link parameters
784  * @dev: net device structure
785  * Description: this is the helper called by the physical abstraction layer
786  * drivers to communicate the phy link status. According the speed and duplex
787  * this driver can invoke registered glue-logic as well.
788  * It also invoke the eee initialization because it could happen when switch
789  * on different networks (that are eee capable).
790  */
791 static void stmmac_adjust_link(struct net_device *dev)
792 {
793 	struct stmmac_priv *priv = netdev_priv(dev);
794 	struct phy_device *phydev = dev->phydev;
795 	unsigned long flags;
796 	bool new_state = false;
797 
798 	if (!phydev)
799 		return;
800 
801 	spin_lock_irqsave(&priv->lock, flags);
802 
803 	if (phydev->link) {
804 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
805 
806 		/* Now we make sure that we can be in full duplex mode.
807 		 * If not, we operate in half-duplex mode. */
808 		if (phydev->duplex != priv->oldduplex) {
809 			new_state = true;
810 			if (!phydev->duplex)
811 				ctrl &= ~priv->hw->link.duplex;
812 			else
813 				ctrl |= priv->hw->link.duplex;
814 			priv->oldduplex = phydev->duplex;
815 		}
816 		/* Flow Control operation */
817 		if (phydev->pause)
818 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
819 
820 		if (phydev->speed != priv->speed) {
821 			new_state = true;
822 			ctrl &= ~priv->hw->link.speed_mask;
823 			switch (phydev->speed) {
824 			case SPEED_1000:
825 				ctrl |= priv->hw->link.speed1000;
826 				break;
827 			case SPEED_100:
828 				ctrl |= priv->hw->link.speed100;
829 				break;
830 			case SPEED_10:
831 				ctrl |= priv->hw->link.speed10;
832 				break;
833 			default:
834 				netif_warn(priv, link, priv->dev,
835 					   "broken speed: %d\n", phydev->speed);
836 				phydev->speed = SPEED_UNKNOWN;
837 				break;
838 			}
839 			if (phydev->speed != SPEED_UNKNOWN)
840 				stmmac_hw_fix_mac_speed(priv);
841 			priv->speed = phydev->speed;
842 		}
843 
844 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
845 
846 		if (!priv->oldlink) {
847 			new_state = true;
848 			priv->oldlink = true;
849 		}
850 	} else if (priv->oldlink) {
851 		new_state = true;
852 		priv->oldlink = false;
853 		priv->speed = SPEED_UNKNOWN;
854 		priv->oldduplex = DUPLEX_UNKNOWN;
855 	}
856 
857 	if (new_state && netif_msg_link(priv))
858 		phy_print_status(phydev);
859 
860 	spin_unlock_irqrestore(&priv->lock, flags);
861 
862 	if (phydev->is_pseudo_fixed_link)
863 		/* Stop PHY layer to call the hook to adjust the link in case
864 		 * of a switch is attached to the stmmac driver.
865 		 */
866 		phydev->irq = PHY_IGNORE_INTERRUPT;
867 	else
868 		/* At this stage, init the EEE if supported.
869 		 * Never called in case of fixed_link.
870 		 */
871 		priv->eee_enabled = stmmac_eee_init(priv);
872 }
873 
874 /**
875  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
876  * @priv: driver private structure
877  * Description: this is to verify if the HW supports the PCS.
878  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
879  * configured for the TBI, RTBI, or SGMII PHY interface.
880  */
881 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
882 {
883 	int interface = priv->plat->interface;
884 
885 	if (priv->dma_cap.pcs) {
886 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
887 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
888 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
889 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
890 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
891 			priv->hw->pcs = STMMAC_PCS_RGMII;
892 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
893 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
894 			priv->hw->pcs = STMMAC_PCS_SGMII;
895 		}
896 	}
897 }
898 
899 /**
900  * stmmac_init_phy - PHY initialization
901  * @dev: net device structure
902  * Description: it initializes the driver's PHY state, and attaches the PHY
903  * to the mac driver.
904  *  Return value:
905  *  0 on success
906  */
907 static int stmmac_init_phy(struct net_device *dev)
908 {
909 	struct stmmac_priv *priv = netdev_priv(dev);
910 	struct phy_device *phydev;
911 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
912 	char bus_id[MII_BUS_ID_SIZE];
913 	int interface = priv->plat->interface;
914 	int max_speed = priv->plat->max_speed;
915 	priv->oldlink = false;
916 	priv->speed = SPEED_UNKNOWN;
917 	priv->oldduplex = DUPLEX_UNKNOWN;
918 
919 	if (priv->plat->phy_node) {
920 		phydev = of_phy_connect(dev, priv->plat->phy_node,
921 					&stmmac_adjust_link, 0, interface);
922 	} else {
923 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
924 			 priv->plat->bus_id);
925 
926 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
927 			 priv->plat->phy_addr);
928 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
929 			   phy_id_fmt);
930 
931 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
932 				     interface);
933 	}
934 
935 	if (IS_ERR_OR_NULL(phydev)) {
936 		netdev_err(priv->dev, "Could not attach to PHY\n");
937 		if (!phydev)
938 			return -ENODEV;
939 
940 		return PTR_ERR(phydev);
941 	}
942 
943 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
944 	if ((interface == PHY_INTERFACE_MODE_MII) ||
945 	    (interface == PHY_INTERFACE_MODE_RMII) ||
946 		(max_speed < 1000 && max_speed > 0))
947 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
948 					 SUPPORTED_1000baseT_Full);
949 
950 	/*
951 	 * Broken HW is sometimes missing the pull-up resistor on the
952 	 * MDIO line, which results in reads to non-existent devices returning
953 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
954 	 * device as well.
955 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
956 	 */
957 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
958 		phy_disconnect(phydev);
959 		return -ENODEV;
960 	}
961 
962 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
963 	 * subsequent PHY polling, make sure we force a link transition if
964 	 * we have a UP/DOWN/UP transition
965 	 */
966 	if (phydev->is_pseudo_fixed_link)
967 		phydev->irq = PHY_POLL;
968 
969 	phy_attached_info(phydev);
970 	return 0;
971 }
972 
973 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
974 {
975 	u32 rx_cnt = priv->plat->rx_queues_to_use;
976 	void *head_rx;
977 	u32 queue;
978 
979 	/* Display RX rings */
980 	for (queue = 0; queue < rx_cnt; queue++) {
981 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
982 
983 		pr_info("\tRX Queue %u rings\n", queue);
984 
985 		if (priv->extend_desc)
986 			head_rx = (void *)rx_q->dma_erx;
987 		else
988 			head_rx = (void *)rx_q->dma_rx;
989 
990 		/* Display RX ring */
991 		priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
992 	}
993 }
994 
995 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
996 {
997 	u32 tx_cnt = priv->plat->tx_queues_to_use;
998 	void *head_tx;
999 	u32 queue;
1000 
1001 	/* Display TX rings */
1002 	for (queue = 0; queue < tx_cnt; queue++) {
1003 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1004 
1005 		pr_info("\tTX Queue %d rings\n", queue);
1006 
1007 		if (priv->extend_desc)
1008 			head_tx = (void *)tx_q->dma_etx;
1009 		else
1010 			head_tx = (void *)tx_q->dma_tx;
1011 
1012 		priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1013 	}
1014 }
1015 
1016 static void stmmac_display_rings(struct stmmac_priv *priv)
1017 {
1018 	/* Display RX ring */
1019 	stmmac_display_rx_rings(priv);
1020 
1021 	/* Display TX ring */
1022 	stmmac_display_tx_rings(priv);
1023 }
1024 
1025 static int stmmac_set_bfsize(int mtu, int bufsize)
1026 {
1027 	int ret = bufsize;
1028 
1029 	if (mtu >= BUF_SIZE_4KiB)
1030 		ret = BUF_SIZE_8KiB;
1031 	else if (mtu >= BUF_SIZE_2KiB)
1032 		ret = BUF_SIZE_4KiB;
1033 	else if (mtu > DEFAULT_BUFSIZE)
1034 		ret = BUF_SIZE_2KiB;
1035 	else
1036 		ret = DEFAULT_BUFSIZE;
1037 
1038 	return ret;
1039 }
1040 
1041 /**
1042  * stmmac_clear_rx_descriptors - clear RX descriptors
1043  * @priv: driver private structure
1044  * @queue: RX queue index
1045  * Description: this function is called to clear the RX descriptors
1046  * in case of both basic and extended descriptors are used.
1047  */
1048 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1049 {
1050 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1051 	int i;
1052 
1053 	/* Clear the RX descriptors */
1054 	for (i = 0; i < DMA_RX_SIZE; i++)
1055 		if (priv->extend_desc)
1056 			priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1057 						     priv->use_riwt, priv->mode,
1058 						     (i == DMA_RX_SIZE - 1));
1059 		else
1060 			priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1061 						     priv->use_riwt, priv->mode,
1062 						     (i == DMA_RX_SIZE - 1));
1063 }
1064 
1065 /**
1066  * stmmac_clear_tx_descriptors - clear tx descriptors
1067  * @priv: driver private structure
1068  * @queue: TX queue index.
1069  * Description: this function is called to clear the TX descriptors
1070  * in case of both basic and extended descriptors are used.
1071  */
1072 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1073 {
1074 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1075 	int i;
1076 
1077 	/* Clear the TX descriptors */
1078 	for (i = 0; i < DMA_TX_SIZE; i++)
1079 		if (priv->extend_desc)
1080 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1081 						     priv->mode,
1082 						     (i == DMA_TX_SIZE - 1));
1083 		else
1084 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1085 						     priv->mode,
1086 						     (i == DMA_TX_SIZE - 1));
1087 }
1088 
1089 /**
1090  * stmmac_clear_descriptors - clear descriptors
1091  * @priv: driver private structure
1092  * Description: this function is called to clear the TX and RX descriptors
1093  * in case of both basic and extended descriptors are used.
1094  */
1095 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1096 {
1097 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1098 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1099 	u32 queue;
1100 
1101 	/* Clear the RX descriptors */
1102 	for (queue = 0; queue < rx_queue_cnt; queue++)
1103 		stmmac_clear_rx_descriptors(priv, queue);
1104 
1105 	/* Clear the TX descriptors */
1106 	for (queue = 0; queue < tx_queue_cnt; queue++)
1107 		stmmac_clear_tx_descriptors(priv, queue);
1108 }
1109 
1110 /**
1111  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1112  * @priv: driver private structure
1113  * @p: descriptor pointer
1114  * @i: descriptor index
1115  * @flags: gfp flag
1116  * @queue: RX queue index
1117  * Description: this function is called to allocate a receive buffer, perform
1118  * the DMA mapping and init the descriptor.
1119  */
1120 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1121 				  int i, gfp_t flags, u32 queue)
1122 {
1123 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1124 	struct sk_buff *skb;
1125 
1126 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1127 	if (!skb) {
1128 		netdev_err(priv->dev,
1129 			   "%s: Rx init fails; skb is NULL\n", __func__);
1130 		return -ENOMEM;
1131 	}
1132 	rx_q->rx_skbuff[i] = skb;
1133 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1134 						priv->dma_buf_sz,
1135 						DMA_FROM_DEVICE);
1136 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1137 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1138 		dev_kfree_skb_any(skb);
1139 		return -EINVAL;
1140 	}
1141 
1142 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
1143 		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1144 	else
1145 		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1146 
1147 	if ((priv->hw->mode->init_desc3) &&
1148 	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
1149 		priv->hw->mode->init_desc3(p);
1150 
1151 	return 0;
1152 }
1153 
1154 /**
1155  * stmmac_free_rx_buffer - free RX dma buffers
1156  * @priv: private structure
1157  * @queue: RX queue index
1158  * @i: buffer index.
1159  */
1160 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1161 {
1162 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1163 
1164 	if (rx_q->rx_skbuff[i]) {
1165 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1166 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1167 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1168 	}
1169 	rx_q->rx_skbuff[i] = NULL;
1170 }
1171 
1172 /**
1173  * stmmac_free_tx_buffer - free RX dma buffers
1174  * @priv: private structure
1175  * @queue: RX queue index
1176  * @i: buffer index.
1177  */
1178 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1179 {
1180 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1181 
1182 	if (tx_q->tx_skbuff_dma[i].buf) {
1183 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1184 			dma_unmap_page(priv->device,
1185 				       tx_q->tx_skbuff_dma[i].buf,
1186 				       tx_q->tx_skbuff_dma[i].len,
1187 				       DMA_TO_DEVICE);
1188 		else
1189 			dma_unmap_single(priv->device,
1190 					 tx_q->tx_skbuff_dma[i].buf,
1191 					 tx_q->tx_skbuff_dma[i].len,
1192 					 DMA_TO_DEVICE);
1193 	}
1194 
1195 	if (tx_q->tx_skbuff[i]) {
1196 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1197 		tx_q->tx_skbuff[i] = NULL;
1198 		tx_q->tx_skbuff_dma[i].buf = 0;
1199 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1200 	}
1201 }
1202 
1203 /**
1204  * init_dma_rx_desc_rings - init the RX descriptor rings
1205  * @dev: net device structure
1206  * @flags: gfp flag.
1207  * Description: this function initializes the DMA RX descriptors
1208  * and allocates the socket buffers. It supports the chained and ring
1209  * modes.
1210  */
1211 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1212 {
1213 	struct stmmac_priv *priv = netdev_priv(dev);
1214 	u32 rx_count = priv->plat->rx_queues_to_use;
1215 	unsigned int bfsize = 0;
1216 	int ret = -ENOMEM;
1217 	int queue;
1218 	int i;
1219 
1220 	if (priv->hw->mode->set_16kib_bfsize)
1221 		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1222 
1223 	if (bfsize < BUF_SIZE_16KiB)
1224 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1225 
1226 	priv->dma_buf_sz = bfsize;
1227 
1228 	/* RX INITIALIZATION */
1229 	netif_dbg(priv, probe, priv->dev,
1230 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1231 
1232 	for (queue = 0; queue < rx_count; queue++) {
1233 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1234 
1235 		netif_dbg(priv, probe, priv->dev,
1236 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1237 			  (u32)rx_q->dma_rx_phy);
1238 
1239 		for (i = 0; i < DMA_RX_SIZE; i++) {
1240 			struct dma_desc *p;
1241 
1242 			if (priv->extend_desc)
1243 				p = &((rx_q->dma_erx + i)->basic);
1244 			else
1245 				p = rx_q->dma_rx + i;
1246 
1247 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1248 						     queue);
1249 			if (ret)
1250 				goto err_init_rx_buffers;
1251 
1252 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1253 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1254 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1255 		}
1256 
1257 		rx_q->cur_rx = 0;
1258 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1259 
1260 		stmmac_clear_rx_descriptors(priv, queue);
1261 
1262 		/* Setup the chained descriptor addresses */
1263 		if (priv->mode == STMMAC_CHAIN_MODE) {
1264 			if (priv->extend_desc)
1265 				priv->hw->mode->init(rx_q->dma_erx,
1266 						     rx_q->dma_rx_phy,
1267 						     DMA_RX_SIZE, 1);
1268 			else
1269 				priv->hw->mode->init(rx_q->dma_rx,
1270 						     rx_q->dma_rx_phy,
1271 						     DMA_RX_SIZE, 0);
1272 		}
1273 	}
1274 
1275 	buf_sz = bfsize;
1276 
1277 	return 0;
1278 
1279 err_init_rx_buffers:
1280 	while (queue >= 0) {
1281 		while (--i >= 0)
1282 			stmmac_free_rx_buffer(priv, queue, i);
1283 
1284 		if (queue == 0)
1285 			break;
1286 
1287 		i = DMA_RX_SIZE;
1288 		queue--;
1289 	}
1290 
1291 	return ret;
1292 }
1293 
1294 /**
1295  * init_dma_tx_desc_rings - init the TX descriptor rings
1296  * @dev: net device structure.
1297  * Description: this function initializes the DMA TX descriptors
1298  * and allocates the socket buffers. It supports the chained and ring
1299  * modes.
1300  */
1301 static int init_dma_tx_desc_rings(struct net_device *dev)
1302 {
1303 	struct stmmac_priv *priv = netdev_priv(dev);
1304 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1305 	u32 queue;
1306 	int i;
1307 
1308 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1309 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1310 
1311 		netif_dbg(priv, probe, priv->dev,
1312 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1313 			 (u32)tx_q->dma_tx_phy);
1314 
1315 		/* Setup the chained descriptor addresses */
1316 		if (priv->mode == STMMAC_CHAIN_MODE) {
1317 			if (priv->extend_desc)
1318 				priv->hw->mode->init(tx_q->dma_etx,
1319 						     tx_q->dma_tx_phy,
1320 						     DMA_TX_SIZE, 1);
1321 			else
1322 				priv->hw->mode->init(tx_q->dma_tx,
1323 						     tx_q->dma_tx_phy,
1324 						     DMA_TX_SIZE, 0);
1325 		}
1326 
1327 		for (i = 0; i < DMA_TX_SIZE; i++) {
1328 			struct dma_desc *p;
1329 			if (priv->extend_desc)
1330 				p = &((tx_q->dma_etx + i)->basic);
1331 			else
1332 				p = tx_q->dma_tx + i;
1333 
1334 			if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1335 				p->des0 = 0;
1336 				p->des1 = 0;
1337 				p->des2 = 0;
1338 				p->des3 = 0;
1339 			} else {
1340 				p->des2 = 0;
1341 			}
1342 
1343 			tx_q->tx_skbuff_dma[i].buf = 0;
1344 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1345 			tx_q->tx_skbuff_dma[i].len = 0;
1346 			tx_q->tx_skbuff_dma[i].last_segment = false;
1347 			tx_q->tx_skbuff[i] = NULL;
1348 		}
1349 
1350 		tx_q->dirty_tx = 0;
1351 		tx_q->cur_tx = 0;
1352 
1353 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1354 	}
1355 
1356 	return 0;
1357 }
1358 
1359 /**
1360  * init_dma_desc_rings - init the RX/TX descriptor rings
1361  * @dev: net device structure
1362  * @flags: gfp flag.
1363  * Description: this function initializes the DMA RX/TX descriptors
1364  * and allocates the socket buffers. It supports the chained and ring
1365  * modes.
1366  */
1367 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1368 {
1369 	struct stmmac_priv *priv = netdev_priv(dev);
1370 	int ret;
1371 
1372 	ret = init_dma_rx_desc_rings(dev, flags);
1373 	if (ret)
1374 		return ret;
1375 
1376 	ret = init_dma_tx_desc_rings(dev);
1377 
1378 	stmmac_clear_descriptors(priv);
1379 
1380 	if (netif_msg_hw(priv))
1381 		stmmac_display_rings(priv);
1382 
1383 	return ret;
1384 }
1385 
1386 /**
1387  * dma_free_rx_skbufs - free RX dma buffers
1388  * @priv: private structure
1389  * @queue: RX queue index
1390  */
1391 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1392 {
1393 	int i;
1394 
1395 	for (i = 0; i < DMA_RX_SIZE; i++)
1396 		stmmac_free_rx_buffer(priv, queue, i);
1397 }
1398 
1399 /**
1400  * dma_free_tx_skbufs - free TX dma buffers
1401  * @priv: private structure
1402  * @queue: TX queue index
1403  */
1404 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1405 {
1406 	int i;
1407 
1408 	for (i = 0; i < DMA_TX_SIZE; i++)
1409 		stmmac_free_tx_buffer(priv, queue, i);
1410 }
1411 
1412 /**
1413  * free_dma_rx_desc_resources - free RX dma desc resources
1414  * @priv: private structure
1415  */
1416 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1417 {
1418 	u32 rx_count = priv->plat->rx_queues_to_use;
1419 	u32 queue;
1420 
1421 	/* Free RX queue resources */
1422 	for (queue = 0; queue < rx_count; queue++) {
1423 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1424 
1425 		/* Release the DMA RX socket buffers */
1426 		dma_free_rx_skbufs(priv, queue);
1427 
1428 		/* Free DMA regions of consistent memory previously allocated */
1429 		if (!priv->extend_desc)
1430 			dma_free_coherent(priv->device,
1431 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1432 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1433 		else
1434 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1435 					  sizeof(struct dma_extended_desc),
1436 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1437 
1438 		kfree(rx_q->rx_skbuff_dma);
1439 		kfree(rx_q->rx_skbuff);
1440 	}
1441 }
1442 
1443 /**
1444  * free_dma_tx_desc_resources - free TX dma desc resources
1445  * @priv: private structure
1446  */
1447 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1448 {
1449 	u32 tx_count = priv->plat->tx_queues_to_use;
1450 	u32 queue;
1451 
1452 	/* Free TX queue resources */
1453 	for (queue = 0; queue < tx_count; queue++) {
1454 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1455 
1456 		/* Release the DMA TX socket buffers */
1457 		dma_free_tx_skbufs(priv, queue);
1458 
1459 		/* Free DMA regions of consistent memory previously allocated */
1460 		if (!priv->extend_desc)
1461 			dma_free_coherent(priv->device,
1462 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1463 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1464 		else
1465 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1466 					  sizeof(struct dma_extended_desc),
1467 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1468 
1469 		kfree(tx_q->tx_skbuff_dma);
1470 		kfree(tx_q->tx_skbuff);
1471 	}
1472 }
1473 
1474 /**
1475  * alloc_dma_rx_desc_resources - alloc RX resources.
1476  * @priv: private structure
1477  * Description: according to which descriptor can be used (extend or basic)
1478  * this function allocates the resources for TX and RX paths. In case of
1479  * reception, for example, it pre-allocated the RX socket buffer in order to
1480  * allow zero-copy mechanism.
1481  */
1482 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1483 {
1484 	u32 rx_count = priv->plat->rx_queues_to_use;
1485 	int ret = -ENOMEM;
1486 	u32 queue;
1487 
1488 	/* RX queues buffers and DMA */
1489 	for (queue = 0; queue < rx_count; queue++) {
1490 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1491 
1492 		rx_q->queue_index = queue;
1493 		rx_q->priv_data = priv;
1494 
1495 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1496 						    sizeof(dma_addr_t),
1497 						    GFP_KERNEL);
1498 		if (!rx_q->rx_skbuff_dma)
1499 			goto err_dma;
1500 
1501 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1502 						sizeof(struct sk_buff *),
1503 						GFP_KERNEL);
1504 		if (!rx_q->rx_skbuff)
1505 			goto err_dma;
1506 
1507 		if (priv->extend_desc) {
1508 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1509 							    DMA_RX_SIZE *
1510 							    sizeof(struct
1511 							    dma_extended_desc),
1512 							    &rx_q->dma_rx_phy,
1513 							    GFP_KERNEL);
1514 			if (!rx_q->dma_erx)
1515 				goto err_dma;
1516 
1517 		} else {
1518 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1519 							   DMA_RX_SIZE *
1520 							   sizeof(struct
1521 							   dma_desc),
1522 							   &rx_q->dma_rx_phy,
1523 							   GFP_KERNEL);
1524 			if (!rx_q->dma_rx)
1525 				goto err_dma;
1526 		}
1527 	}
1528 
1529 	return 0;
1530 
1531 err_dma:
1532 	free_dma_rx_desc_resources(priv);
1533 
1534 	return ret;
1535 }
1536 
1537 /**
1538  * alloc_dma_tx_desc_resources - alloc TX resources.
1539  * @priv: private structure
1540  * Description: according to which descriptor can be used (extend or basic)
1541  * this function allocates the resources for TX and RX paths. In case of
1542  * reception, for example, it pre-allocated the RX socket buffer in order to
1543  * allow zero-copy mechanism.
1544  */
1545 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1546 {
1547 	u32 tx_count = priv->plat->tx_queues_to_use;
1548 	int ret = -ENOMEM;
1549 	u32 queue;
1550 
1551 	/* TX queues buffers and DMA */
1552 	for (queue = 0; queue < tx_count; queue++) {
1553 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1554 
1555 		tx_q->queue_index = queue;
1556 		tx_q->priv_data = priv;
1557 
1558 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1559 						    sizeof(*tx_q->tx_skbuff_dma),
1560 						    GFP_KERNEL);
1561 		if (!tx_q->tx_skbuff_dma)
1562 			goto err_dma;
1563 
1564 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1565 						sizeof(struct sk_buff *),
1566 						GFP_KERNEL);
1567 		if (!tx_q->tx_skbuff)
1568 			goto err_dma;
1569 
1570 		if (priv->extend_desc) {
1571 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1572 							    DMA_TX_SIZE *
1573 							    sizeof(struct
1574 							    dma_extended_desc),
1575 							    &tx_q->dma_tx_phy,
1576 							    GFP_KERNEL);
1577 			if (!tx_q->dma_etx)
1578 				goto err_dma;
1579 		} else {
1580 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1581 							   DMA_TX_SIZE *
1582 							   sizeof(struct
1583 								  dma_desc),
1584 							   &tx_q->dma_tx_phy,
1585 							   GFP_KERNEL);
1586 			if (!tx_q->dma_tx)
1587 				goto err_dma;
1588 		}
1589 	}
1590 
1591 	return 0;
1592 
1593 err_dma:
1594 	free_dma_tx_desc_resources(priv);
1595 
1596 	return ret;
1597 }
1598 
1599 /**
1600  * alloc_dma_desc_resources - alloc TX/RX resources.
1601  * @priv: private structure
1602  * Description: according to which descriptor can be used (extend or basic)
1603  * this function allocates the resources for TX and RX paths. In case of
1604  * reception, for example, it pre-allocated the RX socket buffer in order to
1605  * allow zero-copy mechanism.
1606  */
1607 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1608 {
1609 	/* RX Allocation */
1610 	int ret = alloc_dma_rx_desc_resources(priv);
1611 
1612 	if (ret)
1613 		return ret;
1614 
1615 	ret = alloc_dma_tx_desc_resources(priv);
1616 
1617 	return ret;
1618 }
1619 
1620 /**
1621  * free_dma_desc_resources - free dma desc resources
1622  * @priv: private structure
1623  */
1624 static void free_dma_desc_resources(struct stmmac_priv *priv)
1625 {
1626 	/* Release the DMA RX socket buffers */
1627 	free_dma_rx_desc_resources(priv);
1628 
1629 	/* Release the DMA TX socket buffers */
1630 	free_dma_tx_desc_resources(priv);
1631 }
1632 
1633 /**
1634  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1635  *  @priv: driver private structure
1636  *  Description: It is used for enabling the rx queues in the MAC
1637  */
1638 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1639 {
1640 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1641 	int queue;
1642 	u8 mode;
1643 
1644 	for (queue = 0; queue < rx_queues_count; queue++) {
1645 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1646 		priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1647 	}
1648 }
1649 
1650 /**
1651  * stmmac_start_rx_dma - start RX DMA channel
1652  * @priv: driver private structure
1653  * @chan: RX channel index
1654  * Description:
1655  * This starts a RX DMA channel
1656  */
1657 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1658 {
1659 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1660 	priv->hw->dma->start_rx(priv->ioaddr, chan);
1661 }
1662 
1663 /**
1664  * stmmac_start_tx_dma - start TX DMA channel
1665  * @priv: driver private structure
1666  * @chan: TX channel index
1667  * Description:
1668  * This starts a TX DMA channel
1669  */
1670 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1671 {
1672 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1673 	priv->hw->dma->start_tx(priv->ioaddr, chan);
1674 }
1675 
1676 /**
1677  * stmmac_stop_rx_dma - stop RX DMA channel
1678  * @priv: driver private structure
1679  * @chan: RX channel index
1680  * Description:
1681  * This stops a RX DMA channel
1682  */
1683 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1684 {
1685 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1686 	priv->hw->dma->stop_rx(priv->ioaddr, chan);
1687 }
1688 
1689 /**
1690  * stmmac_stop_tx_dma - stop TX DMA channel
1691  * @priv: driver private structure
1692  * @chan: TX channel index
1693  * Description:
1694  * This stops a TX DMA channel
1695  */
1696 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1697 {
1698 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1699 	priv->hw->dma->stop_tx(priv->ioaddr, chan);
1700 }
1701 
1702 /**
1703  * stmmac_start_all_dma - start all RX and TX DMA channels
1704  * @priv: driver private structure
1705  * Description:
1706  * This starts all the RX and TX DMA channels
1707  */
1708 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1709 {
1710 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1711 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1712 	u32 chan = 0;
1713 
1714 	for (chan = 0; chan < rx_channels_count; chan++)
1715 		stmmac_start_rx_dma(priv, chan);
1716 
1717 	for (chan = 0; chan < tx_channels_count; chan++)
1718 		stmmac_start_tx_dma(priv, chan);
1719 }
1720 
1721 /**
1722  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1723  * @priv: driver private structure
1724  * Description:
1725  * This stops the RX and TX DMA channels
1726  */
1727 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1728 {
1729 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1730 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1731 	u32 chan = 0;
1732 
1733 	for (chan = 0; chan < rx_channels_count; chan++)
1734 		stmmac_stop_rx_dma(priv, chan);
1735 
1736 	for (chan = 0; chan < tx_channels_count; chan++)
1737 		stmmac_stop_tx_dma(priv, chan);
1738 }
1739 
1740 /**
1741  *  stmmac_dma_operation_mode - HW DMA operation mode
1742  *  @priv: driver private structure
1743  *  Description: it is used for configuring the DMA operation mode register in
1744  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1745  */
1746 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1747 {
1748 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1749 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1750 	int rxfifosz = priv->plat->rx_fifo_size;
1751 	int txfifosz = priv->plat->tx_fifo_size;
1752 	u32 txmode = 0;
1753 	u32 rxmode = 0;
1754 	u32 chan = 0;
1755 	u8 qmode = 0;
1756 
1757 	if (rxfifosz == 0)
1758 		rxfifosz = priv->dma_cap.rx_fifo_size;
1759 	if (txfifosz == 0)
1760 		txfifosz = priv->dma_cap.tx_fifo_size;
1761 
1762 	/* Adjust for real per queue fifo size */
1763 	rxfifosz /= rx_channels_count;
1764 	txfifosz /= tx_channels_count;
1765 
1766 	if (priv->plat->force_thresh_dma_mode) {
1767 		txmode = tc;
1768 		rxmode = tc;
1769 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1770 		/*
1771 		 * In case of GMAC, SF mode can be enabled
1772 		 * to perform the TX COE in HW. This depends on:
1773 		 * 1) TX COE if actually supported
1774 		 * 2) There is no bugged Jumbo frame support
1775 		 *    that needs to not insert csum in the TDES.
1776 		 */
1777 		txmode = SF_DMA_MODE;
1778 		rxmode = SF_DMA_MODE;
1779 		priv->xstats.threshold = SF_DMA_MODE;
1780 	} else {
1781 		txmode = tc;
1782 		rxmode = SF_DMA_MODE;
1783 	}
1784 
1785 	/* configure all channels */
1786 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1787 		for (chan = 0; chan < rx_channels_count; chan++) {
1788 			qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1789 
1790 			priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1791 						   rxfifosz, qmode);
1792 		}
1793 
1794 		for (chan = 0; chan < tx_channels_count; chan++) {
1795 			qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1796 
1797 			priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1798 						   txfifosz, qmode);
1799 		}
1800 	} else {
1801 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1802 					rxfifosz);
1803 	}
1804 }
1805 
1806 /**
1807  * stmmac_tx_clean - to manage the transmission completion
1808  * @priv: driver private structure
1809  * @queue: TX queue index
1810  * Description: it reclaims the transmit resources after transmission completes.
1811  */
1812 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1813 {
1814 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1815 	unsigned int bytes_compl = 0, pkts_compl = 0;
1816 	unsigned int entry;
1817 
1818 	netif_tx_lock(priv->dev);
1819 
1820 	priv->xstats.tx_clean++;
1821 
1822 	entry = tx_q->dirty_tx;
1823 	while (entry != tx_q->cur_tx) {
1824 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1825 		struct dma_desc *p;
1826 		int status;
1827 
1828 		if (priv->extend_desc)
1829 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1830 		else
1831 			p = tx_q->dma_tx + entry;
1832 
1833 		status = priv->hw->desc->tx_status(&priv->dev->stats,
1834 						      &priv->xstats, p,
1835 						      priv->ioaddr);
1836 		/* Check if the descriptor is owned by the DMA */
1837 		if (unlikely(status & tx_dma_own))
1838 			break;
1839 
1840 		/* Just consider the last segment and ...*/
1841 		if (likely(!(status & tx_not_ls))) {
1842 			/* ... verify the status error condition */
1843 			if (unlikely(status & tx_err)) {
1844 				priv->dev->stats.tx_errors++;
1845 			} else {
1846 				priv->dev->stats.tx_packets++;
1847 				priv->xstats.tx_pkt_n++;
1848 			}
1849 			stmmac_get_tx_hwtstamp(priv, p, skb);
1850 		}
1851 
1852 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1853 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1854 				dma_unmap_page(priv->device,
1855 					       tx_q->tx_skbuff_dma[entry].buf,
1856 					       tx_q->tx_skbuff_dma[entry].len,
1857 					       DMA_TO_DEVICE);
1858 			else
1859 				dma_unmap_single(priv->device,
1860 						 tx_q->tx_skbuff_dma[entry].buf,
1861 						 tx_q->tx_skbuff_dma[entry].len,
1862 						 DMA_TO_DEVICE);
1863 			tx_q->tx_skbuff_dma[entry].buf = 0;
1864 			tx_q->tx_skbuff_dma[entry].len = 0;
1865 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1866 		}
1867 
1868 		if (priv->hw->mode->clean_desc3)
1869 			priv->hw->mode->clean_desc3(tx_q, p);
1870 
1871 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1872 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1873 
1874 		if (likely(skb != NULL)) {
1875 			pkts_compl++;
1876 			bytes_compl += skb->len;
1877 			dev_consume_skb_any(skb);
1878 			tx_q->tx_skbuff[entry] = NULL;
1879 		}
1880 
1881 		priv->hw->desc->release_tx_desc(p, priv->mode);
1882 
1883 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1884 	}
1885 	tx_q->dirty_tx = entry;
1886 
1887 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1888 				  pkts_compl, bytes_compl);
1889 
1890 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1891 								queue))) &&
1892 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1893 
1894 		netif_dbg(priv, tx_done, priv->dev,
1895 			  "%s: restart transmit\n", __func__);
1896 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1897 	}
1898 
1899 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1900 		stmmac_enable_eee_mode(priv);
1901 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1902 	}
1903 	netif_tx_unlock(priv->dev);
1904 }
1905 
1906 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1907 {
1908 	priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1909 }
1910 
1911 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1912 {
1913 	priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1914 }
1915 
1916 /**
1917  * stmmac_tx_err - to manage the tx error
1918  * @priv: driver private structure
1919  * @chan: channel index
1920  * Description: it cleans the descriptors and restarts the transmission
1921  * in case of transmission errors.
1922  */
1923 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1924 {
1925 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1926 	int i;
1927 
1928 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1929 
1930 	stmmac_stop_tx_dma(priv, chan);
1931 	dma_free_tx_skbufs(priv, chan);
1932 	for (i = 0; i < DMA_TX_SIZE; i++)
1933 		if (priv->extend_desc)
1934 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1935 						     priv->mode,
1936 						     (i == DMA_TX_SIZE - 1));
1937 		else
1938 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1939 						     priv->mode,
1940 						     (i == DMA_TX_SIZE - 1));
1941 	tx_q->dirty_tx = 0;
1942 	tx_q->cur_tx = 0;
1943 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1944 	stmmac_start_tx_dma(priv, chan);
1945 
1946 	priv->dev->stats.tx_errors++;
1947 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1948 }
1949 
1950 /**
1951  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1952  *  @priv: driver private structure
1953  *  @txmode: TX operating mode
1954  *  @rxmode: RX operating mode
1955  *  @chan: channel index
1956  *  Description: it is used for configuring of the DMA operation mode in
1957  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1958  *  mode.
1959  */
1960 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1961 					  u32 rxmode, u32 chan)
1962 {
1963 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1964 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1965 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1966 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1967 	int rxfifosz = priv->plat->rx_fifo_size;
1968 	int txfifosz = priv->plat->tx_fifo_size;
1969 
1970 	if (rxfifosz == 0)
1971 		rxfifosz = priv->dma_cap.rx_fifo_size;
1972 	if (txfifosz == 0)
1973 		txfifosz = priv->dma_cap.tx_fifo_size;
1974 
1975 	/* Adjust for real per queue fifo size */
1976 	rxfifosz /= rx_channels_count;
1977 	txfifosz /= tx_channels_count;
1978 
1979 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1980 		priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1981 					   rxfifosz, rxqmode);
1982 		priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1983 					   txfifosz, txqmode);
1984 	} else {
1985 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1986 					rxfifosz);
1987 	}
1988 }
1989 
1990 /**
1991  * stmmac_dma_interrupt - DMA ISR
1992  * @priv: driver private structure
1993  * Description: this is the DMA ISR. It is called by the main ISR.
1994  * It calls the dwmac dma routine and schedule poll method in case of some
1995  * work can be done.
1996  */
1997 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1998 {
1999 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2000 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2001 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2002 				tx_channel_count : rx_channel_count;
2003 	u32 chan;
2004 	bool poll_scheduled = false;
2005 	int status[channels_to_check];
2006 
2007 	/* Each DMA channel can be used for rx and tx simultaneously, yet
2008 	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2009 	 * stmmac_channel struct.
2010 	 * Because of this, stmmac_poll currently checks (and possibly wakes)
2011 	 * all tx queues rather than just a single tx queue.
2012 	 */
2013 	for (chan = 0; chan < channels_to_check; chan++)
2014 		status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr,
2015 							    &priv->xstats,
2016 							    chan);
2017 
2018 	for (chan = 0; chan < rx_channel_count; chan++) {
2019 		if (likely(status[chan] & handle_rx)) {
2020 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2021 
2022 			if (likely(napi_schedule_prep(&rx_q->napi))) {
2023 				stmmac_disable_dma_irq(priv, chan);
2024 				__napi_schedule(&rx_q->napi);
2025 				poll_scheduled = true;
2026 			}
2027 		}
2028 	}
2029 
2030 	/* If we scheduled poll, we already know that tx queues will be checked.
2031 	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2032 	 * completed transmission, if so, call stmmac_poll (once).
2033 	 */
2034 	if (!poll_scheduled) {
2035 		for (chan = 0; chan < tx_channel_count; chan++) {
2036 			if (status[chan] & handle_tx) {
2037 				/* It doesn't matter what rx queue we choose
2038 				 * here. We use 0 since it always exists.
2039 				 */
2040 				struct stmmac_rx_queue *rx_q =
2041 					&priv->rx_queue[0];
2042 
2043 				if (likely(napi_schedule_prep(&rx_q->napi))) {
2044 					stmmac_disable_dma_irq(priv, chan);
2045 					__napi_schedule(&rx_q->napi);
2046 				}
2047 				break;
2048 			}
2049 		}
2050 	}
2051 
2052 	for (chan = 0; chan < tx_channel_count; chan++) {
2053 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2054 			/* Try to bump up the dma threshold on this failure */
2055 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2056 			    (tc <= 256)) {
2057 				tc += 64;
2058 				if (priv->plat->force_thresh_dma_mode)
2059 					stmmac_set_dma_operation_mode(priv,
2060 								      tc,
2061 								      tc,
2062 								      chan);
2063 				else
2064 					stmmac_set_dma_operation_mode(priv,
2065 								    tc,
2066 								    SF_DMA_MODE,
2067 								    chan);
2068 				priv->xstats.threshold = tc;
2069 			}
2070 		} else if (unlikely(status[chan] == tx_hard_error)) {
2071 			stmmac_tx_err(priv, chan);
2072 		}
2073 	}
2074 }
2075 
2076 /**
2077  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2078  * @priv: driver private structure
2079  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2080  */
2081 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2082 {
2083 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2084 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2085 
2086 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2087 		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2088 		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2089 	} else {
2090 		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2091 		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2092 	}
2093 
2094 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2095 
2096 	if (priv->dma_cap.rmon) {
2097 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2098 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2099 	} else
2100 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2101 }
2102 
2103 /**
2104  * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
2105  * @priv: driver private structure
2106  * Description: select the Enhanced/Alternate or Normal descriptors.
2107  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2108  * supported by the HW capability register.
2109  */
2110 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2111 {
2112 	if (priv->plat->enh_desc) {
2113 		dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2114 
2115 		/* GMAC older than 3.50 has no extended descriptors */
2116 		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2117 			dev_info(priv->device, "Enabled extended descriptors\n");
2118 			priv->extend_desc = 1;
2119 		} else
2120 			dev_warn(priv->device, "Extended descriptors not supported\n");
2121 
2122 		priv->hw->desc = &enh_desc_ops;
2123 	} else {
2124 		dev_info(priv->device, "Normal descriptors\n");
2125 		priv->hw->desc = &ndesc_ops;
2126 	}
2127 }
2128 
2129 /**
2130  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2131  * @priv: driver private structure
2132  * Description:
2133  *  new GMAC chip generations have a new register to indicate the
2134  *  presence of the optional feature/functions.
2135  *  This can be also used to override the value passed through the
2136  *  platform and necessary for old MAC10/100 and GMAC chips.
2137  */
2138 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2139 {
2140 	u32 ret = 0;
2141 
2142 	if (priv->hw->dma->get_hw_feature) {
2143 		priv->hw->dma->get_hw_feature(priv->ioaddr,
2144 					      &priv->dma_cap);
2145 		ret = 1;
2146 	}
2147 
2148 	return ret;
2149 }
2150 
2151 /**
2152  * stmmac_check_ether_addr - check if the MAC addr is valid
2153  * @priv: driver private structure
2154  * Description:
2155  * it is to verify if the MAC address is valid, in case of failures it
2156  * generates a random MAC address
2157  */
2158 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2159 {
2160 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2161 		priv->hw->mac->get_umac_addr(priv->hw,
2162 					     priv->dev->dev_addr, 0);
2163 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2164 			eth_hw_addr_random(priv->dev);
2165 		netdev_info(priv->dev, "device MAC address %pM\n",
2166 			    priv->dev->dev_addr);
2167 	}
2168 }
2169 
2170 /**
2171  * stmmac_init_dma_engine - DMA init.
2172  * @priv: driver private structure
2173  * Description:
2174  * It inits the DMA invoking the specific MAC/GMAC callback.
2175  * Some DMA parameters can be passed from the platform;
2176  * in case of these are not passed a default is kept for the MAC or GMAC.
2177  */
2178 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2179 {
2180 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2181 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2182 	struct stmmac_rx_queue *rx_q;
2183 	struct stmmac_tx_queue *tx_q;
2184 	u32 dummy_dma_rx_phy = 0;
2185 	u32 dummy_dma_tx_phy = 0;
2186 	u32 chan = 0;
2187 	int atds = 0;
2188 	int ret = 0;
2189 
2190 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2191 		dev_err(priv->device, "Invalid DMA configuration\n");
2192 		return -EINVAL;
2193 	}
2194 
2195 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2196 		atds = 1;
2197 
2198 	ret = priv->hw->dma->reset(priv->ioaddr);
2199 	if (ret) {
2200 		dev_err(priv->device, "Failed to reset the dma\n");
2201 		return ret;
2202 	}
2203 
2204 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2205 		/* DMA Configuration */
2206 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2207 				    dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2208 
2209 		/* DMA RX Channel Configuration */
2210 		for (chan = 0; chan < rx_channels_count; chan++) {
2211 			rx_q = &priv->rx_queue[chan];
2212 
2213 			priv->hw->dma->init_rx_chan(priv->ioaddr,
2214 						    priv->plat->dma_cfg,
2215 						    rx_q->dma_rx_phy, chan);
2216 
2217 			rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2218 				    (DMA_RX_SIZE * sizeof(struct dma_desc));
2219 			priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2220 						       rx_q->rx_tail_addr,
2221 						       chan);
2222 		}
2223 
2224 		/* DMA TX Channel Configuration */
2225 		for (chan = 0; chan < tx_channels_count; chan++) {
2226 			tx_q = &priv->tx_queue[chan];
2227 
2228 			priv->hw->dma->init_chan(priv->ioaddr,
2229 						 priv->plat->dma_cfg,
2230 						 chan);
2231 
2232 			priv->hw->dma->init_tx_chan(priv->ioaddr,
2233 						    priv->plat->dma_cfg,
2234 						    tx_q->dma_tx_phy, chan);
2235 
2236 			tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2237 				    (DMA_TX_SIZE * sizeof(struct dma_desc));
2238 			priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2239 						       tx_q->tx_tail_addr,
2240 						       chan);
2241 		}
2242 	} else {
2243 		rx_q = &priv->rx_queue[chan];
2244 		tx_q = &priv->tx_queue[chan];
2245 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2246 				    tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2247 	}
2248 
2249 	if (priv->plat->axi && priv->hw->dma->axi)
2250 		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2251 
2252 	return ret;
2253 }
2254 
2255 /**
2256  * stmmac_tx_timer - mitigation sw timer for tx.
2257  * @data: data pointer
2258  * Description:
2259  * This is the timer handler to directly invoke the stmmac_tx_clean.
2260  */
2261 static void stmmac_tx_timer(struct timer_list *t)
2262 {
2263 	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2264 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2265 	u32 queue;
2266 
2267 	/* let's scan all the tx queues */
2268 	for (queue = 0; queue < tx_queues_count; queue++)
2269 		stmmac_tx_clean(priv, queue);
2270 }
2271 
2272 /**
2273  * stmmac_init_tx_coalesce - init tx mitigation options.
2274  * @priv: driver private structure
2275  * Description:
2276  * This inits the transmit coalesce parameters: i.e. timer rate,
2277  * timer handler and default threshold used for enabling the
2278  * interrupt on completion bit.
2279  */
2280 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2281 {
2282 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2283 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2284 	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2285 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2286 	add_timer(&priv->txtimer);
2287 }
2288 
2289 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2290 {
2291 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2292 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2293 	u32 chan;
2294 
2295 	/* set TX ring length */
2296 	if (priv->hw->dma->set_tx_ring_len) {
2297 		for (chan = 0; chan < tx_channels_count; chan++)
2298 			priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2299 						       (DMA_TX_SIZE - 1), chan);
2300 	}
2301 
2302 	/* set RX ring length */
2303 	if (priv->hw->dma->set_rx_ring_len) {
2304 		for (chan = 0; chan < rx_channels_count; chan++)
2305 			priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2306 						       (DMA_RX_SIZE - 1), chan);
2307 	}
2308 }
2309 
2310 /**
2311  *  stmmac_set_tx_queue_weight - Set TX queue weight
2312  *  @priv: driver private structure
2313  *  Description: It is used for setting TX queues weight
2314  */
2315 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2316 {
2317 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2318 	u32 weight;
2319 	u32 queue;
2320 
2321 	for (queue = 0; queue < tx_queues_count; queue++) {
2322 		weight = priv->plat->tx_queues_cfg[queue].weight;
2323 		priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2324 	}
2325 }
2326 
2327 /**
2328  *  stmmac_configure_cbs - Configure CBS in TX queue
2329  *  @priv: driver private structure
2330  *  Description: It is used for configuring CBS in AVB TX queues
2331  */
2332 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2333 {
2334 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2335 	u32 mode_to_use;
2336 	u32 queue;
2337 
2338 	/* queue 0 is reserved for legacy traffic */
2339 	for (queue = 1; queue < tx_queues_count; queue++) {
2340 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2341 		if (mode_to_use == MTL_QUEUE_DCB)
2342 			continue;
2343 
2344 		priv->hw->mac->config_cbs(priv->hw,
2345 				priv->plat->tx_queues_cfg[queue].send_slope,
2346 				priv->plat->tx_queues_cfg[queue].idle_slope,
2347 				priv->plat->tx_queues_cfg[queue].high_credit,
2348 				priv->plat->tx_queues_cfg[queue].low_credit,
2349 				queue);
2350 	}
2351 }
2352 
2353 /**
2354  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2355  *  @priv: driver private structure
2356  *  Description: It is used for mapping RX queues to RX dma channels
2357  */
2358 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2359 {
2360 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2361 	u32 queue;
2362 	u32 chan;
2363 
2364 	for (queue = 0; queue < rx_queues_count; queue++) {
2365 		chan = priv->plat->rx_queues_cfg[queue].chan;
2366 		priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2367 	}
2368 }
2369 
2370 /**
2371  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2372  *  @priv: driver private structure
2373  *  Description: It is used for configuring the RX Queue Priority
2374  */
2375 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2376 {
2377 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2378 	u32 queue;
2379 	u32 prio;
2380 
2381 	for (queue = 0; queue < rx_queues_count; queue++) {
2382 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2383 			continue;
2384 
2385 		prio = priv->plat->rx_queues_cfg[queue].prio;
2386 		priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2387 	}
2388 }
2389 
2390 /**
2391  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2392  *  @priv: driver private structure
2393  *  Description: It is used for configuring the TX Queue Priority
2394  */
2395 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2396 {
2397 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2398 	u32 queue;
2399 	u32 prio;
2400 
2401 	for (queue = 0; queue < tx_queues_count; queue++) {
2402 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2403 			continue;
2404 
2405 		prio = priv->plat->tx_queues_cfg[queue].prio;
2406 		priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2407 	}
2408 }
2409 
2410 /**
2411  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2412  *  @priv: driver private structure
2413  *  Description: It is used for configuring the RX queue routing
2414  */
2415 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2416 {
2417 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2418 	u32 queue;
2419 	u8 packet;
2420 
2421 	for (queue = 0; queue < rx_queues_count; queue++) {
2422 		/* no specific packet type routing specified for the queue */
2423 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2424 			continue;
2425 
2426 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2427 		priv->hw->mac->rx_queue_prio(priv->hw, packet, queue);
2428 	}
2429 }
2430 
2431 /**
2432  *  stmmac_mtl_configuration - Configure MTL
2433  *  @priv: driver private structure
2434  *  Description: It is used for configurring MTL
2435  */
2436 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2437 {
2438 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2439 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2440 
2441 	if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2442 		stmmac_set_tx_queue_weight(priv);
2443 
2444 	/* Configure MTL RX algorithms */
2445 	if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2446 		priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2447 						priv->plat->rx_sched_algorithm);
2448 
2449 	/* Configure MTL TX algorithms */
2450 	if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2451 		priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2452 						priv->plat->tx_sched_algorithm);
2453 
2454 	/* Configure CBS in AVB TX queues */
2455 	if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2456 		stmmac_configure_cbs(priv);
2457 
2458 	/* Map RX MTL to DMA channels */
2459 	if (priv->hw->mac->map_mtl_to_dma)
2460 		stmmac_rx_queue_dma_chan_map(priv);
2461 
2462 	/* Enable MAC RX Queues */
2463 	if (priv->hw->mac->rx_queue_enable)
2464 		stmmac_mac_enable_rx_queues(priv);
2465 
2466 	/* Set RX priorities */
2467 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2468 		stmmac_mac_config_rx_queues_prio(priv);
2469 
2470 	/* Set TX priorities */
2471 	if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2472 		stmmac_mac_config_tx_queues_prio(priv);
2473 
2474 	/* Set RX routing */
2475 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2476 		stmmac_mac_config_rx_queues_routing(priv);
2477 }
2478 
2479 /**
2480  * stmmac_hw_setup - setup mac in a usable state.
2481  *  @dev : pointer to the device structure.
2482  *  Description:
2483  *  this is the main function to setup the HW in a usable state because the
2484  *  dma engine is reset, the core registers are configured (e.g. AXI,
2485  *  Checksum features, timers). The DMA is ready to start receiving and
2486  *  transmitting.
2487  *  Return value:
2488  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2489  *  file on failure.
2490  */
2491 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2492 {
2493 	struct stmmac_priv *priv = netdev_priv(dev);
2494 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2495 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2496 	u32 chan;
2497 	int ret;
2498 
2499 	/* DMA initialization and SW reset */
2500 	ret = stmmac_init_dma_engine(priv);
2501 	if (ret < 0) {
2502 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2503 			   __func__);
2504 		return ret;
2505 	}
2506 
2507 	/* Copy the MAC addr into the HW  */
2508 	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2509 
2510 	/* PS and related bits will be programmed according to the speed */
2511 	if (priv->hw->pcs) {
2512 		int speed = priv->plat->mac_port_sel_speed;
2513 
2514 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2515 		    (speed == SPEED_1000)) {
2516 			priv->hw->ps = speed;
2517 		} else {
2518 			dev_warn(priv->device, "invalid port speed\n");
2519 			priv->hw->ps = 0;
2520 		}
2521 	}
2522 
2523 	/* Initialize the MAC Core */
2524 	priv->hw->mac->core_init(priv->hw, dev->mtu);
2525 
2526 	/* Initialize MTL*/
2527 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
2528 		stmmac_mtl_configuration(priv);
2529 
2530 	ret = priv->hw->mac->rx_ipc(priv->hw);
2531 	if (!ret) {
2532 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2533 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2534 		priv->hw->rx_csum = 0;
2535 	}
2536 
2537 	/* Enable the MAC Rx/Tx */
2538 	priv->hw->mac->set_mac(priv->ioaddr, true);
2539 
2540 	/* Set the HW DMA mode and the COE */
2541 	stmmac_dma_operation_mode(priv);
2542 
2543 	stmmac_mmc_setup(priv);
2544 
2545 	if (init_ptp) {
2546 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2547 		if (ret < 0)
2548 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2549 
2550 		ret = stmmac_init_ptp(priv);
2551 		if (ret == -EOPNOTSUPP)
2552 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2553 		else if (ret)
2554 			netdev_warn(priv->dev, "PTP init failed\n");
2555 	}
2556 
2557 #ifdef CONFIG_DEBUG_FS
2558 	ret = stmmac_init_fs(dev);
2559 	if (ret < 0)
2560 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2561 			    __func__);
2562 #endif
2563 	/* Start the ball rolling... */
2564 	stmmac_start_all_dma(priv);
2565 
2566 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2567 
2568 	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2569 		priv->rx_riwt = MAX_DMA_RIWT;
2570 		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2571 	}
2572 
2573 	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2574 		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2575 
2576 	/* set TX and RX rings length */
2577 	stmmac_set_rings_length(priv);
2578 
2579 	/* Enable TSO */
2580 	if (priv->tso) {
2581 		for (chan = 0; chan < tx_cnt; chan++)
2582 			priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2583 	}
2584 
2585 	return 0;
2586 }
2587 
2588 static void stmmac_hw_teardown(struct net_device *dev)
2589 {
2590 	struct stmmac_priv *priv = netdev_priv(dev);
2591 
2592 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2593 }
2594 
2595 /**
2596  *  stmmac_open - open entry point of the driver
2597  *  @dev : pointer to the device structure.
2598  *  Description:
2599  *  This function is the open entry point of the driver.
2600  *  Return value:
2601  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2602  *  file on failure.
2603  */
2604 static int stmmac_open(struct net_device *dev)
2605 {
2606 	struct stmmac_priv *priv = netdev_priv(dev);
2607 	int ret;
2608 
2609 	stmmac_check_ether_addr(priv);
2610 
2611 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2612 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2613 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2614 		ret = stmmac_init_phy(dev);
2615 		if (ret) {
2616 			netdev_err(priv->dev,
2617 				   "%s: Cannot attach to PHY (error: %d)\n",
2618 				   __func__, ret);
2619 			return ret;
2620 		}
2621 	}
2622 
2623 	/* Extra statistics */
2624 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2625 	priv->xstats.threshold = tc;
2626 
2627 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2628 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2629 	priv->mss = 0;
2630 
2631 	ret = alloc_dma_desc_resources(priv);
2632 	if (ret < 0) {
2633 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2634 			   __func__);
2635 		goto dma_desc_error;
2636 	}
2637 
2638 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2639 	if (ret < 0) {
2640 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2641 			   __func__);
2642 		goto init_error;
2643 	}
2644 
2645 	ret = stmmac_hw_setup(dev, true);
2646 	if (ret < 0) {
2647 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2648 		goto init_error;
2649 	}
2650 
2651 	stmmac_init_tx_coalesce(priv);
2652 
2653 	if (dev->phydev)
2654 		phy_start(dev->phydev);
2655 
2656 	/* Request the IRQ lines */
2657 	ret = request_irq(dev->irq, stmmac_interrupt,
2658 			  IRQF_SHARED, dev->name, dev);
2659 	if (unlikely(ret < 0)) {
2660 		netdev_err(priv->dev,
2661 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2662 			   __func__, dev->irq, ret);
2663 		goto irq_error;
2664 	}
2665 
2666 	/* Request the Wake IRQ in case of another line is used for WoL */
2667 	if (priv->wol_irq != dev->irq) {
2668 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2669 				  IRQF_SHARED, dev->name, dev);
2670 		if (unlikely(ret < 0)) {
2671 			netdev_err(priv->dev,
2672 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2673 				   __func__, priv->wol_irq, ret);
2674 			goto wolirq_error;
2675 		}
2676 	}
2677 
2678 	/* Request the IRQ lines */
2679 	if (priv->lpi_irq > 0) {
2680 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2681 				  dev->name, dev);
2682 		if (unlikely(ret < 0)) {
2683 			netdev_err(priv->dev,
2684 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2685 				   __func__, priv->lpi_irq, ret);
2686 			goto lpiirq_error;
2687 		}
2688 	}
2689 
2690 	stmmac_enable_all_queues(priv);
2691 	stmmac_start_all_queues(priv);
2692 
2693 	return 0;
2694 
2695 lpiirq_error:
2696 	if (priv->wol_irq != dev->irq)
2697 		free_irq(priv->wol_irq, dev);
2698 wolirq_error:
2699 	free_irq(dev->irq, dev);
2700 irq_error:
2701 	if (dev->phydev)
2702 		phy_stop(dev->phydev);
2703 
2704 	del_timer_sync(&priv->txtimer);
2705 	stmmac_hw_teardown(dev);
2706 init_error:
2707 	free_dma_desc_resources(priv);
2708 dma_desc_error:
2709 	if (dev->phydev)
2710 		phy_disconnect(dev->phydev);
2711 
2712 	return ret;
2713 }
2714 
2715 /**
2716  *  stmmac_release - close entry point of the driver
2717  *  @dev : device pointer.
2718  *  Description:
2719  *  This is the stop entry point of the driver.
2720  */
2721 static int stmmac_release(struct net_device *dev)
2722 {
2723 	struct stmmac_priv *priv = netdev_priv(dev);
2724 
2725 	if (priv->eee_enabled)
2726 		del_timer_sync(&priv->eee_ctrl_timer);
2727 
2728 	/* Stop and disconnect the PHY */
2729 	if (dev->phydev) {
2730 		phy_stop(dev->phydev);
2731 		phy_disconnect(dev->phydev);
2732 	}
2733 
2734 	stmmac_stop_all_queues(priv);
2735 
2736 	stmmac_disable_all_queues(priv);
2737 
2738 	del_timer_sync(&priv->txtimer);
2739 
2740 	/* Free the IRQ lines */
2741 	free_irq(dev->irq, dev);
2742 	if (priv->wol_irq != dev->irq)
2743 		free_irq(priv->wol_irq, dev);
2744 	if (priv->lpi_irq > 0)
2745 		free_irq(priv->lpi_irq, dev);
2746 
2747 	/* Stop TX/RX DMA and clear the descriptors */
2748 	stmmac_stop_all_dma(priv);
2749 
2750 	/* Release and free the Rx/Tx resources */
2751 	free_dma_desc_resources(priv);
2752 
2753 	/* Disable the MAC Rx/Tx */
2754 	priv->hw->mac->set_mac(priv->ioaddr, false);
2755 
2756 	netif_carrier_off(dev);
2757 
2758 #ifdef CONFIG_DEBUG_FS
2759 	stmmac_exit_fs(dev);
2760 #endif
2761 
2762 	stmmac_release_ptp(priv);
2763 
2764 	return 0;
2765 }
2766 
2767 /**
2768  *  stmmac_tso_allocator - close entry point of the driver
2769  *  @priv: driver private structure
2770  *  @des: buffer start address
2771  *  @total_len: total length to fill in descriptors
2772  *  @last_segmant: condition for the last descriptor
2773  *  @queue: TX queue index
2774  *  Description:
2775  *  This function fills descriptor and request new descriptors according to
2776  *  buffer length to fill
2777  */
2778 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2779 				 int total_len, bool last_segment, u32 queue)
2780 {
2781 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2782 	struct dma_desc *desc;
2783 	u32 buff_size;
2784 	int tmp_len;
2785 
2786 	tmp_len = total_len;
2787 
2788 	while (tmp_len > 0) {
2789 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2790 		desc = tx_q->dma_tx + tx_q->cur_tx;
2791 
2792 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2793 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2794 			    TSO_MAX_BUFF_SIZE : tmp_len;
2795 
2796 		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2797 			0, 1,
2798 			(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2799 			0, 0);
2800 
2801 		tmp_len -= TSO_MAX_BUFF_SIZE;
2802 	}
2803 }
2804 
2805 /**
2806  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2807  *  @skb : the socket buffer
2808  *  @dev : device pointer
2809  *  Description: this is the transmit function that is called on TSO frames
2810  *  (support available on GMAC4 and newer chips).
2811  *  Diagram below show the ring programming in case of TSO frames:
2812  *
2813  *  First Descriptor
2814  *   --------
2815  *   | DES0 |---> buffer1 = L2/L3/L4 header
2816  *   | DES1 |---> TCP Payload (can continue on next descr...)
2817  *   | DES2 |---> buffer 1 and 2 len
2818  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2819  *   --------
2820  *	|
2821  *     ...
2822  *	|
2823  *   --------
2824  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2825  *   | DES1 | --|
2826  *   | DES2 | --> buffer 1 and 2 len
2827  *   | DES3 |
2828  *   --------
2829  *
2830  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
2831  */
2832 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2833 {
2834 	struct dma_desc *desc, *first, *mss_desc = NULL;
2835 	struct stmmac_priv *priv = netdev_priv(dev);
2836 	int nfrags = skb_shinfo(skb)->nr_frags;
2837 	u32 queue = skb_get_queue_mapping(skb);
2838 	unsigned int first_entry, des;
2839 	struct stmmac_tx_queue *tx_q;
2840 	int tmp_pay_len = 0;
2841 	u32 pay_len, mss;
2842 	u8 proto_hdr_len;
2843 	int i;
2844 
2845 	tx_q = &priv->tx_queue[queue];
2846 
2847 	/* Compute header lengths */
2848 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2849 
2850 	/* Desc availability based on threshold should be enough safe */
2851 	if (unlikely(stmmac_tx_avail(priv, queue) <
2852 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2853 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2854 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2855 								queue));
2856 			/* This is a hard error, log it. */
2857 			netdev_err(priv->dev,
2858 				   "%s: Tx Ring full when queue awake\n",
2859 				   __func__);
2860 		}
2861 		return NETDEV_TX_BUSY;
2862 	}
2863 
2864 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2865 
2866 	mss = skb_shinfo(skb)->gso_size;
2867 
2868 	/* set new MSS value if needed */
2869 	if (mss != priv->mss) {
2870 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2871 		priv->hw->desc->set_mss(mss_desc, mss);
2872 		priv->mss = mss;
2873 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2874 	}
2875 
2876 	if (netif_msg_tx_queued(priv)) {
2877 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2878 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2879 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2880 			skb->data_len);
2881 	}
2882 
2883 	first_entry = tx_q->cur_tx;
2884 
2885 	desc = tx_q->dma_tx + first_entry;
2886 	first = desc;
2887 
2888 	/* first descriptor: fill Headers on Buf1 */
2889 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2890 			     DMA_TO_DEVICE);
2891 	if (dma_mapping_error(priv->device, des))
2892 		goto dma_map_err;
2893 
2894 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2895 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2896 
2897 	first->des0 = cpu_to_le32(des);
2898 
2899 	/* Fill start of payload in buff2 of first descriptor */
2900 	if (pay_len)
2901 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2902 
2903 	/* If needed take extra descriptors to fill the remaining payload */
2904 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2905 
2906 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2907 
2908 	/* Prepare fragments */
2909 	for (i = 0; i < nfrags; i++) {
2910 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2911 
2912 		des = skb_frag_dma_map(priv->device, frag, 0,
2913 				       skb_frag_size(frag),
2914 				       DMA_TO_DEVICE);
2915 		if (dma_mapping_error(priv->device, des))
2916 			goto dma_map_err;
2917 
2918 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2919 				     (i == nfrags - 1), queue);
2920 
2921 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2922 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2923 		tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2924 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2925 	}
2926 
2927 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2928 
2929 	/* Only the last descriptor gets to point to the skb. */
2930 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2931 
2932 	/* We've used all descriptors we need for this skb, however,
2933 	 * advance cur_tx so that it references a fresh descriptor.
2934 	 * ndo_start_xmit will fill this descriptor the next time it's
2935 	 * called and stmmac_tx_clean may clean up to this descriptor.
2936 	 */
2937 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2938 
2939 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2940 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2941 			  __func__);
2942 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2943 	}
2944 
2945 	dev->stats.tx_bytes += skb->len;
2946 	priv->xstats.tx_tso_frames++;
2947 	priv->xstats.tx_tso_nfrags += nfrags;
2948 
2949 	/* Manage tx mitigation */
2950 	priv->tx_count_frames += nfrags + 1;
2951 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2952 		mod_timer(&priv->txtimer,
2953 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2954 	} else {
2955 		priv->tx_count_frames = 0;
2956 		priv->hw->desc->set_tx_ic(desc);
2957 		priv->xstats.tx_set_ic_bit++;
2958 	}
2959 
2960 	skb_tx_timestamp(skb);
2961 
2962 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2963 		     priv->hwts_tx_en)) {
2964 		/* declare that device is doing timestamping */
2965 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2966 		priv->hw->desc->enable_tx_timestamp(first);
2967 	}
2968 
2969 	/* Complete the first descriptor before granting the DMA */
2970 	priv->hw->desc->prepare_tso_tx_desc(first, 1,
2971 			proto_hdr_len,
2972 			pay_len,
2973 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2974 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2975 
2976 	/* If context desc is used to change MSS */
2977 	if (mss_desc)
2978 		priv->hw->desc->set_tx_owner(mss_desc);
2979 
2980 	/* The own bit must be the latest setting done when prepare the
2981 	 * descriptor and then barrier is needed to make sure that
2982 	 * all is coherent before granting the DMA engine.
2983 	 */
2984 	dma_wmb();
2985 
2986 	if (netif_msg_pktdata(priv)) {
2987 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2988 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2989 			tx_q->cur_tx, first, nfrags);
2990 
2991 		priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2992 					     0);
2993 
2994 		pr_info(">>> frame to be transmitted: ");
2995 		print_pkt(skb->data, skb_headlen(skb));
2996 	}
2997 
2998 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2999 
3000 	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3001 				       queue);
3002 
3003 	return NETDEV_TX_OK;
3004 
3005 dma_map_err:
3006 	dev_err(priv->device, "Tx dma map failed\n");
3007 	dev_kfree_skb(skb);
3008 	priv->dev->stats.tx_dropped++;
3009 	return NETDEV_TX_OK;
3010 }
3011 
3012 /**
3013  *  stmmac_xmit - Tx entry point of the driver
3014  *  @skb : the socket buffer
3015  *  @dev : device pointer
3016  *  Description : this is the tx entry point of the driver.
3017  *  It programs the chain or the ring and supports oversized frames
3018  *  and SG feature.
3019  */
3020 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3021 {
3022 	struct stmmac_priv *priv = netdev_priv(dev);
3023 	unsigned int nopaged_len = skb_headlen(skb);
3024 	int i, csum_insertion = 0, is_jumbo = 0;
3025 	u32 queue = skb_get_queue_mapping(skb);
3026 	int nfrags = skb_shinfo(skb)->nr_frags;
3027 	int entry;
3028 	unsigned int first_entry;
3029 	struct dma_desc *desc, *first;
3030 	struct stmmac_tx_queue *tx_q;
3031 	unsigned int enh_desc;
3032 	unsigned int des;
3033 
3034 	tx_q = &priv->tx_queue[queue];
3035 
3036 	/* Manage oversized TCP frames for GMAC4 device */
3037 	if (skb_is_gso(skb) && priv->tso) {
3038 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3039 			return stmmac_tso_xmit(skb, dev);
3040 	}
3041 
3042 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3043 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3044 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3045 								queue));
3046 			/* This is a hard error, log it. */
3047 			netdev_err(priv->dev,
3048 				   "%s: Tx Ring full when queue awake\n",
3049 				   __func__);
3050 		}
3051 		return NETDEV_TX_BUSY;
3052 	}
3053 
3054 	if (priv->tx_path_in_lpi_mode)
3055 		stmmac_disable_eee_mode(priv);
3056 
3057 	entry = tx_q->cur_tx;
3058 	first_entry = entry;
3059 
3060 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3061 
3062 	if (likely(priv->extend_desc))
3063 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3064 	else
3065 		desc = tx_q->dma_tx + entry;
3066 
3067 	first = desc;
3068 
3069 	enh_desc = priv->plat->enh_desc;
3070 	/* To program the descriptors according to the size of the frame */
3071 	if (enh_desc)
3072 		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3073 
3074 	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3075 					 DWMAC_CORE_4_00)) {
3076 		entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3077 		if (unlikely(entry < 0))
3078 			goto dma_map_err;
3079 	}
3080 
3081 	for (i = 0; i < nfrags; i++) {
3082 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3083 		int len = skb_frag_size(frag);
3084 		bool last_segment = (i == (nfrags - 1));
3085 
3086 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3087 
3088 		if (likely(priv->extend_desc))
3089 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3090 		else
3091 			desc = tx_q->dma_tx + entry;
3092 
3093 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3094 				       DMA_TO_DEVICE);
3095 		if (dma_mapping_error(priv->device, des))
3096 			goto dma_map_err; /* should reuse desc w/o issues */
3097 
3098 		tx_q->tx_skbuff[entry] = NULL;
3099 
3100 		tx_q->tx_skbuff_dma[entry].buf = des;
3101 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3102 			desc->des0 = cpu_to_le32(des);
3103 		else
3104 			desc->des2 = cpu_to_le32(des);
3105 
3106 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3107 		tx_q->tx_skbuff_dma[entry].len = len;
3108 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3109 
3110 		/* Prepare the descriptor and set the own bit too */
3111 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3112 						priv->mode, 1, last_segment,
3113 						skb->len);
3114 	}
3115 
3116 	/* Only the last descriptor gets to point to the skb. */
3117 	tx_q->tx_skbuff[entry] = skb;
3118 
3119 	/* We've used all descriptors we need for this skb, however,
3120 	 * advance cur_tx so that it references a fresh descriptor.
3121 	 * ndo_start_xmit will fill this descriptor the next time it's
3122 	 * called and stmmac_tx_clean may clean up to this descriptor.
3123 	 */
3124 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3125 	tx_q->cur_tx = entry;
3126 
3127 	if (netif_msg_pktdata(priv)) {
3128 		void *tx_head;
3129 
3130 		netdev_dbg(priv->dev,
3131 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3132 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3133 			   entry, first, nfrags);
3134 
3135 		if (priv->extend_desc)
3136 			tx_head = (void *)tx_q->dma_etx;
3137 		else
3138 			tx_head = (void *)tx_q->dma_tx;
3139 
3140 		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3141 
3142 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3143 		print_pkt(skb->data, skb->len);
3144 	}
3145 
3146 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3147 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3148 			  __func__);
3149 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3150 	}
3151 
3152 	dev->stats.tx_bytes += skb->len;
3153 
3154 	/* According to the coalesce parameter the IC bit for the latest
3155 	 * segment is reset and the timer re-started to clean the tx status.
3156 	 * This approach takes care about the fragments: desc is the first
3157 	 * element in case of no SG.
3158 	 */
3159 	priv->tx_count_frames += nfrags + 1;
3160 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3161 		mod_timer(&priv->txtimer,
3162 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3163 	} else {
3164 		priv->tx_count_frames = 0;
3165 		priv->hw->desc->set_tx_ic(desc);
3166 		priv->xstats.tx_set_ic_bit++;
3167 	}
3168 
3169 	skb_tx_timestamp(skb);
3170 
3171 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3172 	 * problems because all the descriptors are actually ready to be
3173 	 * passed to the DMA engine.
3174 	 */
3175 	if (likely(!is_jumbo)) {
3176 		bool last_segment = (nfrags == 0);
3177 
3178 		des = dma_map_single(priv->device, skb->data,
3179 				     nopaged_len, DMA_TO_DEVICE);
3180 		if (dma_mapping_error(priv->device, des))
3181 			goto dma_map_err;
3182 
3183 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3184 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3185 			first->des0 = cpu_to_le32(des);
3186 		else
3187 			first->des2 = cpu_to_le32(des);
3188 
3189 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3190 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3191 
3192 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3193 			     priv->hwts_tx_en)) {
3194 			/* declare that device is doing timestamping */
3195 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3196 			priv->hw->desc->enable_tx_timestamp(first);
3197 		}
3198 
3199 		/* Prepare the first descriptor setting the OWN bit too */
3200 		priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3201 						csum_insertion, priv->mode, 1,
3202 						last_segment, skb->len);
3203 
3204 		/* The own bit must be the latest setting done when prepare the
3205 		 * descriptor and then barrier is needed to make sure that
3206 		 * all is coherent before granting the DMA engine.
3207 		 */
3208 		dma_wmb();
3209 	}
3210 
3211 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3212 
3213 	if (priv->synopsys_id < DWMAC_CORE_4_00)
3214 		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3215 	else
3216 		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3217 					       queue);
3218 
3219 	return NETDEV_TX_OK;
3220 
3221 dma_map_err:
3222 	netdev_err(priv->dev, "Tx DMA map failed\n");
3223 	dev_kfree_skb(skb);
3224 	priv->dev->stats.tx_dropped++;
3225 	return NETDEV_TX_OK;
3226 }
3227 
3228 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3229 {
3230 	struct ethhdr *ehdr;
3231 	u16 vlanid;
3232 
3233 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3234 	    NETIF_F_HW_VLAN_CTAG_RX &&
3235 	    !__vlan_get_tag(skb, &vlanid)) {
3236 		/* pop the vlan tag */
3237 		ehdr = (struct ethhdr *)skb->data;
3238 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3239 		skb_pull(skb, VLAN_HLEN);
3240 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3241 	}
3242 }
3243 
3244 
3245 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3246 {
3247 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3248 		return 0;
3249 
3250 	return 1;
3251 }
3252 
3253 /**
3254  * stmmac_rx_refill - refill used skb preallocated buffers
3255  * @priv: driver private structure
3256  * @queue: RX queue index
3257  * Description : this is to reallocate the skb for the reception process
3258  * that is based on zero-copy.
3259  */
3260 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3261 {
3262 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3263 	int dirty = stmmac_rx_dirty(priv, queue);
3264 	unsigned int entry = rx_q->dirty_rx;
3265 
3266 	int bfsize = priv->dma_buf_sz;
3267 
3268 	while (dirty-- > 0) {
3269 		struct dma_desc *p;
3270 
3271 		if (priv->extend_desc)
3272 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3273 		else
3274 			p = rx_q->dma_rx + entry;
3275 
3276 		if (likely(!rx_q->rx_skbuff[entry])) {
3277 			struct sk_buff *skb;
3278 
3279 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3280 			if (unlikely(!skb)) {
3281 				/* so for a while no zero-copy! */
3282 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3283 				if (unlikely(net_ratelimit()))
3284 					dev_err(priv->device,
3285 						"fail to alloc skb entry %d\n",
3286 						entry);
3287 				break;
3288 			}
3289 
3290 			rx_q->rx_skbuff[entry] = skb;
3291 			rx_q->rx_skbuff_dma[entry] =
3292 			    dma_map_single(priv->device, skb->data, bfsize,
3293 					   DMA_FROM_DEVICE);
3294 			if (dma_mapping_error(priv->device,
3295 					      rx_q->rx_skbuff_dma[entry])) {
3296 				netdev_err(priv->dev, "Rx DMA map failed\n");
3297 				dev_kfree_skb(skb);
3298 				break;
3299 			}
3300 
3301 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3302 				p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3303 				p->des1 = 0;
3304 			} else {
3305 				p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3306 			}
3307 			if (priv->hw->mode->refill_desc3)
3308 				priv->hw->mode->refill_desc3(rx_q, p);
3309 
3310 			if (rx_q->rx_zeroc_thresh > 0)
3311 				rx_q->rx_zeroc_thresh--;
3312 
3313 			netif_dbg(priv, rx_status, priv->dev,
3314 				  "refill entry #%d\n", entry);
3315 		}
3316 		dma_wmb();
3317 
3318 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3319 			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3320 		else
3321 			priv->hw->desc->set_rx_owner(p);
3322 
3323 		dma_wmb();
3324 
3325 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3326 	}
3327 	rx_q->dirty_rx = entry;
3328 }
3329 
3330 /**
3331  * stmmac_rx - manage the receive process
3332  * @priv: driver private structure
3333  * @limit: napi bugget
3334  * @queue: RX queue index.
3335  * Description :  this the function called by the napi poll method.
3336  * It gets all the frames inside the ring.
3337  */
3338 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3339 {
3340 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3341 	unsigned int entry = rx_q->cur_rx;
3342 	int coe = priv->hw->rx_csum;
3343 	unsigned int next_entry;
3344 	unsigned int count = 0;
3345 
3346 	if (netif_msg_rx_status(priv)) {
3347 		void *rx_head;
3348 
3349 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3350 		if (priv->extend_desc)
3351 			rx_head = (void *)rx_q->dma_erx;
3352 		else
3353 			rx_head = (void *)rx_q->dma_rx;
3354 
3355 		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3356 	}
3357 	while (count < limit) {
3358 		int status;
3359 		struct dma_desc *p;
3360 		struct dma_desc *np;
3361 
3362 		if (priv->extend_desc)
3363 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3364 		else
3365 			p = rx_q->dma_rx + entry;
3366 
3367 		/* read the status of the incoming frame */
3368 		status = priv->hw->desc->rx_status(&priv->dev->stats,
3369 						   &priv->xstats, p);
3370 		/* check if managed by the DMA otherwise go ahead */
3371 		if (unlikely(status & dma_own))
3372 			break;
3373 
3374 		count++;
3375 
3376 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3377 		next_entry = rx_q->cur_rx;
3378 
3379 		if (priv->extend_desc)
3380 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3381 		else
3382 			np = rx_q->dma_rx + next_entry;
3383 
3384 		prefetch(np);
3385 
3386 		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3387 			priv->hw->desc->rx_extended_status(&priv->dev->stats,
3388 							   &priv->xstats,
3389 							   rx_q->dma_erx +
3390 							   entry);
3391 		if (unlikely(status == discard_frame)) {
3392 			priv->dev->stats.rx_errors++;
3393 			if (priv->hwts_rx_en && !priv->extend_desc) {
3394 				/* DESC2 & DESC3 will be overwritten by device
3395 				 * with timestamp value, hence reinitialize
3396 				 * them in stmmac_rx_refill() function so that
3397 				 * device can reuse it.
3398 				 */
3399 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3400 				rx_q->rx_skbuff[entry] = NULL;
3401 				dma_unmap_single(priv->device,
3402 						 rx_q->rx_skbuff_dma[entry],
3403 						 priv->dma_buf_sz,
3404 						 DMA_FROM_DEVICE);
3405 			}
3406 		} else {
3407 			struct sk_buff *skb;
3408 			int frame_len;
3409 			unsigned int des;
3410 
3411 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3412 				des = le32_to_cpu(p->des0);
3413 			else
3414 				des = le32_to_cpu(p->des2);
3415 
3416 			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3417 
3418 			/*  If frame length is greater than skb buffer size
3419 			 *  (preallocated during init) then the packet is
3420 			 *  ignored
3421 			 */
3422 			if (frame_len > priv->dma_buf_sz) {
3423 				netdev_err(priv->dev,
3424 					   "len %d larger than size (%d)\n",
3425 					   frame_len, priv->dma_buf_sz);
3426 				priv->dev->stats.rx_length_errors++;
3427 				break;
3428 			}
3429 
3430 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3431 			 * Type frames (LLC/LLC-SNAP)
3432 			 */
3433 			if (unlikely(status != llc_snap))
3434 				frame_len -= ETH_FCS_LEN;
3435 
3436 			if (netif_msg_rx_status(priv)) {
3437 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3438 					   p, entry, des);
3439 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3440 					   frame_len, status);
3441 			}
3442 
3443 			/* The zero-copy is always used for all the sizes
3444 			 * in case of GMAC4 because it needs
3445 			 * to refill the used descriptors, always.
3446 			 */
3447 			if (unlikely(!priv->plat->has_gmac4 &&
3448 				     ((frame_len < priv->rx_copybreak) ||
3449 				     stmmac_rx_threshold_count(rx_q)))) {
3450 				skb = netdev_alloc_skb_ip_align(priv->dev,
3451 								frame_len);
3452 				if (unlikely(!skb)) {
3453 					if (net_ratelimit())
3454 						dev_warn(priv->device,
3455 							 "packet dropped\n");
3456 					priv->dev->stats.rx_dropped++;
3457 					break;
3458 				}
3459 
3460 				dma_sync_single_for_cpu(priv->device,
3461 							rx_q->rx_skbuff_dma
3462 							[entry], frame_len,
3463 							DMA_FROM_DEVICE);
3464 				skb_copy_to_linear_data(skb,
3465 							rx_q->
3466 							rx_skbuff[entry]->data,
3467 							frame_len);
3468 
3469 				skb_put(skb, frame_len);
3470 				dma_sync_single_for_device(priv->device,
3471 							   rx_q->rx_skbuff_dma
3472 							   [entry], frame_len,
3473 							   DMA_FROM_DEVICE);
3474 			} else {
3475 				skb = rx_q->rx_skbuff[entry];
3476 				if (unlikely(!skb)) {
3477 					netdev_err(priv->dev,
3478 						   "%s: Inconsistent Rx chain\n",
3479 						   priv->dev->name);
3480 					priv->dev->stats.rx_dropped++;
3481 					break;
3482 				}
3483 				prefetch(skb->data - NET_IP_ALIGN);
3484 				rx_q->rx_skbuff[entry] = NULL;
3485 				rx_q->rx_zeroc_thresh++;
3486 
3487 				skb_put(skb, frame_len);
3488 				dma_unmap_single(priv->device,
3489 						 rx_q->rx_skbuff_dma[entry],
3490 						 priv->dma_buf_sz,
3491 						 DMA_FROM_DEVICE);
3492 			}
3493 
3494 			if (netif_msg_pktdata(priv)) {
3495 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3496 					   frame_len);
3497 				print_pkt(skb->data, frame_len);
3498 			}
3499 
3500 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3501 
3502 			stmmac_rx_vlan(priv->dev, skb);
3503 
3504 			skb->protocol = eth_type_trans(skb, priv->dev);
3505 
3506 			if (unlikely(!coe))
3507 				skb_checksum_none_assert(skb);
3508 			else
3509 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3510 
3511 			napi_gro_receive(&rx_q->napi, skb);
3512 
3513 			priv->dev->stats.rx_packets++;
3514 			priv->dev->stats.rx_bytes += frame_len;
3515 		}
3516 		entry = next_entry;
3517 	}
3518 
3519 	stmmac_rx_refill(priv, queue);
3520 
3521 	priv->xstats.rx_pkt_n += count;
3522 
3523 	return count;
3524 }
3525 
3526 /**
3527  *  stmmac_poll - stmmac poll method (NAPI)
3528  *  @napi : pointer to the napi structure.
3529  *  @budget : maximum number of packets that the current CPU can receive from
3530  *	      all interfaces.
3531  *  Description :
3532  *  To look at the incoming frames and clear the tx resources.
3533  */
3534 static int stmmac_poll(struct napi_struct *napi, int budget)
3535 {
3536 	struct stmmac_rx_queue *rx_q =
3537 		container_of(napi, struct stmmac_rx_queue, napi);
3538 	struct stmmac_priv *priv = rx_q->priv_data;
3539 	u32 tx_count = priv->plat->tx_queues_to_use;
3540 	u32 chan = rx_q->queue_index;
3541 	int work_done = 0;
3542 	u32 queue;
3543 
3544 	priv->xstats.napi_poll++;
3545 
3546 	/* check all the queues */
3547 	for (queue = 0; queue < tx_count; queue++)
3548 		stmmac_tx_clean(priv, queue);
3549 
3550 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3551 	if (work_done < budget) {
3552 		napi_complete_done(napi, work_done);
3553 		stmmac_enable_dma_irq(priv, chan);
3554 	}
3555 	return work_done;
3556 }
3557 
3558 /**
3559  *  stmmac_tx_timeout
3560  *  @dev : Pointer to net device structure
3561  *  Description: this function is called when a packet transmission fails to
3562  *   complete within a reasonable time. The driver will mark the error in the
3563  *   netdev structure and arrange for the device to be reset to a sane state
3564  *   in order to transmit a new packet.
3565  */
3566 static void stmmac_tx_timeout(struct net_device *dev)
3567 {
3568 	struct stmmac_priv *priv = netdev_priv(dev);
3569 	u32 tx_count = priv->plat->tx_queues_to_use;
3570 	u32 chan;
3571 
3572 	/* Clear Tx resources and restart transmitting again */
3573 	for (chan = 0; chan < tx_count; chan++)
3574 		stmmac_tx_err(priv, chan);
3575 }
3576 
3577 /**
3578  *  stmmac_set_rx_mode - entry point for multicast addressing
3579  *  @dev : pointer to the device structure
3580  *  Description:
3581  *  This function is a driver entry point which gets called by the kernel
3582  *  whenever multicast addresses must be enabled/disabled.
3583  *  Return value:
3584  *  void.
3585  */
3586 static void stmmac_set_rx_mode(struct net_device *dev)
3587 {
3588 	struct stmmac_priv *priv = netdev_priv(dev);
3589 
3590 	priv->hw->mac->set_filter(priv->hw, dev);
3591 }
3592 
3593 /**
3594  *  stmmac_change_mtu - entry point to change MTU size for the device.
3595  *  @dev : device pointer.
3596  *  @new_mtu : the new MTU size for the device.
3597  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
3598  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3599  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3600  *  Return value:
3601  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3602  *  file on failure.
3603  */
3604 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3605 {
3606 	struct stmmac_priv *priv = netdev_priv(dev);
3607 
3608 	if (netif_running(dev)) {
3609 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3610 		return -EBUSY;
3611 	}
3612 
3613 	dev->mtu = new_mtu;
3614 
3615 	netdev_update_features(dev);
3616 
3617 	return 0;
3618 }
3619 
3620 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3621 					     netdev_features_t features)
3622 {
3623 	struct stmmac_priv *priv = netdev_priv(dev);
3624 
3625 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3626 		features &= ~NETIF_F_RXCSUM;
3627 
3628 	if (!priv->plat->tx_coe)
3629 		features &= ~NETIF_F_CSUM_MASK;
3630 
3631 	/* Some GMAC devices have a bugged Jumbo frame support that
3632 	 * needs to have the Tx COE disabled for oversized frames
3633 	 * (due to limited buffer sizes). In this case we disable
3634 	 * the TX csum insertion in the TDES and not use SF.
3635 	 */
3636 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3637 		features &= ~NETIF_F_CSUM_MASK;
3638 
3639 	/* Disable tso if asked by ethtool */
3640 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3641 		if (features & NETIF_F_TSO)
3642 			priv->tso = true;
3643 		else
3644 			priv->tso = false;
3645 	}
3646 
3647 	return features;
3648 }
3649 
3650 static int stmmac_set_features(struct net_device *netdev,
3651 			       netdev_features_t features)
3652 {
3653 	struct stmmac_priv *priv = netdev_priv(netdev);
3654 
3655 	/* Keep the COE Type in case of csum is supporting */
3656 	if (features & NETIF_F_RXCSUM)
3657 		priv->hw->rx_csum = priv->plat->rx_coe;
3658 	else
3659 		priv->hw->rx_csum = 0;
3660 	/* No check needed because rx_coe has been set before and it will be
3661 	 * fixed in case of issue.
3662 	 */
3663 	priv->hw->mac->rx_ipc(priv->hw);
3664 
3665 	return 0;
3666 }
3667 
3668 /**
3669  *  stmmac_interrupt - main ISR
3670  *  @irq: interrupt number.
3671  *  @dev_id: to pass the net device pointer.
3672  *  Description: this is the main driver interrupt service routine.
3673  *  It can call:
3674  *  o DMA service routine (to manage incoming frame reception and transmission
3675  *    status)
3676  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3677  *    interrupts.
3678  */
3679 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3680 {
3681 	struct net_device *dev = (struct net_device *)dev_id;
3682 	struct stmmac_priv *priv = netdev_priv(dev);
3683 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3684 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3685 	u32 queues_count;
3686 	u32 queue;
3687 
3688 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3689 
3690 	if (priv->irq_wake)
3691 		pm_wakeup_event(priv->device, 0);
3692 
3693 	if (unlikely(!dev)) {
3694 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3695 		return IRQ_NONE;
3696 	}
3697 
3698 	/* To handle GMAC own interrupts */
3699 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3700 		int status = priv->hw->mac->host_irq_status(priv->hw,
3701 							    &priv->xstats);
3702 
3703 		if (unlikely(status)) {
3704 			/* For LPI we need to save the tx status */
3705 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3706 				priv->tx_path_in_lpi_mode = true;
3707 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3708 				priv->tx_path_in_lpi_mode = false;
3709 		}
3710 
3711 		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3712 			for (queue = 0; queue < queues_count; queue++) {
3713 				struct stmmac_rx_queue *rx_q =
3714 				&priv->rx_queue[queue];
3715 
3716 				status |=
3717 				priv->hw->mac->host_mtl_irq_status(priv->hw,
3718 								   queue);
3719 
3720 				if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3721 				    priv->hw->dma->set_rx_tail_ptr)
3722 					priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3723 								rx_q->rx_tail_addr,
3724 								queue);
3725 			}
3726 		}
3727 
3728 		/* PCS link status */
3729 		if (priv->hw->pcs) {
3730 			if (priv->xstats.pcs_link)
3731 				netif_carrier_on(dev);
3732 			else
3733 				netif_carrier_off(dev);
3734 		}
3735 	}
3736 
3737 	/* To handle DMA interrupts */
3738 	stmmac_dma_interrupt(priv);
3739 
3740 	return IRQ_HANDLED;
3741 }
3742 
3743 #ifdef CONFIG_NET_POLL_CONTROLLER
3744 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3745  * to allow network I/O with interrupts disabled.
3746  */
3747 static void stmmac_poll_controller(struct net_device *dev)
3748 {
3749 	disable_irq(dev->irq);
3750 	stmmac_interrupt(dev->irq, dev);
3751 	enable_irq(dev->irq);
3752 }
3753 #endif
3754 
3755 /**
3756  *  stmmac_ioctl - Entry point for the Ioctl
3757  *  @dev: Device pointer.
3758  *  @rq: An IOCTL specefic structure, that can contain a pointer to
3759  *  a proprietary structure used to pass information to the driver.
3760  *  @cmd: IOCTL command
3761  *  Description:
3762  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3763  */
3764 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3765 {
3766 	int ret = -EOPNOTSUPP;
3767 
3768 	if (!netif_running(dev))
3769 		return -EINVAL;
3770 
3771 	switch (cmd) {
3772 	case SIOCGMIIPHY:
3773 	case SIOCGMIIREG:
3774 	case SIOCSMIIREG:
3775 		if (!dev->phydev)
3776 			return -EINVAL;
3777 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3778 		break;
3779 	case SIOCSHWTSTAMP:
3780 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3781 		break;
3782 	default:
3783 		break;
3784 	}
3785 
3786 	return ret;
3787 }
3788 
3789 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3790 {
3791 	struct stmmac_priv *priv = netdev_priv(ndev);
3792 	int ret = 0;
3793 
3794 	ret = eth_mac_addr(ndev, addr);
3795 	if (ret)
3796 		return ret;
3797 
3798 	priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
3799 
3800 	return ret;
3801 }
3802 
3803 #ifdef CONFIG_DEBUG_FS
3804 static struct dentry *stmmac_fs_dir;
3805 
3806 static void sysfs_display_ring(void *head, int size, int extend_desc,
3807 			       struct seq_file *seq)
3808 {
3809 	int i;
3810 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3811 	struct dma_desc *p = (struct dma_desc *)head;
3812 
3813 	for (i = 0; i < size; i++) {
3814 		if (extend_desc) {
3815 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3816 				   i, (unsigned int)virt_to_phys(ep),
3817 				   le32_to_cpu(ep->basic.des0),
3818 				   le32_to_cpu(ep->basic.des1),
3819 				   le32_to_cpu(ep->basic.des2),
3820 				   le32_to_cpu(ep->basic.des3));
3821 			ep++;
3822 		} else {
3823 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3824 				   i, (unsigned int)virt_to_phys(p),
3825 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3826 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3827 			p++;
3828 		}
3829 		seq_printf(seq, "\n");
3830 	}
3831 }
3832 
3833 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3834 {
3835 	struct net_device *dev = seq->private;
3836 	struct stmmac_priv *priv = netdev_priv(dev);
3837 	u32 rx_count = priv->plat->rx_queues_to_use;
3838 	u32 tx_count = priv->plat->tx_queues_to_use;
3839 	u32 queue;
3840 
3841 	for (queue = 0; queue < rx_count; queue++) {
3842 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3843 
3844 		seq_printf(seq, "RX Queue %d:\n", queue);
3845 
3846 		if (priv->extend_desc) {
3847 			seq_printf(seq, "Extended descriptor ring:\n");
3848 			sysfs_display_ring((void *)rx_q->dma_erx,
3849 					   DMA_RX_SIZE, 1, seq);
3850 		} else {
3851 			seq_printf(seq, "Descriptor ring:\n");
3852 			sysfs_display_ring((void *)rx_q->dma_rx,
3853 					   DMA_RX_SIZE, 0, seq);
3854 		}
3855 	}
3856 
3857 	for (queue = 0; queue < tx_count; queue++) {
3858 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3859 
3860 		seq_printf(seq, "TX Queue %d:\n", queue);
3861 
3862 		if (priv->extend_desc) {
3863 			seq_printf(seq, "Extended descriptor ring:\n");
3864 			sysfs_display_ring((void *)tx_q->dma_etx,
3865 					   DMA_TX_SIZE, 1, seq);
3866 		} else {
3867 			seq_printf(seq, "Descriptor ring:\n");
3868 			sysfs_display_ring((void *)tx_q->dma_tx,
3869 					   DMA_TX_SIZE, 0, seq);
3870 		}
3871 	}
3872 
3873 	return 0;
3874 }
3875 
3876 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3877 {
3878 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3879 }
3880 
3881 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3882 
3883 static const struct file_operations stmmac_rings_status_fops = {
3884 	.owner = THIS_MODULE,
3885 	.open = stmmac_sysfs_ring_open,
3886 	.read = seq_read,
3887 	.llseek = seq_lseek,
3888 	.release = single_release,
3889 };
3890 
3891 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3892 {
3893 	struct net_device *dev = seq->private;
3894 	struct stmmac_priv *priv = netdev_priv(dev);
3895 
3896 	if (!priv->hw_cap_support) {
3897 		seq_printf(seq, "DMA HW features not supported\n");
3898 		return 0;
3899 	}
3900 
3901 	seq_printf(seq, "==============================\n");
3902 	seq_printf(seq, "\tDMA HW features\n");
3903 	seq_printf(seq, "==============================\n");
3904 
3905 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3906 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3907 	seq_printf(seq, "\t1000 Mbps: %s\n",
3908 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3909 	seq_printf(seq, "\tHalf duplex: %s\n",
3910 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3911 	seq_printf(seq, "\tHash Filter: %s\n",
3912 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3913 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3914 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3915 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3916 		   (priv->dma_cap.pcs) ? "Y" : "N");
3917 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3918 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3919 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3920 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3921 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3922 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3923 	seq_printf(seq, "\tRMON module: %s\n",
3924 		   (priv->dma_cap.rmon) ? "Y" : "N");
3925 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3926 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3927 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3928 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3929 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3930 		   (priv->dma_cap.eee) ? "Y" : "N");
3931 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3932 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3933 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3934 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3935 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3936 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3937 	} else {
3938 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3939 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3940 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3941 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3942 	}
3943 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3944 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3945 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3946 		   priv->dma_cap.number_rx_channel);
3947 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3948 		   priv->dma_cap.number_tx_channel);
3949 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3950 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3951 
3952 	return 0;
3953 }
3954 
3955 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3956 {
3957 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3958 }
3959 
3960 static const struct file_operations stmmac_dma_cap_fops = {
3961 	.owner = THIS_MODULE,
3962 	.open = stmmac_sysfs_dma_cap_open,
3963 	.read = seq_read,
3964 	.llseek = seq_lseek,
3965 	.release = single_release,
3966 };
3967 
3968 static int stmmac_init_fs(struct net_device *dev)
3969 {
3970 	struct stmmac_priv *priv = netdev_priv(dev);
3971 
3972 	/* Create per netdev entries */
3973 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3974 
3975 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3976 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3977 
3978 		return -ENOMEM;
3979 	}
3980 
3981 	/* Entry to report DMA RX/TX rings */
3982 	priv->dbgfs_rings_status =
3983 		debugfs_create_file("descriptors_status", S_IRUGO,
3984 				    priv->dbgfs_dir, dev,
3985 				    &stmmac_rings_status_fops);
3986 
3987 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3988 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3989 		debugfs_remove_recursive(priv->dbgfs_dir);
3990 
3991 		return -ENOMEM;
3992 	}
3993 
3994 	/* Entry to report the DMA HW features */
3995 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3996 					    priv->dbgfs_dir,
3997 					    dev, &stmmac_dma_cap_fops);
3998 
3999 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4000 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4001 		debugfs_remove_recursive(priv->dbgfs_dir);
4002 
4003 		return -ENOMEM;
4004 	}
4005 
4006 	return 0;
4007 }
4008 
4009 static void stmmac_exit_fs(struct net_device *dev)
4010 {
4011 	struct stmmac_priv *priv = netdev_priv(dev);
4012 
4013 	debugfs_remove_recursive(priv->dbgfs_dir);
4014 }
4015 #endif /* CONFIG_DEBUG_FS */
4016 
4017 static const struct net_device_ops stmmac_netdev_ops = {
4018 	.ndo_open = stmmac_open,
4019 	.ndo_start_xmit = stmmac_xmit,
4020 	.ndo_stop = stmmac_release,
4021 	.ndo_change_mtu = stmmac_change_mtu,
4022 	.ndo_fix_features = stmmac_fix_features,
4023 	.ndo_set_features = stmmac_set_features,
4024 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4025 	.ndo_tx_timeout = stmmac_tx_timeout,
4026 	.ndo_do_ioctl = stmmac_ioctl,
4027 #ifdef CONFIG_NET_POLL_CONTROLLER
4028 	.ndo_poll_controller = stmmac_poll_controller,
4029 #endif
4030 	.ndo_set_mac_address = stmmac_set_mac_address,
4031 };
4032 
4033 /**
4034  *  stmmac_hw_init - Init the MAC device
4035  *  @priv: driver private structure
4036  *  Description: this function is to configure the MAC device according to
4037  *  some platform parameters or the HW capability register. It prepares the
4038  *  driver to use either ring or chain modes and to setup either enhanced or
4039  *  normal descriptors.
4040  */
4041 static int stmmac_hw_init(struct stmmac_priv *priv)
4042 {
4043 	struct mac_device_info *mac;
4044 
4045 	/* Identify the MAC HW device */
4046 	if (priv->plat->setup) {
4047 		mac = priv->plat->setup(priv);
4048 	} else if (priv->plat->has_gmac) {
4049 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
4050 		mac = dwmac1000_setup(priv->ioaddr,
4051 				      priv->plat->multicast_filter_bins,
4052 				      priv->plat->unicast_filter_entries,
4053 				      &priv->synopsys_id);
4054 	} else if (priv->plat->has_gmac4) {
4055 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
4056 		mac = dwmac4_setup(priv->ioaddr,
4057 				   priv->plat->multicast_filter_bins,
4058 				   priv->plat->unicast_filter_entries,
4059 				   &priv->synopsys_id);
4060 	} else {
4061 		mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
4062 	}
4063 	if (!mac)
4064 		return -ENOMEM;
4065 
4066 	priv->hw = mac;
4067 
4068 	/* dwmac-sun8i only work in chain mode */
4069 	if (priv->plat->has_sun8i)
4070 		chain_mode = 1;
4071 
4072 	/* To use the chained or ring mode */
4073 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4074 		priv->hw->mode = &dwmac4_ring_mode_ops;
4075 	} else {
4076 		if (chain_mode) {
4077 			priv->hw->mode = &chain_mode_ops;
4078 			dev_info(priv->device, "Chain mode enabled\n");
4079 			priv->mode = STMMAC_CHAIN_MODE;
4080 		} else {
4081 			priv->hw->mode = &ring_mode_ops;
4082 			dev_info(priv->device, "Ring mode enabled\n");
4083 			priv->mode = STMMAC_RING_MODE;
4084 		}
4085 	}
4086 
4087 	/* Get the HW capability (new GMAC newer than 3.50a) */
4088 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4089 	if (priv->hw_cap_support) {
4090 		dev_info(priv->device, "DMA HW capability register supported\n");
4091 
4092 		/* We can override some gmac/dma configuration fields: e.g.
4093 		 * enh_desc, tx_coe (e.g. that are passed through the
4094 		 * platform) with the values from the HW capability
4095 		 * register (if supported).
4096 		 */
4097 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4098 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4099 		priv->hw->pmt = priv->plat->pmt;
4100 
4101 		/* TXCOE doesn't work in thresh DMA mode */
4102 		if (priv->plat->force_thresh_dma_mode)
4103 			priv->plat->tx_coe = 0;
4104 		else
4105 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4106 
4107 		/* In case of GMAC4 rx_coe is from HW cap register. */
4108 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4109 
4110 		if (priv->dma_cap.rx_coe_type2)
4111 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4112 		else if (priv->dma_cap.rx_coe_type1)
4113 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4114 
4115 	} else {
4116 		dev_info(priv->device, "No HW DMA feature register supported\n");
4117 	}
4118 
4119 	/* To use alternate (extended), normal or GMAC4 descriptor structures */
4120 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
4121 		priv->hw->desc = &dwmac4_desc_ops;
4122 	else
4123 		stmmac_selec_desc_mode(priv);
4124 
4125 	if (priv->plat->rx_coe) {
4126 		priv->hw->rx_csum = priv->plat->rx_coe;
4127 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4128 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4129 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4130 	}
4131 	if (priv->plat->tx_coe)
4132 		dev_info(priv->device, "TX Checksum insertion supported\n");
4133 
4134 	if (priv->plat->pmt) {
4135 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4136 		device_set_wakeup_capable(priv->device, 1);
4137 	}
4138 
4139 	if (priv->dma_cap.tsoen)
4140 		dev_info(priv->device, "TSO supported\n");
4141 
4142 	return 0;
4143 }
4144 
4145 /**
4146  * stmmac_dvr_probe
4147  * @device: device pointer
4148  * @plat_dat: platform data pointer
4149  * @res: stmmac resource pointer
4150  * Description: this is the main probe function used to
4151  * call the alloc_etherdev, allocate the priv structure.
4152  * Return:
4153  * returns 0 on success, otherwise errno.
4154  */
4155 int stmmac_dvr_probe(struct device *device,
4156 		     struct plat_stmmacenet_data *plat_dat,
4157 		     struct stmmac_resources *res)
4158 {
4159 	struct net_device *ndev = NULL;
4160 	struct stmmac_priv *priv;
4161 	int ret = 0;
4162 	u32 queue;
4163 
4164 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4165 				  MTL_MAX_TX_QUEUES,
4166 				  MTL_MAX_RX_QUEUES);
4167 	if (!ndev)
4168 		return -ENOMEM;
4169 
4170 	SET_NETDEV_DEV(ndev, device);
4171 
4172 	priv = netdev_priv(ndev);
4173 	priv->device = device;
4174 	priv->dev = ndev;
4175 
4176 	stmmac_set_ethtool_ops(ndev);
4177 	priv->pause = pause;
4178 	priv->plat = plat_dat;
4179 	priv->ioaddr = res->addr;
4180 	priv->dev->base_addr = (unsigned long)res->addr;
4181 
4182 	priv->dev->irq = res->irq;
4183 	priv->wol_irq = res->wol_irq;
4184 	priv->lpi_irq = res->lpi_irq;
4185 
4186 	if (res->mac)
4187 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4188 
4189 	dev_set_drvdata(device, priv->dev);
4190 
4191 	/* Verify driver arguments */
4192 	stmmac_verify_args();
4193 
4194 	/* Override with kernel parameters if supplied XXX CRS XXX
4195 	 * this needs to have multiple instances
4196 	 */
4197 	if ((phyaddr >= 0) && (phyaddr <= 31))
4198 		priv->plat->phy_addr = phyaddr;
4199 
4200 	if (priv->plat->stmmac_rst) {
4201 		ret = reset_control_assert(priv->plat->stmmac_rst);
4202 		reset_control_deassert(priv->plat->stmmac_rst);
4203 		/* Some reset controllers have only reset callback instead of
4204 		 * assert + deassert callbacks pair.
4205 		 */
4206 		if (ret == -ENOTSUPP)
4207 			reset_control_reset(priv->plat->stmmac_rst);
4208 	}
4209 
4210 	/* Init MAC and get the capabilities */
4211 	ret = stmmac_hw_init(priv);
4212 	if (ret)
4213 		goto error_hw_init;
4214 
4215 	/* Configure real RX and TX queues */
4216 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4217 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4218 
4219 	ndev->netdev_ops = &stmmac_netdev_ops;
4220 
4221 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4222 			    NETIF_F_RXCSUM;
4223 
4224 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4225 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4226 		priv->tso = true;
4227 		dev_info(priv->device, "TSO feature enabled\n");
4228 	}
4229 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4230 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4231 #ifdef STMMAC_VLAN_TAG_USED
4232 	/* Both mac100 and gmac support receive VLAN tag detection */
4233 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4234 #endif
4235 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4236 
4237 	/* MTU range: 46 - hw-specific max */
4238 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4239 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4240 		ndev->max_mtu = JUMBO_LEN;
4241 	else
4242 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4243 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4244 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
4245 	 */
4246 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4247 	    (priv->plat->maxmtu >= ndev->min_mtu))
4248 		ndev->max_mtu = priv->plat->maxmtu;
4249 	else if (priv->plat->maxmtu < ndev->min_mtu)
4250 		dev_warn(priv->device,
4251 			 "%s: warning: maxmtu having invalid value (%d)\n",
4252 			 __func__, priv->plat->maxmtu);
4253 
4254 	if (flow_ctrl)
4255 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4256 
4257 	/* Rx Watchdog is available in the COREs newer than the 3.40.
4258 	 * In some case, for example on bugged HW this feature
4259 	 * has to be disable and this can be done by passing the
4260 	 * riwt_off field from the platform.
4261 	 */
4262 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4263 		priv->use_riwt = 1;
4264 		dev_info(priv->device,
4265 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4266 	}
4267 
4268 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4269 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4270 
4271 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4272 			       (8 * priv->plat->rx_queues_to_use));
4273 	}
4274 
4275 	spin_lock_init(&priv->lock);
4276 
4277 	/* If a specific clk_csr value is passed from the platform
4278 	 * this means that the CSR Clock Range selection cannot be
4279 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
4280 	 * set the MDC clock dynamically according to the csr actual
4281 	 * clock input.
4282 	 */
4283 	if (!priv->plat->clk_csr)
4284 		stmmac_clk_csr_set(priv);
4285 	else
4286 		priv->clk_csr = priv->plat->clk_csr;
4287 
4288 	stmmac_check_pcs_mode(priv);
4289 
4290 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4291 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4292 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4293 		/* MDIO bus Registration */
4294 		ret = stmmac_mdio_register(ndev);
4295 		if (ret < 0) {
4296 			dev_err(priv->device,
4297 				"%s: MDIO bus (id: %d) registration failed",
4298 				__func__, priv->plat->bus_id);
4299 			goto error_mdio_register;
4300 		}
4301 	}
4302 
4303 	ret = register_netdev(ndev);
4304 	if (ret) {
4305 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4306 			__func__, ret);
4307 		goto error_netdev_register;
4308 	}
4309 
4310 	return ret;
4311 
4312 error_netdev_register:
4313 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4314 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4315 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4316 		stmmac_mdio_unregister(ndev);
4317 error_mdio_register:
4318 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4319 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4320 
4321 		netif_napi_del(&rx_q->napi);
4322 	}
4323 error_hw_init:
4324 	free_netdev(ndev);
4325 
4326 	return ret;
4327 }
4328 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4329 
4330 /**
4331  * stmmac_dvr_remove
4332  * @dev: device pointer
4333  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
4334  * changes the link status, releases the DMA descriptor rings.
4335  */
4336 int stmmac_dvr_remove(struct device *dev)
4337 {
4338 	struct net_device *ndev = dev_get_drvdata(dev);
4339 	struct stmmac_priv *priv = netdev_priv(ndev);
4340 
4341 	netdev_info(priv->dev, "%s: removing driver", __func__);
4342 
4343 	stmmac_stop_all_dma(priv);
4344 
4345 	priv->hw->mac->set_mac(priv->ioaddr, false);
4346 	netif_carrier_off(ndev);
4347 	unregister_netdev(ndev);
4348 	if (priv->plat->stmmac_rst)
4349 		reset_control_assert(priv->plat->stmmac_rst);
4350 	clk_disable_unprepare(priv->plat->pclk);
4351 	clk_disable_unprepare(priv->plat->stmmac_clk);
4352 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4353 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4354 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4355 		stmmac_mdio_unregister(ndev);
4356 	free_netdev(ndev);
4357 
4358 	return 0;
4359 }
4360 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4361 
4362 /**
4363  * stmmac_suspend - suspend callback
4364  * @dev: device pointer
4365  * Description: this is the function to suspend the device and it is called
4366  * by the platform driver to stop the network queue, release the resources,
4367  * program the PMT register (for WoL), clean and release driver resources.
4368  */
4369 int stmmac_suspend(struct device *dev)
4370 {
4371 	struct net_device *ndev = dev_get_drvdata(dev);
4372 	struct stmmac_priv *priv = netdev_priv(ndev);
4373 	unsigned long flags;
4374 
4375 	if (!ndev || !netif_running(ndev))
4376 		return 0;
4377 
4378 	if (ndev->phydev)
4379 		phy_stop(ndev->phydev);
4380 
4381 	spin_lock_irqsave(&priv->lock, flags);
4382 
4383 	netif_device_detach(ndev);
4384 	stmmac_stop_all_queues(priv);
4385 
4386 	stmmac_disable_all_queues(priv);
4387 
4388 	/* Stop TX/RX DMA */
4389 	stmmac_stop_all_dma(priv);
4390 
4391 	/* Enable Power down mode by programming the PMT regs */
4392 	if (device_may_wakeup(priv->device)) {
4393 		priv->hw->mac->pmt(priv->hw, priv->wolopts);
4394 		priv->irq_wake = 1;
4395 	} else {
4396 		priv->hw->mac->set_mac(priv->ioaddr, false);
4397 		pinctrl_pm_select_sleep_state(priv->device);
4398 		/* Disable clock in case of PWM is off */
4399 		clk_disable(priv->plat->pclk);
4400 		clk_disable(priv->plat->stmmac_clk);
4401 	}
4402 	spin_unlock_irqrestore(&priv->lock, flags);
4403 
4404 	priv->oldlink = false;
4405 	priv->speed = SPEED_UNKNOWN;
4406 	priv->oldduplex = DUPLEX_UNKNOWN;
4407 	return 0;
4408 }
4409 EXPORT_SYMBOL_GPL(stmmac_suspend);
4410 
4411 /**
4412  * stmmac_reset_queues_param - reset queue parameters
4413  * @dev: device pointer
4414  */
4415 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4416 {
4417 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4418 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4419 	u32 queue;
4420 
4421 	for (queue = 0; queue < rx_cnt; queue++) {
4422 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4423 
4424 		rx_q->cur_rx = 0;
4425 		rx_q->dirty_rx = 0;
4426 	}
4427 
4428 	for (queue = 0; queue < tx_cnt; queue++) {
4429 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4430 
4431 		tx_q->cur_tx = 0;
4432 		tx_q->dirty_tx = 0;
4433 	}
4434 }
4435 
4436 /**
4437  * stmmac_resume - resume callback
4438  * @dev: device pointer
4439  * Description: when resume this function is invoked to setup the DMA and CORE
4440  * in a usable state.
4441  */
4442 int stmmac_resume(struct device *dev)
4443 {
4444 	struct net_device *ndev = dev_get_drvdata(dev);
4445 	struct stmmac_priv *priv = netdev_priv(ndev);
4446 	unsigned long flags;
4447 
4448 	if (!netif_running(ndev))
4449 		return 0;
4450 
4451 	/* Power Down bit, into the PM register, is cleared
4452 	 * automatically as soon as a magic packet or a Wake-up frame
4453 	 * is received. Anyway, it's better to manually clear
4454 	 * this bit because it can generate problems while resuming
4455 	 * from another devices (e.g. serial console).
4456 	 */
4457 	if (device_may_wakeup(priv->device)) {
4458 		spin_lock_irqsave(&priv->lock, flags);
4459 		priv->hw->mac->pmt(priv->hw, 0);
4460 		spin_unlock_irqrestore(&priv->lock, flags);
4461 		priv->irq_wake = 0;
4462 	} else {
4463 		pinctrl_pm_select_default_state(priv->device);
4464 		/* enable the clk previously disabled */
4465 		clk_enable(priv->plat->stmmac_clk);
4466 		clk_enable(priv->plat->pclk);
4467 		/* reset the phy so that it's ready */
4468 		if (priv->mii)
4469 			stmmac_mdio_reset(priv->mii);
4470 	}
4471 
4472 	netif_device_attach(ndev);
4473 
4474 	spin_lock_irqsave(&priv->lock, flags);
4475 
4476 	stmmac_reset_queues_param(priv);
4477 
4478 	/* reset private mss value to force mss context settings at
4479 	 * next tso xmit (only used for gmac4).
4480 	 */
4481 	priv->mss = 0;
4482 
4483 	stmmac_clear_descriptors(priv);
4484 
4485 	stmmac_hw_setup(ndev, false);
4486 	stmmac_init_tx_coalesce(priv);
4487 	stmmac_set_rx_mode(ndev);
4488 
4489 	stmmac_enable_all_queues(priv);
4490 
4491 	stmmac_start_all_queues(priv);
4492 
4493 	spin_unlock_irqrestore(&priv->lock, flags);
4494 
4495 	if (ndev->phydev)
4496 		phy_start(ndev->phydev);
4497 
4498 	return 0;
4499 }
4500 EXPORT_SYMBOL_GPL(stmmac_resume);
4501 
4502 #ifndef MODULE
4503 static int __init stmmac_cmdline_opt(char *str)
4504 {
4505 	char *opt;
4506 
4507 	if (!str || !*str)
4508 		return -EINVAL;
4509 	while ((opt = strsep(&str, ",")) != NULL) {
4510 		if (!strncmp(opt, "debug:", 6)) {
4511 			if (kstrtoint(opt + 6, 0, &debug))
4512 				goto err;
4513 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4514 			if (kstrtoint(opt + 8, 0, &phyaddr))
4515 				goto err;
4516 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4517 			if (kstrtoint(opt + 7, 0, &buf_sz))
4518 				goto err;
4519 		} else if (!strncmp(opt, "tc:", 3)) {
4520 			if (kstrtoint(opt + 3, 0, &tc))
4521 				goto err;
4522 		} else if (!strncmp(opt, "watchdog:", 9)) {
4523 			if (kstrtoint(opt + 9, 0, &watchdog))
4524 				goto err;
4525 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4526 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4527 				goto err;
4528 		} else if (!strncmp(opt, "pause:", 6)) {
4529 			if (kstrtoint(opt + 6, 0, &pause))
4530 				goto err;
4531 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4532 			if (kstrtoint(opt + 10, 0, &eee_timer))
4533 				goto err;
4534 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4535 			if (kstrtoint(opt + 11, 0, &chain_mode))
4536 				goto err;
4537 		}
4538 	}
4539 	return 0;
4540 
4541 err:
4542 	pr_err("%s: ERROR broken module parameter conversion", __func__);
4543 	return -EINVAL;
4544 }
4545 
4546 __setup("stmmaceth=", stmmac_cmdline_opt);
4547 #endif /* MODULE */
4548 
4549 static int __init stmmac_init(void)
4550 {
4551 #ifdef CONFIG_DEBUG_FS
4552 	/* Create debugfs main directory if it doesn't exist yet */
4553 	if (!stmmac_fs_dir) {
4554 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4555 
4556 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4557 			pr_err("ERROR %s, debugfs create directory failed\n",
4558 			       STMMAC_RESOURCE_NAME);
4559 
4560 			return -ENOMEM;
4561 		}
4562 	}
4563 #endif
4564 
4565 	return 0;
4566 }
4567 
4568 static void __exit stmmac_exit(void)
4569 {
4570 #ifdef CONFIG_DEBUG_FS
4571 	debugfs_remove_recursive(stmmac_fs_dir);
4572 #endif
4573 }
4574 
4575 module_init(stmmac_init)
4576 module_exit(stmmac_exit)
4577 
4578 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4579 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4580 MODULE_LICENSE("GPL");
4581