1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 
54 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
55 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
56 
57 /* Module parameters */
58 #define TX_TIMEO	5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62 
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66 
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70 
71 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
73 
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77 
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81 
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86 
87 #define	DEFAULT_BUFSIZE	1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91 
92 #define	STMMAC_RX_COPYBREAK	256
93 
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
96 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97 
98 #define STMMAC_DEFAULT_LPI_TIMER	1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103 
104 /* By default the driver will use the ring mode to manage tx and rx descriptors,
105  * but allow user to force to use the chain instead of the ring
106  */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
110 
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112 
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117 
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119 
120 /**
121  * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and set a default in case of
123  * errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127 	if (unlikely(watchdog < 0))
128 		watchdog = TX_TIMEO;
129 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130 		buf_sz = DEFAULT_BUFSIZE;
131 	if (unlikely(flow_ctrl > 1))
132 		flow_ctrl = FLOW_AUTO;
133 	else if (likely(flow_ctrl < 0))
134 		flow_ctrl = FLOW_OFF;
135 	if (unlikely((pause < 0) || (pause > 0xffff)))
136 		pause = PAUSE_TIME;
137 	if (eee_timer < 0)
138 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140 
141 /**
142  * stmmac_disable_all_queues - Disable all queues
143  * @priv: driver private structure
144  */
145 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
146 {
147 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
148 	u32 queue;
149 
150 	for (queue = 0; queue < rx_queues_cnt; queue++) {
151 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
152 
153 		napi_disable(&rx_q->napi);
154 	}
155 }
156 
157 /**
158  * stmmac_enable_all_queues - Enable all queues
159  * @priv: driver private structure
160  */
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162 {
163 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164 	u32 queue;
165 
166 	for (queue = 0; queue < rx_queues_cnt; queue++) {
167 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
168 
169 		napi_enable(&rx_q->napi);
170 	}
171 }
172 
173 /**
174  * stmmac_stop_all_queues - Stop all queues
175  * @priv: driver private structure
176  */
177 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
178 {
179 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
180 	u32 queue;
181 
182 	for (queue = 0; queue < tx_queues_cnt; queue++)
183 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
184 }
185 
186 /**
187  * stmmac_start_all_queues - Start all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_start_all_queues(struct stmmac_priv *priv)
191 {
192 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193 	u32 queue;
194 
195 	for (queue = 0; queue < tx_queues_cnt; queue++)
196 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198 
199 /**
200  * stmmac_clk_csr_set - dynamically set the MDC clock
201  * @priv: driver private structure
202  * Description: this is to dynamically set the MDC clock according to the csr
203  * clock input.
204  * Note:
205  *	If a specific clk_csr value is passed from the platform
206  *	this means that the CSR Clock Range selection cannot be
207  *	changed at run-time and it is fixed (as reported in the driver
208  *	documentation). Viceversa the driver will try to set the MDC
209  *	clock dynamically according to the actual clock input.
210  */
211 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
212 {
213 	u32 clk_rate;
214 
215 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
216 
217 	/* Platform provided default clk_csr would be assumed valid
218 	 * for all other cases except for the below mentioned ones.
219 	 * For values higher than the IEEE 802.3 specified frequency
220 	 * we can not estimate the proper divider as it is not known
221 	 * the frequency of clk_csr_i. So we do not change the default
222 	 * divider.
223 	 */
224 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
225 		if (clk_rate < CSR_F_35M)
226 			priv->clk_csr = STMMAC_CSR_20_35M;
227 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
228 			priv->clk_csr = STMMAC_CSR_35_60M;
229 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
230 			priv->clk_csr = STMMAC_CSR_60_100M;
231 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
232 			priv->clk_csr = STMMAC_CSR_100_150M;
233 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
234 			priv->clk_csr = STMMAC_CSR_150_250M;
235 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
236 			priv->clk_csr = STMMAC_CSR_250_300M;
237 	}
238 }
239 
240 static void print_pkt(unsigned char *buf, int len)
241 {
242 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
243 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
244 }
245 
246 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
247 {
248 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
249 	u32 avail;
250 
251 	if (tx_q->dirty_tx > tx_q->cur_tx)
252 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
253 	else
254 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
255 
256 	return avail;
257 }
258 
259 /**
260  * stmmac_rx_dirty - Get RX queue dirty
261  * @priv: driver private structure
262  * @queue: RX queue index
263  */
264 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
265 {
266 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
267 	u32 dirty;
268 
269 	if (rx_q->dirty_rx <= rx_q->cur_rx)
270 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
271 	else
272 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
273 
274 	return dirty;
275 }
276 
277 /**
278  * stmmac_hw_fix_mac_speed - callback for speed selection
279  * @priv: driver private structure
280  * Description: on some platforms (e.g. ST), some HW system configuration
281  * registers have to be set according to the link speed negotiated.
282  */
283 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
284 {
285 	struct net_device *ndev = priv->dev;
286 	struct phy_device *phydev = ndev->phydev;
287 
288 	if (likely(priv->plat->fix_mac_speed))
289 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
290 }
291 
292 /**
293  * stmmac_enable_eee_mode - check and enter in LPI mode
294  * @priv: driver private structure
295  * Description: this function is to verify and enter in LPI mode in case of
296  * EEE.
297  */
298 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
299 {
300 	u32 tx_cnt = priv->plat->tx_queues_to_use;
301 	u32 queue;
302 
303 	/* check if all TX queues have the work finished */
304 	for (queue = 0; queue < tx_cnt; queue++) {
305 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
306 
307 		if (tx_q->dirty_tx != tx_q->cur_tx)
308 			return; /* still unfinished work */
309 	}
310 
311 	/* Check and enter in LPI mode */
312 	if (!priv->tx_path_in_lpi_mode)
313 		priv->hw->mac->set_eee_mode(priv->hw,
314 					    priv->plat->en_tx_lpi_clockgating);
315 }
316 
317 /**
318  * stmmac_disable_eee_mode - disable and exit from LPI mode
319  * @priv: driver private structure
320  * Description: this function is to exit and disable EEE in case of
321  * LPI state is true. This is called by the xmit.
322  */
323 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
324 {
325 	priv->hw->mac->reset_eee_mode(priv->hw);
326 	del_timer_sync(&priv->eee_ctrl_timer);
327 	priv->tx_path_in_lpi_mode = false;
328 }
329 
330 /**
331  * stmmac_eee_ctrl_timer - EEE TX SW timer.
332  * @arg : data hook
333  * Description:
334  *  if there is no data transfer and if we are not in LPI state,
335  *  then MAC Transmitter can be moved to LPI state.
336  */
337 static void stmmac_eee_ctrl_timer(unsigned long arg)
338 {
339 	struct stmmac_priv *priv = (struct stmmac_priv *)arg;
340 
341 	stmmac_enable_eee_mode(priv);
342 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
343 }
344 
345 /**
346  * stmmac_eee_init - init EEE
347  * @priv: driver private structure
348  * Description:
349  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
350  *  can also manage EEE, this function enable the LPI state and start related
351  *  timer.
352  */
353 bool stmmac_eee_init(struct stmmac_priv *priv)
354 {
355 	struct net_device *ndev = priv->dev;
356 	unsigned long flags;
357 	bool ret = false;
358 
359 	/* Using PCS we cannot dial with the phy registers at this stage
360 	 * so we do not support extra feature like EEE.
361 	 */
362 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
363 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
364 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
365 		goto out;
366 
367 	/* MAC core supports the EEE feature. */
368 	if (priv->dma_cap.eee) {
369 		int tx_lpi_timer = priv->tx_lpi_timer;
370 
371 		/* Check if the PHY supports EEE */
372 		if (phy_init_eee(ndev->phydev, 1)) {
373 			/* To manage at run-time if the EEE cannot be supported
374 			 * anymore (for example because the lp caps have been
375 			 * changed).
376 			 * In that case the driver disable own timers.
377 			 */
378 			spin_lock_irqsave(&priv->lock, flags);
379 			if (priv->eee_active) {
380 				netdev_dbg(priv->dev, "disable EEE\n");
381 				del_timer_sync(&priv->eee_ctrl_timer);
382 				priv->hw->mac->set_eee_timer(priv->hw, 0,
383 							     tx_lpi_timer);
384 			}
385 			priv->eee_active = 0;
386 			spin_unlock_irqrestore(&priv->lock, flags);
387 			goto out;
388 		}
389 		/* Activate the EEE and start timers */
390 		spin_lock_irqsave(&priv->lock, flags);
391 		if (!priv->eee_active) {
392 			priv->eee_active = 1;
393 			setup_timer(&priv->eee_ctrl_timer,
394 				    stmmac_eee_ctrl_timer,
395 				    (unsigned long)priv);
396 			mod_timer(&priv->eee_ctrl_timer,
397 				  STMMAC_LPI_T(eee_timer));
398 
399 			priv->hw->mac->set_eee_timer(priv->hw,
400 						     STMMAC_DEFAULT_LIT_LS,
401 						     tx_lpi_timer);
402 		}
403 		/* Set HW EEE according to the speed */
404 		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
405 
406 		ret = true;
407 		spin_unlock_irqrestore(&priv->lock, flags);
408 
409 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
410 	}
411 out:
412 	return ret;
413 }
414 
415 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
416  * @priv: driver private structure
417  * @p : descriptor pointer
418  * @skb : the socket buffer
419  * Description :
420  * This function will read timestamp from the descriptor & pass it to stack.
421  * and also perform some sanity checks.
422  */
423 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
424 				   struct dma_desc *p, struct sk_buff *skb)
425 {
426 	struct skb_shared_hwtstamps shhwtstamp;
427 	u64 ns;
428 
429 	if (!priv->hwts_tx_en)
430 		return;
431 
432 	/* exit if skb doesn't support hw tstamp */
433 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
434 		return;
435 
436 	/* check tx tstamp status */
437 	if (priv->hw->desc->get_tx_timestamp_status(p)) {
438 		/* get the valid tstamp */
439 		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
440 
441 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
442 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
443 
444 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
445 		/* pass tstamp to stack */
446 		skb_tstamp_tx(skb, &shhwtstamp);
447 	}
448 
449 	return;
450 }
451 
452 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
453  * @priv: driver private structure
454  * @p : descriptor pointer
455  * @np : next descriptor pointer
456  * @skb : the socket buffer
457  * Description :
458  * This function will read received packet's timestamp from the descriptor
459  * and pass it to stack. It also perform some sanity checks.
460  */
461 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
462 				   struct dma_desc *np, struct sk_buff *skb)
463 {
464 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
465 	u64 ns;
466 
467 	if (!priv->hwts_rx_en)
468 		return;
469 
470 	/* Check if timestamp is available */
471 	if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
472 		/* For GMAC4, the valid timestamp is from CTX next desc. */
473 		if (priv->plat->has_gmac4)
474 			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
475 		else
476 			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
477 
478 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
479 		shhwtstamp = skb_hwtstamps(skb);
480 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
481 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
482 	} else  {
483 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
484 	}
485 }
486 
487 /**
488  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
489  *  @dev: device pointer.
490  *  @ifr: An IOCTL specific structure, that can contain a pointer to
491  *  a proprietary structure used to pass information to the driver.
492  *  Description:
493  *  This function configures the MAC to enable/disable both outgoing(TX)
494  *  and incoming(RX) packets time stamping based on user input.
495  *  Return Value:
496  *  0 on success and an appropriate -ve integer on failure.
497  */
498 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
499 {
500 	struct stmmac_priv *priv = netdev_priv(dev);
501 	struct hwtstamp_config config;
502 	struct timespec64 now;
503 	u64 temp = 0;
504 	u32 ptp_v2 = 0;
505 	u32 tstamp_all = 0;
506 	u32 ptp_over_ipv4_udp = 0;
507 	u32 ptp_over_ipv6_udp = 0;
508 	u32 ptp_over_ethernet = 0;
509 	u32 snap_type_sel = 0;
510 	u32 ts_master_en = 0;
511 	u32 ts_event_en = 0;
512 	u32 value = 0;
513 	u32 sec_inc;
514 
515 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
516 		netdev_alert(priv->dev, "No support for HW time stamping\n");
517 		priv->hwts_tx_en = 0;
518 		priv->hwts_rx_en = 0;
519 
520 		return -EOPNOTSUPP;
521 	}
522 
523 	if (copy_from_user(&config, ifr->ifr_data,
524 			   sizeof(struct hwtstamp_config)))
525 		return -EFAULT;
526 
527 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
528 		   __func__, config.flags, config.tx_type, config.rx_filter);
529 
530 	/* reserved for future extensions */
531 	if (config.flags)
532 		return -EINVAL;
533 
534 	if (config.tx_type != HWTSTAMP_TX_OFF &&
535 	    config.tx_type != HWTSTAMP_TX_ON)
536 		return -ERANGE;
537 
538 	if (priv->adv_ts) {
539 		switch (config.rx_filter) {
540 		case HWTSTAMP_FILTER_NONE:
541 			/* time stamp no incoming packet at all */
542 			config.rx_filter = HWTSTAMP_FILTER_NONE;
543 			break;
544 
545 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
546 			/* PTP v1, UDP, any kind of event packet */
547 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
548 			/* take time stamp for all event messages */
549 			if (priv->plat->has_gmac4)
550 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
551 			else
552 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
553 
554 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
555 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
556 			break;
557 
558 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
559 			/* PTP v1, UDP, Sync packet */
560 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
561 			/* take time stamp for SYNC messages only */
562 			ts_event_en = PTP_TCR_TSEVNTENA;
563 
564 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
565 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
566 			break;
567 
568 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
569 			/* PTP v1, UDP, Delay_req packet */
570 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
571 			/* take time stamp for Delay_Req messages only */
572 			ts_master_en = PTP_TCR_TSMSTRENA;
573 			ts_event_en = PTP_TCR_TSEVNTENA;
574 
575 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
576 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
577 			break;
578 
579 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
580 			/* PTP v2, UDP, any kind of event packet */
581 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
582 			ptp_v2 = PTP_TCR_TSVER2ENA;
583 			/* take time stamp for all event messages */
584 			if (priv->plat->has_gmac4)
585 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
586 			else
587 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
588 
589 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
590 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
591 			break;
592 
593 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
594 			/* PTP v2, UDP, Sync packet */
595 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
596 			ptp_v2 = PTP_TCR_TSVER2ENA;
597 			/* take time stamp for SYNC messages only */
598 			ts_event_en = PTP_TCR_TSEVNTENA;
599 
600 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
601 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
602 			break;
603 
604 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
605 			/* PTP v2, UDP, Delay_req packet */
606 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
607 			ptp_v2 = PTP_TCR_TSVER2ENA;
608 			/* take time stamp for Delay_Req messages only */
609 			ts_master_en = PTP_TCR_TSMSTRENA;
610 			ts_event_en = PTP_TCR_TSEVNTENA;
611 
612 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
613 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
614 			break;
615 
616 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
617 			/* PTP v2/802.AS1 any layer, any kind of event packet */
618 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
619 			ptp_v2 = PTP_TCR_TSVER2ENA;
620 			/* take time stamp for all event messages */
621 			if (priv->plat->has_gmac4)
622 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
623 			else
624 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
625 
626 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
627 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
628 			ptp_over_ethernet = PTP_TCR_TSIPENA;
629 			break;
630 
631 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
632 			/* PTP v2/802.AS1, any layer, Sync packet */
633 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
634 			ptp_v2 = PTP_TCR_TSVER2ENA;
635 			/* take time stamp for SYNC messages only */
636 			ts_event_en = PTP_TCR_TSEVNTENA;
637 
638 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
639 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
640 			ptp_over_ethernet = PTP_TCR_TSIPENA;
641 			break;
642 
643 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
644 			/* PTP v2/802.AS1, any layer, Delay_req packet */
645 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
646 			ptp_v2 = PTP_TCR_TSVER2ENA;
647 			/* take time stamp for Delay_Req messages only */
648 			ts_master_en = PTP_TCR_TSMSTRENA;
649 			ts_event_en = PTP_TCR_TSEVNTENA;
650 
651 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
652 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
653 			ptp_over_ethernet = PTP_TCR_TSIPENA;
654 			break;
655 
656 		case HWTSTAMP_FILTER_ALL:
657 			/* time stamp any incoming packet */
658 			config.rx_filter = HWTSTAMP_FILTER_ALL;
659 			tstamp_all = PTP_TCR_TSENALL;
660 			break;
661 
662 		default:
663 			return -ERANGE;
664 		}
665 	} else {
666 		switch (config.rx_filter) {
667 		case HWTSTAMP_FILTER_NONE:
668 			config.rx_filter = HWTSTAMP_FILTER_NONE;
669 			break;
670 		default:
671 			/* PTP v1, UDP, any kind of event packet */
672 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
673 			break;
674 		}
675 	}
676 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
677 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
678 
679 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
680 		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
681 	else {
682 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
683 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
684 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
685 			 ts_master_en | snap_type_sel);
686 		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
687 
688 		/* program Sub Second Increment reg */
689 		sec_inc = priv->hw->ptp->config_sub_second_increment(
690 			priv->ptpaddr, priv->plat->clk_ptp_rate,
691 			priv->plat->has_gmac4);
692 		temp = div_u64(1000000000ULL, sec_inc);
693 
694 		/* calculate default added value:
695 		 * formula is :
696 		 * addend = (2^32)/freq_div_ratio;
697 		 * where, freq_div_ratio = 1e9ns/sec_inc
698 		 */
699 		temp = (u64)(temp << 32);
700 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
701 		priv->hw->ptp->config_addend(priv->ptpaddr,
702 					     priv->default_addend);
703 
704 		/* initialize system time */
705 		ktime_get_real_ts64(&now);
706 
707 		/* lower 32 bits of tv_sec are safe until y2106 */
708 		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
709 					    now.tv_nsec);
710 	}
711 
712 	return copy_to_user(ifr->ifr_data, &config,
713 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
714 }
715 
716 /**
717  * stmmac_init_ptp - init PTP
718  * @priv: driver private structure
719  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
720  * This is done by looking at the HW cap. register.
721  * This function also registers the ptp driver.
722  */
723 static int stmmac_init_ptp(struct stmmac_priv *priv)
724 {
725 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
726 		return -EOPNOTSUPP;
727 
728 	priv->adv_ts = 0;
729 	/* Check if adv_ts can be enabled for dwmac 4.x core */
730 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
731 		priv->adv_ts = 1;
732 	/* Dwmac 3.x core with extend_desc can support adv_ts */
733 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
734 		priv->adv_ts = 1;
735 
736 	if (priv->dma_cap.time_stamp)
737 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
738 
739 	if (priv->adv_ts)
740 		netdev_info(priv->dev,
741 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
742 
743 	priv->hw->ptp = &stmmac_ptp;
744 	priv->hwts_tx_en = 0;
745 	priv->hwts_rx_en = 0;
746 
747 	stmmac_ptp_register(priv);
748 
749 	return 0;
750 }
751 
752 static void stmmac_release_ptp(struct stmmac_priv *priv)
753 {
754 	if (priv->plat->clk_ptp_ref)
755 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
756 	stmmac_ptp_unregister(priv);
757 }
758 
759 /**
760  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
761  *  @priv: driver private structure
762  *  Description: It is used for configuring the flow control in all queues
763  */
764 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
765 {
766 	u32 tx_cnt = priv->plat->tx_queues_to_use;
767 
768 	priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
769 				 priv->pause, tx_cnt);
770 }
771 
772 /**
773  * stmmac_adjust_link - adjusts the link parameters
774  * @dev: net device structure
775  * Description: this is the helper called by the physical abstraction layer
776  * drivers to communicate the phy link status. According the speed and duplex
777  * this driver can invoke registered glue-logic as well.
778  * It also invoke the eee initialization because it could happen when switch
779  * on different networks (that are eee capable).
780  */
781 static void stmmac_adjust_link(struct net_device *dev)
782 {
783 	struct stmmac_priv *priv = netdev_priv(dev);
784 	struct phy_device *phydev = dev->phydev;
785 	unsigned long flags;
786 	int new_state = 0;
787 
788 	if (!phydev)
789 		return;
790 
791 	spin_lock_irqsave(&priv->lock, flags);
792 
793 	if (phydev->link) {
794 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
795 
796 		/* Now we make sure that we can be in full duplex mode.
797 		 * If not, we operate in half-duplex mode. */
798 		if (phydev->duplex != priv->oldduplex) {
799 			new_state = 1;
800 			if (!(phydev->duplex))
801 				ctrl &= ~priv->hw->link.duplex;
802 			else
803 				ctrl |= priv->hw->link.duplex;
804 			priv->oldduplex = phydev->duplex;
805 		}
806 		/* Flow Control operation */
807 		if (phydev->pause)
808 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
809 
810 		if (phydev->speed != priv->speed) {
811 			new_state = 1;
812 			switch (phydev->speed) {
813 			case 1000:
814 				if (priv->plat->has_gmac ||
815 				    priv->plat->has_gmac4)
816 					ctrl &= ~priv->hw->link.port;
817 				break;
818 			case 100:
819 				if (priv->plat->has_gmac ||
820 				    priv->plat->has_gmac4) {
821 					ctrl |= priv->hw->link.port;
822 					ctrl |= priv->hw->link.speed;
823 				} else {
824 					ctrl &= ~priv->hw->link.port;
825 				}
826 				break;
827 			case 10:
828 				if (priv->plat->has_gmac ||
829 				    priv->plat->has_gmac4) {
830 					ctrl |= priv->hw->link.port;
831 					ctrl &= ~(priv->hw->link.speed);
832 				} else {
833 					ctrl &= ~priv->hw->link.port;
834 				}
835 				break;
836 			default:
837 				netif_warn(priv, link, priv->dev,
838 					   "broken speed: %d\n", phydev->speed);
839 				phydev->speed = SPEED_UNKNOWN;
840 				break;
841 			}
842 			if (phydev->speed != SPEED_UNKNOWN)
843 				stmmac_hw_fix_mac_speed(priv);
844 			priv->speed = phydev->speed;
845 		}
846 
847 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
848 
849 		if (!priv->oldlink) {
850 			new_state = 1;
851 			priv->oldlink = 1;
852 		}
853 	} else if (priv->oldlink) {
854 		new_state = 1;
855 		priv->oldlink = 0;
856 		priv->speed = SPEED_UNKNOWN;
857 		priv->oldduplex = DUPLEX_UNKNOWN;
858 	}
859 
860 	if (new_state && netif_msg_link(priv))
861 		phy_print_status(phydev);
862 
863 	spin_unlock_irqrestore(&priv->lock, flags);
864 
865 	if (phydev->is_pseudo_fixed_link)
866 		/* Stop PHY layer to call the hook to adjust the link in case
867 		 * of a switch is attached to the stmmac driver.
868 		 */
869 		phydev->irq = PHY_IGNORE_INTERRUPT;
870 	else
871 		/* At this stage, init the EEE if supported.
872 		 * Never called in case of fixed_link.
873 		 */
874 		priv->eee_enabled = stmmac_eee_init(priv);
875 }
876 
877 /**
878  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
879  * @priv: driver private structure
880  * Description: this is to verify if the HW supports the PCS.
881  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
882  * configured for the TBI, RTBI, or SGMII PHY interface.
883  */
884 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
885 {
886 	int interface = priv->plat->interface;
887 
888 	if (priv->dma_cap.pcs) {
889 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
890 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
891 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
892 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
893 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
894 			priv->hw->pcs = STMMAC_PCS_RGMII;
895 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
896 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
897 			priv->hw->pcs = STMMAC_PCS_SGMII;
898 		}
899 	}
900 }
901 
902 /**
903  * stmmac_init_phy - PHY initialization
904  * @dev: net device structure
905  * Description: it initializes the driver's PHY state, and attaches the PHY
906  * to the mac driver.
907  *  Return value:
908  *  0 on success
909  */
910 static int stmmac_init_phy(struct net_device *dev)
911 {
912 	struct stmmac_priv *priv = netdev_priv(dev);
913 	struct phy_device *phydev;
914 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
915 	char bus_id[MII_BUS_ID_SIZE];
916 	int interface = priv->plat->interface;
917 	int max_speed = priv->plat->max_speed;
918 	priv->oldlink = 0;
919 	priv->speed = SPEED_UNKNOWN;
920 	priv->oldduplex = DUPLEX_UNKNOWN;
921 
922 	if (priv->plat->phy_node) {
923 		phydev = of_phy_connect(dev, priv->plat->phy_node,
924 					&stmmac_adjust_link, 0, interface);
925 	} else {
926 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
927 			 priv->plat->bus_id);
928 
929 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
930 			 priv->plat->phy_addr);
931 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
932 			   phy_id_fmt);
933 
934 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
935 				     interface);
936 	}
937 
938 	if (IS_ERR_OR_NULL(phydev)) {
939 		netdev_err(priv->dev, "Could not attach to PHY\n");
940 		if (!phydev)
941 			return -ENODEV;
942 
943 		return PTR_ERR(phydev);
944 	}
945 
946 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
947 	if ((interface == PHY_INTERFACE_MODE_MII) ||
948 	    (interface == PHY_INTERFACE_MODE_RMII) ||
949 		(max_speed < 1000 && max_speed > 0))
950 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
951 					 SUPPORTED_1000baseT_Full);
952 
953 	/*
954 	 * Broken HW is sometimes missing the pull-up resistor on the
955 	 * MDIO line, which results in reads to non-existent devices returning
956 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
957 	 * device as well.
958 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
959 	 */
960 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
961 		phy_disconnect(phydev);
962 		return -ENODEV;
963 	}
964 
965 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
966 	 * subsequent PHY polling, make sure we force a link transition if
967 	 * we have a UP/DOWN/UP transition
968 	 */
969 	if (phydev->is_pseudo_fixed_link)
970 		phydev->irq = PHY_POLL;
971 
972 	phy_attached_info(phydev);
973 	return 0;
974 }
975 
976 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
977 {
978 	u32 rx_cnt = priv->plat->rx_queues_to_use;
979 	void *head_rx;
980 	u32 queue;
981 
982 	/* Display RX rings */
983 	for (queue = 0; queue < rx_cnt; queue++) {
984 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
985 
986 		pr_info("\tRX Queue %u rings\n", queue);
987 
988 		if (priv->extend_desc)
989 			head_rx = (void *)rx_q->dma_erx;
990 		else
991 			head_rx = (void *)rx_q->dma_rx;
992 
993 		/* Display RX ring */
994 		priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
995 	}
996 }
997 
998 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
999 {
1000 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1001 	void *head_tx;
1002 	u32 queue;
1003 
1004 	/* Display TX rings */
1005 	for (queue = 0; queue < tx_cnt; queue++) {
1006 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1007 
1008 		pr_info("\tTX Queue %d rings\n", queue);
1009 
1010 		if (priv->extend_desc)
1011 			head_tx = (void *)tx_q->dma_etx;
1012 		else
1013 			head_tx = (void *)tx_q->dma_tx;
1014 
1015 		priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1016 	}
1017 }
1018 
1019 static void stmmac_display_rings(struct stmmac_priv *priv)
1020 {
1021 	/* Display RX ring */
1022 	stmmac_display_rx_rings(priv);
1023 
1024 	/* Display TX ring */
1025 	stmmac_display_tx_rings(priv);
1026 }
1027 
1028 static int stmmac_set_bfsize(int mtu, int bufsize)
1029 {
1030 	int ret = bufsize;
1031 
1032 	if (mtu >= BUF_SIZE_4KiB)
1033 		ret = BUF_SIZE_8KiB;
1034 	else if (mtu >= BUF_SIZE_2KiB)
1035 		ret = BUF_SIZE_4KiB;
1036 	else if (mtu > DEFAULT_BUFSIZE)
1037 		ret = BUF_SIZE_2KiB;
1038 	else
1039 		ret = DEFAULT_BUFSIZE;
1040 
1041 	return ret;
1042 }
1043 
1044 /**
1045  * stmmac_clear_rx_descriptors - clear RX descriptors
1046  * @priv: driver private structure
1047  * @queue: RX queue index
1048  * Description: this function is called to clear the RX descriptors
1049  * in case of both basic and extended descriptors are used.
1050  */
1051 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1052 {
1053 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1054 	int i;
1055 
1056 	/* Clear the RX descriptors */
1057 	for (i = 0; i < DMA_RX_SIZE; i++)
1058 		if (priv->extend_desc)
1059 			priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1060 						     priv->use_riwt, priv->mode,
1061 						     (i == DMA_RX_SIZE - 1));
1062 		else
1063 			priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1064 						     priv->use_riwt, priv->mode,
1065 						     (i == DMA_RX_SIZE - 1));
1066 }
1067 
1068 /**
1069  * stmmac_clear_tx_descriptors - clear tx descriptors
1070  * @priv: driver private structure
1071  * @queue: TX queue index.
1072  * Description: this function is called to clear the TX descriptors
1073  * in case of both basic and extended descriptors are used.
1074  */
1075 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1076 {
1077 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1078 	int i;
1079 
1080 	/* Clear the TX descriptors */
1081 	for (i = 0; i < DMA_TX_SIZE; i++)
1082 		if (priv->extend_desc)
1083 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1084 						     priv->mode,
1085 						     (i == DMA_TX_SIZE - 1));
1086 		else
1087 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1088 						     priv->mode,
1089 						     (i == DMA_TX_SIZE - 1));
1090 }
1091 
1092 /**
1093  * stmmac_clear_descriptors - clear descriptors
1094  * @priv: driver private structure
1095  * Description: this function is called to clear the TX and RX descriptors
1096  * in case of both basic and extended descriptors are used.
1097  */
1098 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1099 {
1100 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1101 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1102 	u32 queue;
1103 
1104 	/* Clear the RX descriptors */
1105 	for (queue = 0; queue < rx_queue_cnt; queue++)
1106 		stmmac_clear_rx_descriptors(priv, queue);
1107 
1108 	/* Clear the TX descriptors */
1109 	for (queue = 0; queue < tx_queue_cnt; queue++)
1110 		stmmac_clear_tx_descriptors(priv, queue);
1111 }
1112 
1113 /**
1114  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1115  * @priv: driver private structure
1116  * @p: descriptor pointer
1117  * @i: descriptor index
1118  * @flags: gfp flag
1119  * @queue: RX queue index
1120  * Description: this function is called to allocate a receive buffer, perform
1121  * the DMA mapping and init the descriptor.
1122  */
1123 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1124 				  int i, gfp_t flags, u32 queue)
1125 {
1126 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1127 	struct sk_buff *skb;
1128 
1129 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1130 	if (!skb) {
1131 		netdev_err(priv->dev,
1132 			   "%s: Rx init fails; skb is NULL\n", __func__);
1133 		return -ENOMEM;
1134 	}
1135 	rx_q->rx_skbuff[i] = skb;
1136 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1137 						priv->dma_buf_sz,
1138 						DMA_FROM_DEVICE);
1139 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1140 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1141 		dev_kfree_skb_any(skb);
1142 		return -EINVAL;
1143 	}
1144 
1145 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
1146 		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1147 	else
1148 		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1149 
1150 	if ((priv->hw->mode->init_desc3) &&
1151 	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
1152 		priv->hw->mode->init_desc3(p);
1153 
1154 	return 0;
1155 }
1156 
1157 /**
1158  * stmmac_free_rx_buffer - free RX dma buffers
1159  * @priv: private structure
1160  * @queue: RX queue index
1161  * @i: buffer index.
1162  */
1163 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1164 {
1165 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1166 
1167 	if (rx_q->rx_skbuff[i]) {
1168 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1169 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1170 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1171 	}
1172 	rx_q->rx_skbuff[i] = NULL;
1173 }
1174 
1175 /**
1176  * stmmac_free_tx_buffer - free RX dma buffers
1177  * @priv: private structure
1178  * @queue: RX queue index
1179  * @i: buffer index.
1180  */
1181 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1182 {
1183 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1184 
1185 	if (tx_q->tx_skbuff_dma[i].buf) {
1186 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1187 			dma_unmap_page(priv->device,
1188 				       tx_q->tx_skbuff_dma[i].buf,
1189 				       tx_q->tx_skbuff_dma[i].len,
1190 				       DMA_TO_DEVICE);
1191 		else
1192 			dma_unmap_single(priv->device,
1193 					 tx_q->tx_skbuff_dma[i].buf,
1194 					 tx_q->tx_skbuff_dma[i].len,
1195 					 DMA_TO_DEVICE);
1196 	}
1197 
1198 	if (tx_q->tx_skbuff[i]) {
1199 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1200 		tx_q->tx_skbuff[i] = NULL;
1201 		tx_q->tx_skbuff_dma[i].buf = 0;
1202 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1203 	}
1204 }
1205 
1206 /**
1207  * init_dma_rx_desc_rings - init the RX descriptor rings
1208  * @dev: net device structure
1209  * @flags: gfp flag.
1210  * Description: this function initializes the DMA RX descriptors
1211  * and allocates the socket buffers. It supports the chained and ring
1212  * modes.
1213  */
1214 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1215 {
1216 	struct stmmac_priv *priv = netdev_priv(dev);
1217 	u32 rx_count = priv->plat->rx_queues_to_use;
1218 	unsigned int bfsize = 0;
1219 	int ret = -ENOMEM;
1220 	int queue;
1221 	int i;
1222 
1223 	if (priv->hw->mode->set_16kib_bfsize)
1224 		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1225 
1226 	if (bfsize < BUF_SIZE_16KiB)
1227 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1228 
1229 	priv->dma_buf_sz = bfsize;
1230 
1231 	/* RX INITIALIZATION */
1232 	netif_dbg(priv, probe, priv->dev,
1233 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1234 
1235 	for (queue = 0; queue < rx_count; queue++) {
1236 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1237 
1238 		netif_dbg(priv, probe, priv->dev,
1239 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1240 			  (u32)rx_q->dma_rx_phy);
1241 
1242 		for (i = 0; i < DMA_RX_SIZE; i++) {
1243 			struct dma_desc *p;
1244 
1245 			if (priv->extend_desc)
1246 				p = &((rx_q->dma_erx + i)->basic);
1247 			else
1248 				p = rx_q->dma_rx + i;
1249 
1250 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1251 						     queue);
1252 			if (ret)
1253 				goto err_init_rx_buffers;
1254 
1255 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1256 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1257 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1258 		}
1259 
1260 		rx_q->cur_rx = 0;
1261 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1262 
1263 		stmmac_clear_rx_descriptors(priv, queue);
1264 
1265 		/* Setup the chained descriptor addresses */
1266 		if (priv->mode == STMMAC_CHAIN_MODE) {
1267 			if (priv->extend_desc)
1268 				priv->hw->mode->init(rx_q->dma_erx,
1269 						     rx_q->dma_rx_phy,
1270 						     DMA_RX_SIZE, 1);
1271 			else
1272 				priv->hw->mode->init(rx_q->dma_rx,
1273 						     rx_q->dma_rx_phy,
1274 						     DMA_RX_SIZE, 0);
1275 		}
1276 	}
1277 
1278 	buf_sz = bfsize;
1279 
1280 	return 0;
1281 
1282 err_init_rx_buffers:
1283 	while (queue >= 0) {
1284 		while (--i >= 0)
1285 			stmmac_free_rx_buffer(priv, queue, i);
1286 
1287 		if (queue == 0)
1288 			break;
1289 
1290 		i = DMA_RX_SIZE;
1291 		queue--;
1292 	}
1293 
1294 	return ret;
1295 }
1296 
1297 /**
1298  * init_dma_tx_desc_rings - init the TX descriptor rings
1299  * @dev: net device structure.
1300  * Description: this function initializes the DMA TX descriptors
1301  * and allocates the socket buffers. It supports the chained and ring
1302  * modes.
1303  */
1304 static int init_dma_tx_desc_rings(struct net_device *dev)
1305 {
1306 	struct stmmac_priv *priv = netdev_priv(dev);
1307 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1308 	u32 queue;
1309 	int i;
1310 
1311 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1312 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1313 
1314 		netif_dbg(priv, probe, priv->dev,
1315 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1316 			 (u32)tx_q->dma_tx_phy);
1317 
1318 		/* Setup the chained descriptor addresses */
1319 		if (priv->mode == STMMAC_CHAIN_MODE) {
1320 			if (priv->extend_desc)
1321 				priv->hw->mode->init(tx_q->dma_etx,
1322 						     tx_q->dma_tx_phy,
1323 						     DMA_TX_SIZE, 1);
1324 			else
1325 				priv->hw->mode->init(tx_q->dma_tx,
1326 						     tx_q->dma_tx_phy,
1327 						     DMA_TX_SIZE, 0);
1328 		}
1329 
1330 		for (i = 0; i < DMA_TX_SIZE; i++) {
1331 			struct dma_desc *p;
1332 			if (priv->extend_desc)
1333 				p = &((tx_q->dma_etx + i)->basic);
1334 			else
1335 				p = tx_q->dma_tx + i;
1336 
1337 			if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1338 				p->des0 = 0;
1339 				p->des1 = 0;
1340 				p->des2 = 0;
1341 				p->des3 = 0;
1342 			} else {
1343 				p->des2 = 0;
1344 			}
1345 
1346 			tx_q->tx_skbuff_dma[i].buf = 0;
1347 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1348 			tx_q->tx_skbuff_dma[i].len = 0;
1349 			tx_q->tx_skbuff_dma[i].last_segment = false;
1350 			tx_q->tx_skbuff[i] = NULL;
1351 		}
1352 
1353 		tx_q->dirty_tx = 0;
1354 		tx_q->cur_tx = 0;
1355 
1356 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1357 	}
1358 
1359 	return 0;
1360 }
1361 
1362 /**
1363  * init_dma_desc_rings - init the RX/TX descriptor rings
1364  * @dev: net device structure
1365  * @flags: gfp flag.
1366  * Description: this function initializes the DMA RX/TX descriptors
1367  * and allocates the socket buffers. It supports the chained and ring
1368  * modes.
1369  */
1370 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1371 {
1372 	struct stmmac_priv *priv = netdev_priv(dev);
1373 	int ret;
1374 
1375 	ret = init_dma_rx_desc_rings(dev, flags);
1376 	if (ret)
1377 		return ret;
1378 
1379 	ret = init_dma_tx_desc_rings(dev);
1380 
1381 	stmmac_clear_descriptors(priv);
1382 
1383 	if (netif_msg_hw(priv))
1384 		stmmac_display_rings(priv);
1385 
1386 	return ret;
1387 }
1388 
1389 /**
1390  * dma_free_rx_skbufs - free RX dma buffers
1391  * @priv: private structure
1392  * @queue: RX queue index
1393  */
1394 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1395 {
1396 	int i;
1397 
1398 	for (i = 0; i < DMA_RX_SIZE; i++)
1399 		stmmac_free_rx_buffer(priv, queue, i);
1400 }
1401 
1402 /**
1403  * dma_free_tx_skbufs - free TX dma buffers
1404  * @priv: private structure
1405  * @queue: TX queue index
1406  */
1407 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1408 {
1409 	int i;
1410 
1411 	for (i = 0; i < DMA_TX_SIZE; i++)
1412 		stmmac_free_tx_buffer(priv, queue, i);
1413 }
1414 
1415 /**
1416  * free_dma_rx_desc_resources - free RX dma desc resources
1417  * @priv: private structure
1418  */
1419 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1420 {
1421 	u32 rx_count = priv->plat->rx_queues_to_use;
1422 	u32 queue;
1423 
1424 	/* Free RX queue resources */
1425 	for (queue = 0; queue < rx_count; queue++) {
1426 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1427 
1428 		/* Release the DMA RX socket buffers */
1429 		dma_free_rx_skbufs(priv, queue);
1430 
1431 		/* Free DMA regions of consistent memory previously allocated */
1432 		if (!priv->extend_desc)
1433 			dma_free_coherent(priv->device,
1434 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1435 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1436 		else
1437 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1438 					  sizeof(struct dma_extended_desc),
1439 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1440 
1441 		kfree(rx_q->rx_skbuff_dma);
1442 		kfree(rx_q->rx_skbuff);
1443 	}
1444 }
1445 
1446 /**
1447  * free_dma_tx_desc_resources - free TX dma desc resources
1448  * @priv: private structure
1449  */
1450 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1451 {
1452 	u32 tx_count = priv->plat->tx_queues_to_use;
1453 	u32 queue = 0;
1454 
1455 	/* Free TX queue resources */
1456 	for (queue = 0; queue < tx_count; queue++) {
1457 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1458 
1459 		/* Release the DMA TX socket buffers */
1460 		dma_free_tx_skbufs(priv, queue);
1461 
1462 		/* Free DMA regions of consistent memory previously allocated */
1463 		if (!priv->extend_desc)
1464 			dma_free_coherent(priv->device,
1465 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1466 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1467 		else
1468 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1469 					  sizeof(struct dma_extended_desc),
1470 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1471 
1472 		kfree(tx_q->tx_skbuff_dma);
1473 		kfree(tx_q->tx_skbuff);
1474 	}
1475 }
1476 
1477 /**
1478  * alloc_dma_rx_desc_resources - alloc RX resources.
1479  * @priv: private structure
1480  * Description: according to which descriptor can be used (extend or basic)
1481  * this function allocates the resources for TX and RX paths. In case of
1482  * reception, for example, it pre-allocated the RX socket buffer in order to
1483  * allow zero-copy mechanism.
1484  */
1485 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1486 {
1487 	u32 rx_count = priv->plat->rx_queues_to_use;
1488 	int ret = -ENOMEM;
1489 	u32 queue;
1490 
1491 	/* RX queues buffers and DMA */
1492 	for (queue = 0; queue < rx_count; queue++) {
1493 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1494 
1495 		rx_q->queue_index = queue;
1496 		rx_q->priv_data = priv;
1497 
1498 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1499 						    sizeof(dma_addr_t),
1500 						    GFP_KERNEL);
1501 		if (!rx_q->rx_skbuff_dma)
1502 			return -ENOMEM;
1503 
1504 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1505 						sizeof(struct sk_buff *),
1506 						GFP_KERNEL);
1507 		if (!rx_q->rx_skbuff)
1508 			goto err_dma;
1509 
1510 		if (priv->extend_desc) {
1511 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1512 							    DMA_RX_SIZE *
1513 							    sizeof(struct
1514 							    dma_extended_desc),
1515 							    &rx_q->dma_rx_phy,
1516 							    GFP_KERNEL);
1517 			if (!rx_q->dma_erx)
1518 				goto err_dma;
1519 
1520 		} else {
1521 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1522 							   DMA_RX_SIZE *
1523 							   sizeof(struct
1524 							   dma_desc),
1525 							   &rx_q->dma_rx_phy,
1526 							   GFP_KERNEL);
1527 			if (!rx_q->dma_rx)
1528 				goto err_dma;
1529 		}
1530 	}
1531 
1532 	return 0;
1533 
1534 err_dma:
1535 	free_dma_rx_desc_resources(priv);
1536 
1537 	return ret;
1538 }
1539 
1540 /**
1541  * alloc_dma_tx_desc_resources - alloc TX resources.
1542  * @priv: private structure
1543  * Description: according to which descriptor can be used (extend or basic)
1544  * this function allocates the resources for TX and RX paths. In case of
1545  * reception, for example, it pre-allocated the RX socket buffer in order to
1546  * allow zero-copy mechanism.
1547  */
1548 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1549 {
1550 	u32 tx_count = priv->plat->tx_queues_to_use;
1551 	int ret = -ENOMEM;
1552 	u32 queue;
1553 
1554 	/* TX queues buffers and DMA */
1555 	for (queue = 0; queue < tx_count; queue++) {
1556 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1557 
1558 		tx_q->queue_index = queue;
1559 		tx_q->priv_data = priv;
1560 
1561 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1562 						    sizeof(*tx_q->tx_skbuff_dma),
1563 						    GFP_KERNEL);
1564 		if (!tx_q->tx_skbuff_dma)
1565 			return -ENOMEM;
1566 
1567 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1568 						sizeof(struct sk_buff *),
1569 						GFP_KERNEL);
1570 		if (!tx_q->tx_skbuff)
1571 			goto err_dma_buffers;
1572 
1573 		if (priv->extend_desc) {
1574 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1575 							    DMA_TX_SIZE *
1576 							    sizeof(struct
1577 							    dma_extended_desc),
1578 							    &tx_q->dma_tx_phy,
1579 							    GFP_KERNEL);
1580 			if (!tx_q->dma_etx)
1581 				goto err_dma_buffers;
1582 		} else {
1583 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1584 							   DMA_TX_SIZE *
1585 							   sizeof(struct
1586 								  dma_desc),
1587 							   &tx_q->dma_tx_phy,
1588 							   GFP_KERNEL);
1589 			if (!tx_q->dma_tx)
1590 				goto err_dma_buffers;
1591 		}
1592 	}
1593 
1594 	return 0;
1595 
1596 err_dma_buffers:
1597 	free_dma_tx_desc_resources(priv);
1598 
1599 	return ret;
1600 }
1601 
1602 /**
1603  * alloc_dma_desc_resources - alloc TX/RX resources.
1604  * @priv: private structure
1605  * Description: according to which descriptor can be used (extend or basic)
1606  * this function allocates the resources for TX and RX paths. In case of
1607  * reception, for example, it pre-allocated the RX socket buffer in order to
1608  * allow zero-copy mechanism.
1609  */
1610 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1611 {
1612 	/* RX Allocation */
1613 	int ret = alloc_dma_rx_desc_resources(priv);
1614 
1615 	if (ret)
1616 		return ret;
1617 
1618 	ret = alloc_dma_tx_desc_resources(priv);
1619 
1620 	return ret;
1621 }
1622 
1623 /**
1624  * free_dma_desc_resources - free dma desc resources
1625  * @priv: private structure
1626  */
1627 static void free_dma_desc_resources(struct stmmac_priv *priv)
1628 {
1629 	/* Release the DMA RX socket buffers */
1630 	free_dma_rx_desc_resources(priv);
1631 
1632 	/* Release the DMA TX socket buffers */
1633 	free_dma_tx_desc_resources(priv);
1634 }
1635 
1636 /**
1637  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1638  *  @priv: driver private structure
1639  *  Description: It is used for enabling the rx queues in the MAC
1640  */
1641 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1642 {
1643 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1644 	int queue;
1645 	u8 mode;
1646 
1647 	for (queue = 0; queue < rx_queues_count; queue++) {
1648 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1649 		priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1650 	}
1651 }
1652 
1653 /**
1654  * stmmac_start_rx_dma - start RX DMA channel
1655  * @priv: driver private structure
1656  * @chan: RX channel index
1657  * Description:
1658  * This starts a RX DMA channel
1659  */
1660 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1661 {
1662 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1663 	priv->hw->dma->start_rx(priv->ioaddr, chan);
1664 }
1665 
1666 /**
1667  * stmmac_start_tx_dma - start TX DMA channel
1668  * @priv: driver private structure
1669  * @chan: TX channel index
1670  * Description:
1671  * This starts a TX DMA channel
1672  */
1673 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1674 {
1675 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1676 	priv->hw->dma->start_tx(priv->ioaddr, chan);
1677 }
1678 
1679 /**
1680  * stmmac_stop_rx_dma - stop RX DMA channel
1681  * @priv: driver private structure
1682  * @chan: RX channel index
1683  * Description:
1684  * This stops a RX DMA channel
1685  */
1686 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1687 {
1688 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1689 	priv->hw->dma->stop_rx(priv->ioaddr, chan);
1690 }
1691 
1692 /**
1693  * stmmac_stop_tx_dma - stop TX DMA channel
1694  * @priv: driver private structure
1695  * @chan: TX channel index
1696  * Description:
1697  * This stops a TX DMA channel
1698  */
1699 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1700 {
1701 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1702 	priv->hw->dma->stop_tx(priv->ioaddr, chan);
1703 }
1704 
1705 /**
1706  * stmmac_start_all_dma - start all RX and TX DMA channels
1707  * @priv: driver private structure
1708  * Description:
1709  * This starts all the RX and TX DMA channels
1710  */
1711 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1712 {
1713 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1714 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1715 	u32 chan = 0;
1716 
1717 	for (chan = 0; chan < rx_channels_count; chan++)
1718 		stmmac_start_rx_dma(priv, chan);
1719 
1720 	for (chan = 0; chan < tx_channels_count; chan++)
1721 		stmmac_start_tx_dma(priv, chan);
1722 }
1723 
1724 /**
1725  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1726  * @priv: driver private structure
1727  * Description:
1728  * This stops the RX and TX DMA channels
1729  */
1730 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1731 {
1732 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1733 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1734 	u32 chan = 0;
1735 
1736 	for (chan = 0; chan < rx_channels_count; chan++)
1737 		stmmac_stop_rx_dma(priv, chan);
1738 
1739 	for (chan = 0; chan < tx_channels_count; chan++)
1740 		stmmac_stop_tx_dma(priv, chan);
1741 }
1742 
1743 /**
1744  *  stmmac_dma_operation_mode - HW DMA operation mode
1745  *  @priv: driver private structure
1746  *  Description: it is used for configuring the DMA operation mode register in
1747  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1748  */
1749 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1750 {
1751 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1752 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1753 	int rxfifosz = priv->plat->rx_fifo_size;
1754 	u32 txmode = 0;
1755 	u32 rxmode = 0;
1756 	u32 chan = 0;
1757 
1758 	if (rxfifosz == 0)
1759 		rxfifosz = priv->dma_cap.rx_fifo_size;
1760 
1761 	if (priv->plat->force_thresh_dma_mode) {
1762 		txmode = tc;
1763 		rxmode = tc;
1764 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1765 		/*
1766 		 * In case of GMAC, SF mode can be enabled
1767 		 * to perform the TX COE in HW. This depends on:
1768 		 * 1) TX COE if actually supported
1769 		 * 2) There is no bugged Jumbo frame support
1770 		 *    that needs to not insert csum in the TDES.
1771 		 */
1772 		txmode = SF_DMA_MODE;
1773 		rxmode = SF_DMA_MODE;
1774 		priv->xstats.threshold = SF_DMA_MODE;
1775 	} else {
1776 		txmode = tc;
1777 		rxmode = SF_DMA_MODE;
1778 	}
1779 
1780 	/* configure all channels */
1781 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1782 		for (chan = 0; chan < rx_channels_count; chan++)
1783 			priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1784 						   rxfifosz);
1785 
1786 		for (chan = 0; chan < tx_channels_count; chan++)
1787 			priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1788 	} else {
1789 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1790 					rxfifosz);
1791 	}
1792 }
1793 
1794 /**
1795  * stmmac_tx_clean - to manage the transmission completion
1796  * @priv: driver private structure
1797  * @queue: TX queue index
1798  * Description: it reclaims the transmit resources after transmission completes.
1799  */
1800 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1801 {
1802 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1803 	unsigned int bytes_compl = 0, pkts_compl = 0;
1804 	unsigned int entry = tx_q->dirty_tx;
1805 
1806 	netif_tx_lock(priv->dev);
1807 
1808 	priv->xstats.tx_clean++;
1809 
1810 	while (entry != tx_q->cur_tx) {
1811 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1812 		struct dma_desc *p;
1813 		int status;
1814 
1815 		if (priv->extend_desc)
1816 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1817 		else
1818 			p = tx_q->dma_tx + entry;
1819 
1820 		status = priv->hw->desc->tx_status(&priv->dev->stats,
1821 						      &priv->xstats, p,
1822 						      priv->ioaddr);
1823 		/* Check if the descriptor is owned by the DMA */
1824 		if (unlikely(status & tx_dma_own))
1825 			break;
1826 
1827 		/* Just consider the last segment and ...*/
1828 		if (likely(!(status & tx_not_ls))) {
1829 			/* ... verify the status error condition */
1830 			if (unlikely(status & tx_err)) {
1831 				priv->dev->stats.tx_errors++;
1832 			} else {
1833 				priv->dev->stats.tx_packets++;
1834 				priv->xstats.tx_pkt_n++;
1835 			}
1836 			stmmac_get_tx_hwtstamp(priv, p, skb);
1837 		}
1838 
1839 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1840 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1841 				dma_unmap_page(priv->device,
1842 					       tx_q->tx_skbuff_dma[entry].buf,
1843 					       tx_q->tx_skbuff_dma[entry].len,
1844 					       DMA_TO_DEVICE);
1845 			else
1846 				dma_unmap_single(priv->device,
1847 						 tx_q->tx_skbuff_dma[entry].buf,
1848 						 tx_q->tx_skbuff_dma[entry].len,
1849 						 DMA_TO_DEVICE);
1850 			tx_q->tx_skbuff_dma[entry].buf = 0;
1851 			tx_q->tx_skbuff_dma[entry].len = 0;
1852 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1853 		}
1854 
1855 		if (priv->hw->mode->clean_desc3)
1856 			priv->hw->mode->clean_desc3(tx_q, p);
1857 
1858 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1859 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1860 
1861 		if (likely(skb != NULL)) {
1862 			pkts_compl++;
1863 			bytes_compl += skb->len;
1864 			dev_consume_skb_any(skb);
1865 			tx_q->tx_skbuff[entry] = NULL;
1866 		}
1867 
1868 		priv->hw->desc->release_tx_desc(p, priv->mode);
1869 
1870 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1871 	}
1872 	tx_q->dirty_tx = entry;
1873 
1874 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1875 				  pkts_compl, bytes_compl);
1876 
1877 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1878 								queue))) &&
1879 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1880 
1881 		netif_dbg(priv, tx_done, priv->dev,
1882 			  "%s: restart transmit\n", __func__);
1883 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1884 	}
1885 
1886 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1887 		stmmac_enable_eee_mode(priv);
1888 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1889 	}
1890 	netif_tx_unlock(priv->dev);
1891 }
1892 
1893 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1894 {
1895 	priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1896 }
1897 
1898 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1899 {
1900 	priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1901 }
1902 
1903 /**
1904  * stmmac_tx_err - to manage the tx error
1905  * @priv: driver private structure
1906  * @chan: channel index
1907  * Description: it cleans the descriptors and restarts the transmission
1908  * in case of transmission errors.
1909  */
1910 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1911 {
1912 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1913 	int i;
1914 
1915 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1916 
1917 	stmmac_stop_tx_dma(priv, chan);
1918 	dma_free_tx_skbufs(priv, chan);
1919 	for (i = 0; i < DMA_TX_SIZE; i++)
1920 		if (priv->extend_desc)
1921 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1922 						     priv->mode,
1923 						     (i == DMA_TX_SIZE - 1));
1924 		else
1925 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1926 						     priv->mode,
1927 						     (i == DMA_TX_SIZE - 1));
1928 	tx_q->dirty_tx = 0;
1929 	tx_q->cur_tx = 0;
1930 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1931 	stmmac_start_tx_dma(priv, chan);
1932 
1933 	priv->dev->stats.tx_errors++;
1934 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1935 }
1936 
1937 /**
1938  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1939  *  @priv: driver private structure
1940  *  @txmode: TX operating mode
1941  *  @rxmode: RX operating mode
1942  *  @chan: channel index
1943  *  Description: it is used for configuring of the DMA operation mode in
1944  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1945  *  mode.
1946  */
1947 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1948 					  u32 rxmode, u32 chan)
1949 {
1950 	int rxfifosz = priv->plat->rx_fifo_size;
1951 
1952 	if (rxfifosz == 0)
1953 		rxfifosz = priv->dma_cap.rx_fifo_size;
1954 
1955 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1956 		priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1957 					   rxfifosz);
1958 		priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1959 	} else {
1960 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1961 					rxfifosz);
1962 	}
1963 }
1964 
1965 /**
1966  * stmmac_dma_interrupt - DMA ISR
1967  * @priv: driver private structure
1968  * Description: this is the DMA ISR. It is called by the main ISR.
1969  * It calls the dwmac dma routine and schedule poll method in case of some
1970  * work can be done.
1971  */
1972 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1973 {
1974 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
1975 	int status;
1976 	u32 chan;
1977 
1978 	for (chan = 0; chan < tx_channel_count; chan++) {
1979 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
1980 
1981 		status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1982 						      &priv->xstats, chan);
1983 		if (likely((status & handle_rx)) || (status & handle_tx)) {
1984 			if (likely(napi_schedule_prep(&rx_q->napi))) {
1985 				stmmac_disable_dma_irq(priv, chan);
1986 				__napi_schedule(&rx_q->napi);
1987 			}
1988 		}
1989 
1990 		if (unlikely(status & tx_hard_error_bump_tc)) {
1991 			/* Try to bump up the dma threshold on this failure */
1992 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1993 			    (tc <= 256)) {
1994 				tc += 64;
1995 				if (priv->plat->force_thresh_dma_mode)
1996 					stmmac_set_dma_operation_mode(priv,
1997 								      tc,
1998 								      tc,
1999 								      chan);
2000 				else
2001 					stmmac_set_dma_operation_mode(priv,
2002 								    tc,
2003 								    SF_DMA_MODE,
2004 								    chan);
2005 				priv->xstats.threshold = tc;
2006 			}
2007 		} else if (unlikely(status == tx_hard_error)) {
2008 			stmmac_tx_err(priv, chan);
2009 		}
2010 	}
2011 }
2012 
2013 /**
2014  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2015  * @priv: driver private structure
2016  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2017  */
2018 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2019 {
2020 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2021 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2022 
2023 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2024 		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2025 		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2026 	} else {
2027 		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2028 		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2029 	}
2030 
2031 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2032 
2033 	if (priv->dma_cap.rmon) {
2034 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2035 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2036 	} else
2037 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2038 }
2039 
2040 /**
2041  * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
2042  * @priv: driver private structure
2043  * Description: select the Enhanced/Alternate or Normal descriptors.
2044  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2045  * supported by the HW capability register.
2046  */
2047 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2048 {
2049 	if (priv->plat->enh_desc) {
2050 		dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2051 
2052 		/* GMAC older than 3.50 has no extended descriptors */
2053 		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2054 			dev_info(priv->device, "Enabled extended descriptors\n");
2055 			priv->extend_desc = 1;
2056 		} else
2057 			dev_warn(priv->device, "Extended descriptors not supported\n");
2058 
2059 		priv->hw->desc = &enh_desc_ops;
2060 	} else {
2061 		dev_info(priv->device, "Normal descriptors\n");
2062 		priv->hw->desc = &ndesc_ops;
2063 	}
2064 }
2065 
2066 /**
2067  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2068  * @priv: driver private structure
2069  * Description:
2070  *  new GMAC chip generations have a new register to indicate the
2071  *  presence of the optional feature/functions.
2072  *  This can be also used to override the value passed through the
2073  *  platform and necessary for old MAC10/100 and GMAC chips.
2074  */
2075 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2076 {
2077 	u32 ret = 0;
2078 
2079 	if (priv->hw->dma->get_hw_feature) {
2080 		priv->hw->dma->get_hw_feature(priv->ioaddr,
2081 					      &priv->dma_cap);
2082 		ret = 1;
2083 	}
2084 
2085 	return ret;
2086 }
2087 
2088 /**
2089  * stmmac_check_ether_addr - check if the MAC addr is valid
2090  * @priv: driver private structure
2091  * Description:
2092  * it is to verify if the MAC address is valid, in case of failures it
2093  * generates a random MAC address
2094  */
2095 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2096 {
2097 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2098 		priv->hw->mac->get_umac_addr(priv->hw,
2099 					     priv->dev->dev_addr, 0);
2100 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2101 			eth_hw_addr_random(priv->dev);
2102 		netdev_info(priv->dev, "device MAC address %pM\n",
2103 			    priv->dev->dev_addr);
2104 	}
2105 }
2106 
2107 /**
2108  * stmmac_init_dma_engine - DMA init.
2109  * @priv: driver private structure
2110  * Description:
2111  * It inits the DMA invoking the specific MAC/GMAC callback.
2112  * Some DMA parameters can be passed from the platform;
2113  * in case of these are not passed a default is kept for the MAC or GMAC.
2114  */
2115 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2116 {
2117 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2118 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2119 	struct stmmac_rx_queue *rx_q;
2120 	struct stmmac_tx_queue *tx_q;
2121 	u32 dummy_dma_rx_phy = 0;
2122 	u32 dummy_dma_tx_phy = 0;
2123 	u32 chan = 0;
2124 	int atds = 0;
2125 	int ret = 0;
2126 
2127 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2128 		dev_err(priv->device, "Invalid DMA configuration\n");
2129 		return -EINVAL;
2130 	}
2131 
2132 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2133 		atds = 1;
2134 
2135 	ret = priv->hw->dma->reset(priv->ioaddr);
2136 	if (ret) {
2137 		dev_err(priv->device, "Failed to reset the dma\n");
2138 		return ret;
2139 	}
2140 
2141 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2142 		/* DMA Configuration */
2143 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2144 				    dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2145 
2146 		/* DMA RX Channel Configuration */
2147 		for (chan = 0; chan < rx_channels_count; chan++) {
2148 			rx_q = &priv->rx_queue[chan];
2149 
2150 			priv->hw->dma->init_rx_chan(priv->ioaddr,
2151 						    priv->plat->dma_cfg,
2152 						    rx_q->dma_rx_phy, chan);
2153 
2154 			rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2155 				    (DMA_RX_SIZE * sizeof(struct dma_desc));
2156 			priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2157 						       rx_q->rx_tail_addr,
2158 						       chan);
2159 		}
2160 
2161 		/* DMA TX Channel Configuration */
2162 		for (chan = 0; chan < tx_channels_count; chan++) {
2163 			tx_q = &priv->tx_queue[chan];
2164 
2165 			priv->hw->dma->init_chan(priv->ioaddr,
2166 						 priv->plat->dma_cfg,
2167 						 chan);
2168 
2169 			priv->hw->dma->init_tx_chan(priv->ioaddr,
2170 						    priv->plat->dma_cfg,
2171 						    tx_q->dma_tx_phy, chan);
2172 
2173 			tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2174 				    (DMA_TX_SIZE * sizeof(struct dma_desc));
2175 			priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2176 						       tx_q->tx_tail_addr,
2177 						       chan);
2178 		}
2179 	} else {
2180 		rx_q = &priv->rx_queue[chan];
2181 		tx_q = &priv->tx_queue[chan];
2182 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2183 				    tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2184 	}
2185 
2186 	if (priv->plat->axi && priv->hw->dma->axi)
2187 		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2188 
2189 	return ret;
2190 }
2191 
2192 /**
2193  * stmmac_tx_timer - mitigation sw timer for tx.
2194  * @data: data pointer
2195  * Description:
2196  * This is the timer handler to directly invoke the stmmac_tx_clean.
2197  */
2198 static void stmmac_tx_timer(unsigned long data)
2199 {
2200 	struct stmmac_priv *priv = (struct stmmac_priv *)data;
2201 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2202 	u32 queue;
2203 
2204 	/* let's scan all the tx queues */
2205 	for (queue = 0; queue < tx_queues_count; queue++)
2206 		stmmac_tx_clean(priv, queue);
2207 }
2208 
2209 /**
2210  * stmmac_init_tx_coalesce - init tx mitigation options.
2211  * @priv: driver private structure
2212  * Description:
2213  * This inits the transmit coalesce parameters: i.e. timer rate,
2214  * timer handler and default threshold used for enabling the
2215  * interrupt on completion bit.
2216  */
2217 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2218 {
2219 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2220 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2221 	init_timer(&priv->txtimer);
2222 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2223 	priv->txtimer.data = (unsigned long)priv;
2224 	priv->txtimer.function = stmmac_tx_timer;
2225 	add_timer(&priv->txtimer);
2226 }
2227 
2228 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2229 {
2230 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2231 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2232 	u32 chan;
2233 
2234 	/* set TX ring length */
2235 	if (priv->hw->dma->set_tx_ring_len) {
2236 		for (chan = 0; chan < tx_channels_count; chan++)
2237 			priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2238 						       (DMA_TX_SIZE - 1), chan);
2239 	}
2240 
2241 	/* set RX ring length */
2242 	if (priv->hw->dma->set_rx_ring_len) {
2243 		for (chan = 0; chan < rx_channels_count; chan++)
2244 			priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2245 						       (DMA_RX_SIZE - 1), chan);
2246 	}
2247 }
2248 
2249 /**
2250  *  stmmac_set_tx_queue_weight - Set TX queue weight
2251  *  @priv: driver private structure
2252  *  Description: It is used for setting TX queues weight
2253  */
2254 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2255 {
2256 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2257 	u32 weight;
2258 	u32 queue;
2259 
2260 	for (queue = 0; queue < tx_queues_count; queue++) {
2261 		weight = priv->plat->tx_queues_cfg[queue].weight;
2262 		priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2263 	}
2264 }
2265 
2266 /**
2267  *  stmmac_configure_cbs - Configure CBS in TX queue
2268  *  @priv: driver private structure
2269  *  Description: It is used for configuring CBS in AVB TX queues
2270  */
2271 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2272 {
2273 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2274 	u32 mode_to_use;
2275 	u32 queue;
2276 
2277 	/* queue 0 is reserved for legacy traffic */
2278 	for (queue = 1; queue < tx_queues_count; queue++) {
2279 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2280 		if (mode_to_use == MTL_QUEUE_DCB)
2281 			continue;
2282 
2283 		priv->hw->mac->config_cbs(priv->hw,
2284 				priv->plat->tx_queues_cfg[queue].send_slope,
2285 				priv->plat->tx_queues_cfg[queue].idle_slope,
2286 				priv->plat->tx_queues_cfg[queue].high_credit,
2287 				priv->plat->tx_queues_cfg[queue].low_credit,
2288 				queue);
2289 	}
2290 }
2291 
2292 /**
2293  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2294  *  @priv: driver private structure
2295  *  Description: It is used for mapping RX queues to RX dma channels
2296  */
2297 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2298 {
2299 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2300 	u32 queue;
2301 	u32 chan;
2302 
2303 	for (queue = 0; queue < rx_queues_count; queue++) {
2304 		chan = priv->plat->rx_queues_cfg[queue].chan;
2305 		priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2306 	}
2307 }
2308 
2309 /**
2310  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2311  *  @priv: driver private structure
2312  *  Description: It is used for configuring the RX Queue Priority
2313  */
2314 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2315 {
2316 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2317 	u32 queue;
2318 	u32 prio;
2319 
2320 	for (queue = 0; queue < rx_queues_count; queue++) {
2321 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2322 			continue;
2323 
2324 		prio = priv->plat->rx_queues_cfg[queue].prio;
2325 		priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2326 	}
2327 }
2328 
2329 /**
2330  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2331  *  @priv: driver private structure
2332  *  Description: It is used for configuring the TX Queue Priority
2333  */
2334 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2335 {
2336 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2337 	u32 queue;
2338 	u32 prio;
2339 
2340 	for (queue = 0; queue < tx_queues_count; queue++) {
2341 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2342 			continue;
2343 
2344 		prio = priv->plat->tx_queues_cfg[queue].prio;
2345 		priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2346 	}
2347 }
2348 
2349 /**
2350  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2351  *  @priv: driver private structure
2352  *  Description: It is used for configuring the RX queue routing
2353  */
2354 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2355 {
2356 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2357 	u32 queue;
2358 	u8 packet;
2359 
2360 	for (queue = 0; queue < rx_queues_count; queue++) {
2361 		/* no specific packet type routing specified for the queue */
2362 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2363 			continue;
2364 
2365 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2366 		priv->hw->mac->rx_queue_prio(priv->hw, packet, queue);
2367 	}
2368 }
2369 
2370 /**
2371  *  stmmac_mtl_configuration - Configure MTL
2372  *  @priv: driver private structure
2373  *  Description: It is used for configurring MTL
2374  */
2375 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2376 {
2377 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2378 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2379 
2380 	if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2381 		stmmac_set_tx_queue_weight(priv);
2382 
2383 	/* Configure MTL RX algorithms */
2384 	if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2385 		priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2386 						priv->plat->rx_sched_algorithm);
2387 
2388 	/* Configure MTL TX algorithms */
2389 	if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2390 		priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2391 						priv->plat->tx_sched_algorithm);
2392 
2393 	/* Configure CBS in AVB TX queues */
2394 	if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2395 		stmmac_configure_cbs(priv);
2396 
2397 	/* Map RX MTL to DMA channels */
2398 	if (priv->hw->mac->map_mtl_to_dma)
2399 		stmmac_rx_queue_dma_chan_map(priv);
2400 
2401 	/* Enable MAC RX Queues */
2402 	if (priv->hw->mac->rx_queue_enable)
2403 		stmmac_mac_enable_rx_queues(priv);
2404 
2405 	/* Set RX priorities */
2406 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2407 		stmmac_mac_config_rx_queues_prio(priv);
2408 
2409 	/* Set TX priorities */
2410 	if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2411 		stmmac_mac_config_tx_queues_prio(priv);
2412 
2413 	/* Set RX routing */
2414 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2415 		stmmac_mac_config_rx_queues_routing(priv);
2416 }
2417 
2418 /**
2419  * stmmac_hw_setup - setup mac in a usable state.
2420  *  @dev : pointer to the device structure.
2421  *  Description:
2422  *  this is the main function to setup the HW in a usable state because the
2423  *  dma engine is reset, the core registers are configured (e.g. AXI,
2424  *  Checksum features, timers). The DMA is ready to start receiving and
2425  *  transmitting.
2426  *  Return value:
2427  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2428  *  file on failure.
2429  */
2430 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2431 {
2432 	struct stmmac_priv *priv = netdev_priv(dev);
2433 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2434 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2435 	u32 chan;
2436 	int ret;
2437 
2438 	/* DMA initialization and SW reset */
2439 	ret = stmmac_init_dma_engine(priv);
2440 	if (ret < 0) {
2441 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2442 			   __func__);
2443 		return ret;
2444 	}
2445 
2446 	/* Copy the MAC addr into the HW  */
2447 	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2448 
2449 	/* PS and related bits will be programmed according to the speed */
2450 	if (priv->hw->pcs) {
2451 		int speed = priv->plat->mac_port_sel_speed;
2452 
2453 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2454 		    (speed == SPEED_1000)) {
2455 			priv->hw->ps = speed;
2456 		} else {
2457 			dev_warn(priv->device, "invalid port speed\n");
2458 			priv->hw->ps = 0;
2459 		}
2460 	}
2461 
2462 	/* Initialize the MAC Core */
2463 	priv->hw->mac->core_init(priv->hw, dev->mtu);
2464 
2465 	/* Initialize MTL*/
2466 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
2467 		stmmac_mtl_configuration(priv);
2468 
2469 	ret = priv->hw->mac->rx_ipc(priv->hw);
2470 	if (!ret) {
2471 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2472 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2473 		priv->hw->rx_csum = 0;
2474 	}
2475 
2476 	/* Enable the MAC Rx/Tx */
2477 	priv->hw->mac->set_mac(priv->ioaddr, true);
2478 
2479 	/* Set the HW DMA mode and the COE */
2480 	stmmac_dma_operation_mode(priv);
2481 
2482 	stmmac_mmc_setup(priv);
2483 
2484 	if (init_ptp) {
2485 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2486 		if (ret < 0)
2487 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2488 
2489 		ret = stmmac_init_ptp(priv);
2490 		if (ret == -EOPNOTSUPP)
2491 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2492 		else if (ret)
2493 			netdev_warn(priv->dev, "PTP init failed\n");
2494 	}
2495 
2496 #ifdef CONFIG_DEBUG_FS
2497 	ret = stmmac_init_fs(dev);
2498 	if (ret < 0)
2499 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2500 			    __func__);
2501 #endif
2502 	/* Start the ball rolling... */
2503 	stmmac_start_all_dma(priv);
2504 
2505 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2506 
2507 	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2508 		priv->rx_riwt = MAX_DMA_RIWT;
2509 		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2510 	}
2511 
2512 	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2513 		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2514 
2515 	/* set TX and RX rings length */
2516 	stmmac_set_rings_length(priv);
2517 
2518 	/* Enable TSO */
2519 	if (priv->tso) {
2520 		for (chan = 0; chan < tx_cnt; chan++)
2521 			priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2522 	}
2523 
2524 	return 0;
2525 }
2526 
2527 static void stmmac_hw_teardown(struct net_device *dev)
2528 {
2529 	struct stmmac_priv *priv = netdev_priv(dev);
2530 
2531 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2532 }
2533 
2534 /**
2535  *  stmmac_open - open entry point of the driver
2536  *  @dev : pointer to the device structure.
2537  *  Description:
2538  *  This function is the open entry point of the driver.
2539  *  Return value:
2540  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2541  *  file on failure.
2542  */
2543 static int stmmac_open(struct net_device *dev)
2544 {
2545 	struct stmmac_priv *priv = netdev_priv(dev);
2546 	int ret;
2547 
2548 	stmmac_check_ether_addr(priv);
2549 
2550 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2551 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2552 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2553 		ret = stmmac_init_phy(dev);
2554 		if (ret) {
2555 			netdev_err(priv->dev,
2556 				   "%s: Cannot attach to PHY (error: %d)\n",
2557 				   __func__, ret);
2558 			return ret;
2559 		}
2560 	}
2561 
2562 	/* Extra statistics */
2563 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2564 	priv->xstats.threshold = tc;
2565 
2566 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2567 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2568 
2569 	ret = alloc_dma_desc_resources(priv);
2570 	if (ret < 0) {
2571 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2572 			   __func__);
2573 		goto dma_desc_error;
2574 	}
2575 
2576 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2577 	if (ret < 0) {
2578 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2579 			   __func__);
2580 		goto init_error;
2581 	}
2582 
2583 	ret = stmmac_hw_setup(dev, true);
2584 	if (ret < 0) {
2585 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2586 		goto init_error;
2587 	}
2588 
2589 	stmmac_init_tx_coalesce(priv);
2590 
2591 	if (dev->phydev)
2592 		phy_start(dev->phydev);
2593 
2594 	/* Request the IRQ lines */
2595 	ret = request_irq(dev->irq, stmmac_interrupt,
2596 			  IRQF_SHARED, dev->name, dev);
2597 	if (unlikely(ret < 0)) {
2598 		netdev_err(priv->dev,
2599 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2600 			   __func__, dev->irq, ret);
2601 		goto irq_error;
2602 	}
2603 
2604 	/* Request the Wake IRQ in case of another line is used for WoL */
2605 	if (priv->wol_irq != dev->irq) {
2606 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2607 				  IRQF_SHARED, dev->name, dev);
2608 		if (unlikely(ret < 0)) {
2609 			netdev_err(priv->dev,
2610 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2611 				   __func__, priv->wol_irq, ret);
2612 			goto wolirq_error;
2613 		}
2614 	}
2615 
2616 	/* Request the IRQ lines */
2617 	if (priv->lpi_irq > 0) {
2618 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2619 				  dev->name, dev);
2620 		if (unlikely(ret < 0)) {
2621 			netdev_err(priv->dev,
2622 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2623 				   __func__, priv->lpi_irq, ret);
2624 			goto lpiirq_error;
2625 		}
2626 	}
2627 
2628 	stmmac_enable_all_queues(priv);
2629 	stmmac_start_all_queues(priv);
2630 
2631 	return 0;
2632 
2633 lpiirq_error:
2634 	if (priv->wol_irq != dev->irq)
2635 		free_irq(priv->wol_irq, dev);
2636 wolirq_error:
2637 	free_irq(dev->irq, dev);
2638 irq_error:
2639 	if (dev->phydev)
2640 		phy_stop(dev->phydev);
2641 
2642 	del_timer_sync(&priv->txtimer);
2643 	stmmac_hw_teardown(dev);
2644 init_error:
2645 	free_dma_desc_resources(priv);
2646 dma_desc_error:
2647 	if (dev->phydev)
2648 		phy_disconnect(dev->phydev);
2649 
2650 	return ret;
2651 }
2652 
2653 /**
2654  *  stmmac_release - close entry point of the driver
2655  *  @dev : device pointer.
2656  *  Description:
2657  *  This is the stop entry point of the driver.
2658  */
2659 static int stmmac_release(struct net_device *dev)
2660 {
2661 	struct stmmac_priv *priv = netdev_priv(dev);
2662 
2663 	if (priv->eee_enabled)
2664 		del_timer_sync(&priv->eee_ctrl_timer);
2665 
2666 	/* Stop and disconnect the PHY */
2667 	if (dev->phydev) {
2668 		phy_stop(dev->phydev);
2669 		phy_disconnect(dev->phydev);
2670 	}
2671 
2672 	stmmac_stop_all_queues(priv);
2673 
2674 	stmmac_disable_all_queues(priv);
2675 
2676 	del_timer_sync(&priv->txtimer);
2677 
2678 	/* Free the IRQ lines */
2679 	free_irq(dev->irq, dev);
2680 	if (priv->wol_irq != dev->irq)
2681 		free_irq(priv->wol_irq, dev);
2682 	if (priv->lpi_irq > 0)
2683 		free_irq(priv->lpi_irq, dev);
2684 
2685 	/* Stop TX/RX DMA and clear the descriptors */
2686 	stmmac_stop_all_dma(priv);
2687 
2688 	/* Release and free the Rx/Tx resources */
2689 	free_dma_desc_resources(priv);
2690 
2691 	/* Disable the MAC Rx/Tx */
2692 	priv->hw->mac->set_mac(priv->ioaddr, false);
2693 
2694 	netif_carrier_off(dev);
2695 
2696 #ifdef CONFIG_DEBUG_FS
2697 	stmmac_exit_fs(dev);
2698 #endif
2699 
2700 	stmmac_release_ptp(priv);
2701 
2702 	return 0;
2703 }
2704 
2705 /**
2706  *  stmmac_tso_allocator - close entry point of the driver
2707  *  @priv: driver private structure
2708  *  @des: buffer start address
2709  *  @total_len: total length to fill in descriptors
2710  *  @last_segmant: condition for the last descriptor
2711  *  @queue: TX queue index
2712  *  Description:
2713  *  This function fills descriptor and request new descriptors according to
2714  *  buffer length to fill
2715  */
2716 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2717 				 int total_len, bool last_segment, u32 queue)
2718 {
2719 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2720 	struct dma_desc *desc;
2721 	u32 buff_size;
2722 	int tmp_len;
2723 
2724 	tmp_len = total_len;
2725 
2726 	while (tmp_len > 0) {
2727 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2728 		desc = tx_q->dma_tx + tx_q->cur_tx;
2729 
2730 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2731 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2732 			    TSO_MAX_BUFF_SIZE : tmp_len;
2733 
2734 		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2735 			0, 1,
2736 			(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2737 			0, 0);
2738 
2739 		tmp_len -= TSO_MAX_BUFF_SIZE;
2740 	}
2741 }
2742 
2743 /**
2744  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2745  *  @skb : the socket buffer
2746  *  @dev : device pointer
2747  *  Description: this is the transmit function that is called on TSO frames
2748  *  (support available on GMAC4 and newer chips).
2749  *  Diagram below show the ring programming in case of TSO frames:
2750  *
2751  *  First Descriptor
2752  *   --------
2753  *   | DES0 |---> buffer1 = L2/L3/L4 header
2754  *   | DES1 |---> TCP Payload (can continue on next descr...)
2755  *   | DES2 |---> buffer 1 and 2 len
2756  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2757  *   --------
2758  *	|
2759  *     ...
2760  *	|
2761  *   --------
2762  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2763  *   | DES1 | --|
2764  *   | DES2 | --> buffer 1 and 2 len
2765  *   | DES3 |
2766  *   --------
2767  *
2768  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
2769  */
2770 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2771 {
2772 	struct dma_desc *desc, *first, *mss_desc = NULL;
2773 	struct stmmac_priv *priv = netdev_priv(dev);
2774 	int nfrags = skb_shinfo(skb)->nr_frags;
2775 	u32 queue = skb_get_queue_mapping(skb);
2776 	unsigned int first_entry, des;
2777 	struct stmmac_tx_queue *tx_q;
2778 	int tmp_pay_len = 0;
2779 	u32 pay_len, mss;
2780 	u8 proto_hdr_len;
2781 	int i;
2782 
2783 	tx_q = &priv->tx_queue[queue];
2784 
2785 	/* Compute header lengths */
2786 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2787 
2788 	/* Desc availability based on threshold should be enough safe */
2789 	if (unlikely(stmmac_tx_avail(priv, queue) <
2790 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2791 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2792 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2793 								queue));
2794 			/* This is a hard error, log it. */
2795 			netdev_err(priv->dev,
2796 				   "%s: Tx Ring full when queue awake\n",
2797 				   __func__);
2798 		}
2799 		return NETDEV_TX_BUSY;
2800 	}
2801 
2802 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2803 
2804 	mss = skb_shinfo(skb)->gso_size;
2805 
2806 	/* set new MSS value if needed */
2807 	if (mss != priv->mss) {
2808 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2809 		priv->hw->desc->set_mss(mss_desc, mss);
2810 		priv->mss = mss;
2811 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2812 	}
2813 
2814 	if (netif_msg_tx_queued(priv)) {
2815 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2816 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2817 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2818 			skb->data_len);
2819 	}
2820 
2821 	first_entry = tx_q->cur_tx;
2822 
2823 	desc = tx_q->dma_tx + first_entry;
2824 	first = desc;
2825 
2826 	/* first descriptor: fill Headers on Buf1 */
2827 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2828 			     DMA_TO_DEVICE);
2829 	if (dma_mapping_error(priv->device, des))
2830 		goto dma_map_err;
2831 
2832 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2833 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2834 	tx_q->tx_skbuff[first_entry] = skb;
2835 
2836 	first->des0 = cpu_to_le32(des);
2837 
2838 	/* Fill start of payload in buff2 of first descriptor */
2839 	if (pay_len)
2840 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2841 
2842 	/* If needed take extra descriptors to fill the remaining payload */
2843 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2844 
2845 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2846 
2847 	/* Prepare fragments */
2848 	for (i = 0; i < nfrags; i++) {
2849 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2850 
2851 		des = skb_frag_dma_map(priv->device, frag, 0,
2852 				       skb_frag_size(frag),
2853 				       DMA_TO_DEVICE);
2854 		if (dma_mapping_error(priv->device, des))
2855 			goto dma_map_err;
2856 
2857 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2858 				     (i == nfrags - 1), queue);
2859 
2860 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2861 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2862 		tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2863 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2864 	}
2865 
2866 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2867 
2868 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2869 
2870 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2871 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2872 			  __func__);
2873 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2874 	}
2875 
2876 	dev->stats.tx_bytes += skb->len;
2877 	priv->xstats.tx_tso_frames++;
2878 	priv->xstats.tx_tso_nfrags += nfrags;
2879 
2880 	/* Manage tx mitigation */
2881 	priv->tx_count_frames += nfrags + 1;
2882 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2883 		mod_timer(&priv->txtimer,
2884 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2885 	} else {
2886 		priv->tx_count_frames = 0;
2887 		priv->hw->desc->set_tx_ic(desc);
2888 		priv->xstats.tx_set_ic_bit++;
2889 	}
2890 
2891 	if (!priv->hwts_tx_en)
2892 		skb_tx_timestamp(skb);
2893 
2894 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2895 		     priv->hwts_tx_en)) {
2896 		/* declare that device is doing timestamping */
2897 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2898 		priv->hw->desc->enable_tx_timestamp(first);
2899 	}
2900 
2901 	/* Complete the first descriptor before granting the DMA */
2902 	priv->hw->desc->prepare_tso_tx_desc(first, 1,
2903 			proto_hdr_len,
2904 			pay_len,
2905 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2906 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2907 
2908 	/* If context desc is used to change MSS */
2909 	if (mss_desc)
2910 		priv->hw->desc->set_tx_owner(mss_desc);
2911 
2912 	/* The own bit must be the latest setting done when prepare the
2913 	 * descriptor and then barrier is needed to make sure that
2914 	 * all is coherent before granting the DMA engine.
2915 	 */
2916 	dma_wmb();
2917 
2918 	if (netif_msg_pktdata(priv)) {
2919 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2920 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2921 			tx_q->cur_tx, first, nfrags);
2922 
2923 		priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2924 					     0);
2925 
2926 		pr_info(">>> frame to be transmitted: ");
2927 		print_pkt(skb->data, skb_headlen(skb));
2928 	}
2929 
2930 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2931 
2932 	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2933 				       queue);
2934 
2935 	return NETDEV_TX_OK;
2936 
2937 dma_map_err:
2938 	dev_err(priv->device, "Tx dma map failed\n");
2939 	dev_kfree_skb(skb);
2940 	priv->dev->stats.tx_dropped++;
2941 	return NETDEV_TX_OK;
2942 }
2943 
2944 /**
2945  *  stmmac_xmit - Tx entry point of the driver
2946  *  @skb : the socket buffer
2947  *  @dev : device pointer
2948  *  Description : this is the tx entry point of the driver.
2949  *  It programs the chain or the ring and supports oversized frames
2950  *  and SG feature.
2951  */
2952 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2953 {
2954 	struct stmmac_priv *priv = netdev_priv(dev);
2955 	unsigned int nopaged_len = skb_headlen(skb);
2956 	int i, csum_insertion = 0, is_jumbo = 0;
2957 	u32 queue = skb_get_queue_mapping(skb);
2958 	int nfrags = skb_shinfo(skb)->nr_frags;
2959 	int entry;
2960 	unsigned int first_entry;
2961 	struct dma_desc *desc, *first;
2962 	struct stmmac_tx_queue *tx_q;
2963 	unsigned int enh_desc;
2964 	unsigned int des;
2965 
2966 	tx_q = &priv->tx_queue[queue];
2967 
2968 	/* Manage oversized TCP frames for GMAC4 device */
2969 	if (skb_is_gso(skb) && priv->tso) {
2970 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2971 			return stmmac_tso_xmit(skb, dev);
2972 	}
2973 
2974 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2975 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2976 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2977 								queue));
2978 			/* This is a hard error, log it. */
2979 			netdev_err(priv->dev,
2980 				   "%s: Tx Ring full when queue awake\n",
2981 				   __func__);
2982 		}
2983 		return NETDEV_TX_BUSY;
2984 	}
2985 
2986 	if (priv->tx_path_in_lpi_mode)
2987 		stmmac_disable_eee_mode(priv);
2988 
2989 	entry = tx_q->cur_tx;
2990 	first_entry = entry;
2991 
2992 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2993 
2994 	if (likely(priv->extend_desc))
2995 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2996 	else
2997 		desc = tx_q->dma_tx + entry;
2998 
2999 	first = desc;
3000 
3001 	tx_q->tx_skbuff[first_entry] = skb;
3002 
3003 	enh_desc = priv->plat->enh_desc;
3004 	/* To program the descriptors according to the size of the frame */
3005 	if (enh_desc)
3006 		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3007 
3008 	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3009 					 DWMAC_CORE_4_00)) {
3010 		entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3011 		if (unlikely(entry < 0))
3012 			goto dma_map_err;
3013 	}
3014 
3015 	for (i = 0; i < nfrags; i++) {
3016 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3017 		int len = skb_frag_size(frag);
3018 		bool last_segment = (i == (nfrags - 1));
3019 
3020 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3021 
3022 		if (likely(priv->extend_desc))
3023 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3024 		else
3025 			desc = tx_q->dma_tx + entry;
3026 
3027 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3028 				       DMA_TO_DEVICE);
3029 		if (dma_mapping_error(priv->device, des))
3030 			goto dma_map_err; /* should reuse desc w/o issues */
3031 
3032 		tx_q->tx_skbuff[entry] = NULL;
3033 
3034 		tx_q->tx_skbuff_dma[entry].buf = des;
3035 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3036 			desc->des0 = cpu_to_le32(des);
3037 		else
3038 			desc->des2 = cpu_to_le32(des);
3039 
3040 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3041 		tx_q->tx_skbuff_dma[entry].len = len;
3042 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3043 
3044 		/* Prepare the descriptor and set the own bit too */
3045 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3046 						priv->mode, 1, last_segment,
3047 						skb->len);
3048 	}
3049 
3050 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3051 
3052 	tx_q->cur_tx = entry;
3053 
3054 	if (netif_msg_pktdata(priv)) {
3055 		void *tx_head;
3056 
3057 		netdev_dbg(priv->dev,
3058 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3059 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3060 			   entry, first, nfrags);
3061 
3062 		if (priv->extend_desc)
3063 			tx_head = (void *)tx_q->dma_etx;
3064 		else
3065 			tx_head = (void *)tx_q->dma_tx;
3066 
3067 		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3068 
3069 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3070 		print_pkt(skb->data, skb->len);
3071 	}
3072 
3073 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3074 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3075 			  __func__);
3076 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3077 	}
3078 
3079 	dev->stats.tx_bytes += skb->len;
3080 
3081 	/* According to the coalesce parameter the IC bit for the latest
3082 	 * segment is reset and the timer re-started to clean the tx status.
3083 	 * This approach takes care about the fragments: desc is the first
3084 	 * element in case of no SG.
3085 	 */
3086 	priv->tx_count_frames += nfrags + 1;
3087 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3088 		mod_timer(&priv->txtimer,
3089 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3090 	} else {
3091 		priv->tx_count_frames = 0;
3092 		priv->hw->desc->set_tx_ic(desc);
3093 		priv->xstats.tx_set_ic_bit++;
3094 	}
3095 
3096 	if (!priv->hwts_tx_en)
3097 		skb_tx_timestamp(skb);
3098 
3099 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3100 	 * problems because all the descriptors are actually ready to be
3101 	 * passed to the DMA engine.
3102 	 */
3103 	if (likely(!is_jumbo)) {
3104 		bool last_segment = (nfrags == 0);
3105 
3106 		des = dma_map_single(priv->device, skb->data,
3107 				     nopaged_len, DMA_TO_DEVICE);
3108 		if (dma_mapping_error(priv->device, des))
3109 			goto dma_map_err;
3110 
3111 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3112 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3113 			first->des0 = cpu_to_le32(des);
3114 		else
3115 			first->des2 = cpu_to_le32(des);
3116 
3117 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3118 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3119 
3120 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3121 			     priv->hwts_tx_en)) {
3122 			/* declare that device is doing timestamping */
3123 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3124 			priv->hw->desc->enable_tx_timestamp(first);
3125 		}
3126 
3127 		/* Prepare the first descriptor setting the OWN bit too */
3128 		priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3129 						csum_insertion, priv->mode, 1,
3130 						last_segment, skb->len);
3131 
3132 		/* The own bit must be the latest setting done when prepare the
3133 		 * descriptor and then barrier is needed to make sure that
3134 		 * all is coherent before granting the DMA engine.
3135 		 */
3136 		dma_wmb();
3137 	}
3138 
3139 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3140 
3141 	if (priv->synopsys_id < DWMAC_CORE_4_00)
3142 		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3143 	else
3144 		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3145 					       queue);
3146 
3147 	return NETDEV_TX_OK;
3148 
3149 dma_map_err:
3150 	netdev_err(priv->dev, "Tx DMA map failed\n");
3151 	dev_kfree_skb(skb);
3152 	priv->dev->stats.tx_dropped++;
3153 	return NETDEV_TX_OK;
3154 }
3155 
3156 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3157 {
3158 	struct ethhdr *ehdr;
3159 	u16 vlanid;
3160 
3161 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3162 	    NETIF_F_HW_VLAN_CTAG_RX &&
3163 	    !__vlan_get_tag(skb, &vlanid)) {
3164 		/* pop the vlan tag */
3165 		ehdr = (struct ethhdr *)skb->data;
3166 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3167 		skb_pull(skb, VLAN_HLEN);
3168 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3169 	}
3170 }
3171 
3172 
3173 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3174 {
3175 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3176 		return 0;
3177 
3178 	return 1;
3179 }
3180 
3181 /**
3182  * stmmac_rx_refill - refill used skb preallocated buffers
3183  * @priv: driver private structure
3184  * @queue: RX queue index
3185  * Description : this is to reallocate the skb for the reception process
3186  * that is based on zero-copy.
3187  */
3188 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3189 {
3190 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3191 	int dirty = stmmac_rx_dirty(priv, queue);
3192 	unsigned int entry = rx_q->dirty_rx;
3193 
3194 	int bfsize = priv->dma_buf_sz;
3195 
3196 	while (dirty-- > 0) {
3197 		struct dma_desc *p;
3198 
3199 		if (priv->extend_desc)
3200 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3201 		else
3202 			p = rx_q->dma_rx + entry;
3203 
3204 		if (likely(!rx_q->rx_skbuff[entry])) {
3205 			struct sk_buff *skb;
3206 
3207 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3208 			if (unlikely(!skb)) {
3209 				/* so for a while no zero-copy! */
3210 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3211 				if (unlikely(net_ratelimit()))
3212 					dev_err(priv->device,
3213 						"fail to alloc skb entry %d\n",
3214 						entry);
3215 				break;
3216 			}
3217 
3218 			rx_q->rx_skbuff[entry] = skb;
3219 			rx_q->rx_skbuff_dma[entry] =
3220 			    dma_map_single(priv->device, skb->data, bfsize,
3221 					   DMA_FROM_DEVICE);
3222 			if (dma_mapping_error(priv->device,
3223 					      rx_q->rx_skbuff_dma[entry])) {
3224 				netdev_err(priv->dev, "Rx DMA map failed\n");
3225 				dev_kfree_skb(skb);
3226 				break;
3227 			}
3228 
3229 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3230 				p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3231 				p->des1 = 0;
3232 			} else {
3233 				p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3234 			}
3235 			if (priv->hw->mode->refill_desc3)
3236 				priv->hw->mode->refill_desc3(rx_q, p);
3237 
3238 			if (rx_q->rx_zeroc_thresh > 0)
3239 				rx_q->rx_zeroc_thresh--;
3240 
3241 			netif_dbg(priv, rx_status, priv->dev,
3242 				  "refill entry #%d\n", entry);
3243 		}
3244 		dma_wmb();
3245 
3246 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3247 			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3248 		else
3249 			priv->hw->desc->set_rx_owner(p);
3250 
3251 		dma_wmb();
3252 
3253 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3254 	}
3255 	rx_q->dirty_rx = entry;
3256 }
3257 
3258 /**
3259  * stmmac_rx - manage the receive process
3260  * @priv: driver private structure
3261  * @limit: napi bugget
3262  * @queue: RX queue index.
3263  * Description :  this the function called by the napi poll method.
3264  * It gets all the frames inside the ring.
3265  */
3266 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3267 {
3268 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3269 	unsigned int entry = rx_q->cur_rx;
3270 	int coe = priv->hw->rx_csum;
3271 	unsigned int next_entry;
3272 	unsigned int count = 0;
3273 
3274 	if (netif_msg_rx_status(priv)) {
3275 		void *rx_head;
3276 
3277 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3278 		if (priv->extend_desc)
3279 			rx_head = (void *)rx_q->dma_erx;
3280 		else
3281 			rx_head = (void *)rx_q->dma_rx;
3282 
3283 		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3284 	}
3285 	while (count < limit) {
3286 		int status;
3287 		struct dma_desc *p;
3288 		struct dma_desc *np;
3289 
3290 		if (priv->extend_desc)
3291 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3292 		else
3293 			p = rx_q->dma_rx + entry;
3294 
3295 		/* read the status of the incoming frame */
3296 		status = priv->hw->desc->rx_status(&priv->dev->stats,
3297 						   &priv->xstats, p);
3298 		/* check if managed by the DMA otherwise go ahead */
3299 		if (unlikely(status & dma_own))
3300 			break;
3301 
3302 		count++;
3303 
3304 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3305 		next_entry = rx_q->cur_rx;
3306 
3307 		if (priv->extend_desc)
3308 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3309 		else
3310 			np = rx_q->dma_rx + next_entry;
3311 
3312 		prefetch(np);
3313 
3314 		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3315 			priv->hw->desc->rx_extended_status(&priv->dev->stats,
3316 							   &priv->xstats,
3317 							   rx_q->dma_erx +
3318 							   entry);
3319 		if (unlikely(status == discard_frame)) {
3320 			priv->dev->stats.rx_errors++;
3321 			if (priv->hwts_rx_en && !priv->extend_desc) {
3322 				/* DESC2 & DESC3 will be overwritten by device
3323 				 * with timestamp value, hence reinitialize
3324 				 * them in stmmac_rx_refill() function so that
3325 				 * device can reuse it.
3326 				 */
3327 				rx_q->rx_skbuff[entry] = NULL;
3328 				dma_unmap_single(priv->device,
3329 						 rx_q->rx_skbuff_dma[entry],
3330 						 priv->dma_buf_sz,
3331 						 DMA_FROM_DEVICE);
3332 			}
3333 		} else {
3334 			struct sk_buff *skb;
3335 			int frame_len;
3336 			unsigned int des;
3337 
3338 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3339 				des = le32_to_cpu(p->des0);
3340 			else
3341 				des = le32_to_cpu(p->des2);
3342 
3343 			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3344 
3345 			/*  If frame length is greater than skb buffer size
3346 			 *  (preallocated during init) then the packet is
3347 			 *  ignored
3348 			 */
3349 			if (frame_len > priv->dma_buf_sz) {
3350 				netdev_err(priv->dev,
3351 					   "len %d larger than size (%d)\n",
3352 					   frame_len, priv->dma_buf_sz);
3353 				priv->dev->stats.rx_length_errors++;
3354 				break;
3355 			}
3356 
3357 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3358 			 * Type frames (LLC/LLC-SNAP)
3359 			 */
3360 			if (unlikely(status != llc_snap))
3361 				frame_len -= ETH_FCS_LEN;
3362 
3363 			if (netif_msg_rx_status(priv)) {
3364 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3365 					   p, entry, des);
3366 				if (frame_len > ETH_FRAME_LEN)
3367 					netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3368 						   frame_len, status);
3369 			}
3370 
3371 			/* The zero-copy is always used for all the sizes
3372 			 * in case of GMAC4 because it needs
3373 			 * to refill the used descriptors, always.
3374 			 */
3375 			if (unlikely(!priv->plat->has_gmac4 &&
3376 				     ((frame_len < priv->rx_copybreak) ||
3377 				     stmmac_rx_threshold_count(rx_q)))) {
3378 				skb = netdev_alloc_skb_ip_align(priv->dev,
3379 								frame_len);
3380 				if (unlikely(!skb)) {
3381 					if (net_ratelimit())
3382 						dev_warn(priv->device,
3383 							 "packet dropped\n");
3384 					priv->dev->stats.rx_dropped++;
3385 					break;
3386 				}
3387 
3388 				dma_sync_single_for_cpu(priv->device,
3389 							rx_q->rx_skbuff_dma
3390 							[entry], frame_len,
3391 							DMA_FROM_DEVICE);
3392 				skb_copy_to_linear_data(skb,
3393 							rx_q->
3394 							rx_skbuff[entry]->data,
3395 							frame_len);
3396 
3397 				skb_put(skb, frame_len);
3398 				dma_sync_single_for_device(priv->device,
3399 							   rx_q->rx_skbuff_dma
3400 							   [entry], frame_len,
3401 							   DMA_FROM_DEVICE);
3402 			} else {
3403 				skb = rx_q->rx_skbuff[entry];
3404 				if (unlikely(!skb)) {
3405 					netdev_err(priv->dev,
3406 						   "%s: Inconsistent Rx chain\n",
3407 						   priv->dev->name);
3408 					priv->dev->stats.rx_dropped++;
3409 					break;
3410 				}
3411 				prefetch(skb->data - NET_IP_ALIGN);
3412 				rx_q->rx_skbuff[entry] = NULL;
3413 				rx_q->rx_zeroc_thresh++;
3414 
3415 				skb_put(skb, frame_len);
3416 				dma_unmap_single(priv->device,
3417 						 rx_q->rx_skbuff_dma[entry],
3418 						 priv->dma_buf_sz,
3419 						 DMA_FROM_DEVICE);
3420 			}
3421 
3422 			if (netif_msg_pktdata(priv)) {
3423 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3424 					   frame_len);
3425 				print_pkt(skb->data, frame_len);
3426 			}
3427 
3428 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3429 
3430 			stmmac_rx_vlan(priv->dev, skb);
3431 
3432 			skb->protocol = eth_type_trans(skb, priv->dev);
3433 
3434 			if (unlikely(!coe))
3435 				skb_checksum_none_assert(skb);
3436 			else
3437 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3438 
3439 			napi_gro_receive(&rx_q->napi, skb);
3440 
3441 			priv->dev->stats.rx_packets++;
3442 			priv->dev->stats.rx_bytes += frame_len;
3443 		}
3444 		entry = next_entry;
3445 	}
3446 
3447 	stmmac_rx_refill(priv, queue);
3448 
3449 	priv->xstats.rx_pkt_n += count;
3450 
3451 	return count;
3452 }
3453 
3454 /**
3455  *  stmmac_poll - stmmac poll method (NAPI)
3456  *  @napi : pointer to the napi structure.
3457  *  @budget : maximum number of packets that the current CPU can receive from
3458  *	      all interfaces.
3459  *  Description :
3460  *  To look at the incoming frames and clear the tx resources.
3461  */
3462 static int stmmac_poll(struct napi_struct *napi, int budget)
3463 {
3464 	struct stmmac_rx_queue *rx_q =
3465 		container_of(napi, struct stmmac_rx_queue, napi);
3466 	struct stmmac_priv *priv = rx_q->priv_data;
3467 	u32 tx_count = priv->plat->tx_queues_to_use;
3468 	u32 chan = rx_q->queue_index;
3469 	int work_done = 0;
3470 	u32 queue;
3471 
3472 	priv->xstats.napi_poll++;
3473 
3474 	/* check all the queues */
3475 	for (queue = 0; queue < tx_count; queue++)
3476 		stmmac_tx_clean(priv, queue);
3477 
3478 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3479 	if (work_done < budget) {
3480 		napi_complete_done(napi, work_done);
3481 		stmmac_enable_dma_irq(priv, chan);
3482 	}
3483 	return work_done;
3484 }
3485 
3486 /**
3487  *  stmmac_tx_timeout
3488  *  @dev : Pointer to net device structure
3489  *  Description: this function is called when a packet transmission fails to
3490  *   complete within a reasonable time. The driver will mark the error in the
3491  *   netdev structure and arrange for the device to be reset to a sane state
3492  *   in order to transmit a new packet.
3493  */
3494 static void stmmac_tx_timeout(struct net_device *dev)
3495 {
3496 	struct stmmac_priv *priv = netdev_priv(dev);
3497 	u32 tx_count = priv->plat->tx_queues_to_use;
3498 	u32 chan;
3499 
3500 	/* Clear Tx resources and restart transmitting again */
3501 	for (chan = 0; chan < tx_count; chan++)
3502 		stmmac_tx_err(priv, chan);
3503 }
3504 
3505 /**
3506  *  stmmac_set_rx_mode - entry point for multicast addressing
3507  *  @dev : pointer to the device structure
3508  *  Description:
3509  *  This function is a driver entry point which gets called by the kernel
3510  *  whenever multicast addresses must be enabled/disabled.
3511  *  Return value:
3512  *  void.
3513  */
3514 static void stmmac_set_rx_mode(struct net_device *dev)
3515 {
3516 	struct stmmac_priv *priv = netdev_priv(dev);
3517 
3518 	priv->hw->mac->set_filter(priv->hw, dev);
3519 }
3520 
3521 /**
3522  *  stmmac_change_mtu - entry point to change MTU size for the device.
3523  *  @dev : device pointer.
3524  *  @new_mtu : the new MTU size for the device.
3525  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
3526  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3527  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3528  *  Return value:
3529  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3530  *  file on failure.
3531  */
3532 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3533 {
3534 	struct stmmac_priv *priv = netdev_priv(dev);
3535 
3536 	if (netif_running(dev)) {
3537 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3538 		return -EBUSY;
3539 	}
3540 
3541 	dev->mtu = new_mtu;
3542 
3543 	netdev_update_features(dev);
3544 
3545 	return 0;
3546 }
3547 
3548 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3549 					     netdev_features_t features)
3550 {
3551 	struct stmmac_priv *priv = netdev_priv(dev);
3552 
3553 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3554 		features &= ~NETIF_F_RXCSUM;
3555 
3556 	if (!priv->plat->tx_coe)
3557 		features &= ~NETIF_F_CSUM_MASK;
3558 
3559 	/* Some GMAC devices have a bugged Jumbo frame support that
3560 	 * needs to have the Tx COE disabled for oversized frames
3561 	 * (due to limited buffer sizes). In this case we disable
3562 	 * the TX csum insertion in the TDES and not use SF.
3563 	 */
3564 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3565 		features &= ~NETIF_F_CSUM_MASK;
3566 
3567 	/* Disable tso if asked by ethtool */
3568 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3569 		if (features & NETIF_F_TSO)
3570 			priv->tso = true;
3571 		else
3572 			priv->tso = false;
3573 	}
3574 
3575 	return features;
3576 }
3577 
3578 static int stmmac_set_features(struct net_device *netdev,
3579 			       netdev_features_t features)
3580 {
3581 	struct stmmac_priv *priv = netdev_priv(netdev);
3582 
3583 	/* Keep the COE Type in case of csum is supporting */
3584 	if (features & NETIF_F_RXCSUM)
3585 		priv->hw->rx_csum = priv->plat->rx_coe;
3586 	else
3587 		priv->hw->rx_csum = 0;
3588 	/* No check needed because rx_coe has been set before and it will be
3589 	 * fixed in case of issue.
3590 	 */
3591 	priv->hw->mac->rx_ipc(priv->hw);
3592 
3593 	return 0;
3594 }
3595 
3596 /**
3597  *  stmmac_interrupt - main ISR
3598  *  @irq: interrupt number.
3599  *  @dev_id: to pass the net device pointer.
3600  *  Description: this is the main driver interrupt service routine.
3601  *  It can call:
3602  *  o DMA service routine (to manage incoming frame reception and transmission
3603  *    status)
3604  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3605  *    interrupts.
3606  */
3607 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3608 {
3609 	struct net_device *dev = (struct net_device *)dev_id;
3610 	struct stmmac_priv *priv = netdev_priv(dev);
3611 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3612 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3613 	u32 queues_count;
3614 	u32 queue;
3615 
3616 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3617 
3618 	if (priv->irq_wake)
3619 		pm_wakeup_event(priv->device, 0);
3620 
3621 	if (unlikely(!dev)) {
3622 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3623 		return IRQ_NONE;
3624 	}
3625 
3626 	/* To handle GMAC own interrupts */
3627 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3628 		int status = priv->hw->mac->host_irq_status(priv->hw,
3629 							    &priv->xstats);
3630 
3631 		if (unlikely(status)) {
3632 			/* For LPI we need to save the tx status */
3633 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3634 				priv->tx_path_in_lpi_mode = true;
3635 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3636 				priv->tx_path_in_lpi_mode = false;
3637 		}
3638 
3639 		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3640 			for (queue = 0; queue < queues_count; queue++) {
3641 				struct stmmac_rx_queue *rx_q =
3642 				&priv->rx_queue[queue];
3643 
3644 				status |=
3645 				priv->hw->mac->host_mtl_irq_status(priv->hw,
3646 								   queue);
3647 
3648 				if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3649 				    priv->hw->dma->set_rx_tail_ptr)
3650 					priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3651 								rx_q->rx_tail_addr,
3652 								queue);
3653 			}
3654 		}
3655 
3656 		/* PCS link status */
3657 		if (priv->hw->pcs) {
3658 			if (priv->xstats.pcs_link)
3659 				netif_carrier_on(dev);
3660 			else
3661 				netif_carrier_off(dev);
3662 		}
3663 	}
3664 
3665 	/* To handle DMA interrupts */
3666 	stmmac_dma_interrupt(priv);
3667 
3668 	return IRQ_HANDLED;
3669 }
3670 
3671 #ifdef CONFIG_NET_POLL_CONTROLLER
3672 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3673  * to allow network I/O with interrupts disabled.
3674  */
3675 static void stmmac_poll_controller(struct net_device *dev)
3676 {
3677 	disable_irq(dev->irq);
3678 	stmmac_interrupt(dev->irq, dev);
3679 	enable_irq(dev->irq);
3680 }
3681 #endif
3682 
3683 /**
3684  *  stmmac_ioctl - Entry point for the Ioctl
3685  *  @dev: Device pointer.
3686  *  @rq: An IOCTL specefic structure, that can contain a pointer to
3687  *  a proprietary structure used to pass information to the driver.
3688  *  @cmd: IOCTL command
3689  *  Description:
3690  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3691  */
3692 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3693 {
3694 	int ret = -EOPNOTSUPP;
3695 
3696 	if (!netif_running(dev))
3697 		return -EINVAL;
3698 
3699 	switch (cmd) {
3700 	case SIOCGMIIPHY:
3701 	case SIOCGMIIREG:
3702 	case SIOCSMIIREG:
3703 		if (!dev->phydev)
3704 			return -EINVAL;
3705 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3706 		break;
3707 	case SIOCSHWTSTAMP:
3708 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3709 		break;
3710 	default:
3711 		break;
3712 	}
3713 
3714 	return ret;
3715 }
3716 
3717 #ifdef CONFIG_DEBUG_FS
3718 static struct dentry *stmmac_fs_dir;
3719 
3720 static void sysfs_display_ring(void *head, int size, int extend_desc,
3721 			       struct seq_file *seq)
3722 {
3723 	int i;
3724 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3725 	struct dma_desc *p = (struct dma_desc *)head;
3726 
3727 	for (i = 0; i < size; i++) {
3728 		if (extend_desc) {
3729 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3730 				   i, (unsigned int)virt_to_phys(ep),
3731 				   le32_to_cpu(ep->basic.des0),
3732 				   le32_to_cpu(ep->basic.des1),
3733 				   le32_to_cpu(ep->basic.des2),
3734 				   le32_to_cpu(ep->basic.des3));
3735 			ep++;
3736 		} else {
3737 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3738 				   i, (unsigned int)virt_to_phys(p),
3739 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3740 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3741 			p++;
3742 		}
3743 		seq_printf(seq, "\n");
3744 	}
3745 }
3746 
3747 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3748 {
3749 	struct net_device *dev = seq->private;
3750 	struct stmmac_priv *priv = netdev_priv(dev);
3751 	u32 rx_count = priv->plat->rx_queues_to_use;
3752 	u32 tx_count = priv->plat->tx_queues_to_use;
3753 	u32 queue;
3754 
3755 	for (queue = 0; queue < rx_count; queue++) {
3756 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3757 
3758 		seq_printf(seq, "RX Queue %d:\n", queue);
3759 
3760 		if (priv->extend_desc) {
3761 			seq_printf(seq, "Extended descriptor ring:\n");
3762 			sysfs_display_ring((void *)rx_q->dma_erx,
3763 					   DMA_RX_SIZE, 1, seq);
3764 		} else {
3765 			seq_printf(seq, "Descriptor ring:\n");
3766 			sysfs_display_ring((void *)rx_q->dma_rx,
3767 					   DMA_RX_SIZE, 0, seq);
3768 		}
3769 	}
3770 
3771 	for (queue = 0; queue < tx_count; queue++) {
3772 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3773 
3774 		seq_printf(seq, "TX Queue %d:\n", queue);
3775 
3776 		if (priv->extend_desc) {
3777 			seq_printf(seq, "Extended descriptor ring:\n");
3778 			sysfs_display_ring((void *)tx_q->dma_etx,
3779 					   DMA_TX_SIZE, 1, seq);
3780 		} else {
3781 			seq_printf(seq, "Descriptor ring:\n");
3782 			sysfs_display_ring((void *)tx_q->dma_tx,
3783 					   DMA_TX_SIZE, 0, seq);
3784 		}
3785 	}
3786 
3787 	return 0;
3788 }
3789 
3790 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3791 {
3792 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3793 }
3794 
3795 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3796 
3797 static const struct file_operations stmmac_rings_status_fops = {
3798 	.owner = THIS_MODULE,
3799 	.open = stmmac_sysfs_ring_open,
3800 	.read = seq_read,
3801 	.llseek = seq_lseek,
3802 	.release = single_release,
3803 };
3804 
3805 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3806 {
3807 	struct net_device *dev = seq->private;
3808 	struct stmmac_priv *priv = netdev_priv(dev);
3809 
3810 	if (!priv->hw_cap_support) {
3811 		seq_printf(seq, "DMA HW features not supported\n");
3812 		return 0;
3813 	}
3814 
3815 	seq_printf(seq, "==============================\n");
3816 	seq_printf(seq, "\tDMA HW features\n");
3817 	seq_printf(seq, "==============================\n");
3818 
3819 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3820 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3821 	seq_printf(seq, "\t1000 Mbps: %s\n",
3822 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3823 	seq_printf(seq, "\tHalf duplex: %s\n",
3824 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3825 	seq_printf(seq, "\tHash Filter: %s\n",
3826 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3827 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3828 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3829 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3830 		   (priv->dma_cap.pcs) ? "Y" : "N");
3831 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3832 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3833 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3834 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3835 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3836 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3837 	seq_printf(seq, "\tRMON module: %s\n",
3838 		   (priv->dma_cap.rmon) ? "Y" : "N");
3839 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3840 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3841 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3842 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3843 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3844 		   (priv->dma_cap.eee) ? "Y" : "N");
3845 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3846 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3847 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3848 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3849 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3850 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3851 	} else {
3852 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3853 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3854 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3855 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3856 	}
3857 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3858 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3859 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3860 		   priv->dma_cap.number_rx_channel);
3861 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3862 		   priv->dma_cap.number_tx_channel);
3863 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3864 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3865 
3866 	return 0;
3867 }
3868 
3869 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3870 {
3871 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3872 }
3873 
3874 static const struct file_operations stmmac_dma_cap_fops = {
3875 	.owner = THIS_MODULE,
3876 	.open = stmmac_sysfs_dma_cap_open,
3877 	.read = seq_read,
3878 	.llseek = seq_lseek,
3879 	.release = single_release,
3880 };
3881 
3882 static int stmmac_init_fs(struct net_device *dev)
3883 {
3884 	struct stmmac_priv *priv = netdev_priv(dev);
3885 
3886 	/* Create per netdev entries */
3887 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3888 
3889 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3890 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3891 
3892 		return -ENOMEM;
3893 	}
3894 
3895 	/* Entry to report DMA RX/TX rings */
3896 	priv->dbgfs_rings_status =
3897 		debugfs_create_file("descriptors_status", S_IRUGO,
3898 				    priv->dbgfs_dir, dev,
3899 				    &stmmac_rings_status_fops);
3900 
3901 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3902 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3903 		debugfs_remove_recursive(priv->dbgfs_dir);
3904 
3905 		return -ENOMEM;
3906 	}
3907 
3908 	/* Entry to report the DMA HW features */
3909 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3910 					    priv->dbgfs_dir,
3911 					    dev, &stmmac_dma_cap_fops);
3912 
3913 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3914 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3915 		debugfs_remove_recursive(priv->dbgfs_dir);
3916 
3917 		return -ENOMEM;
3918 	}
3919 
3920 	return 0;
3921 }
3922 
3923 static void stmmac_exit_fs(struct net_device *dev)
3924 {
3925 	struct stmmac_priv *priv = netdev_priv(dev);
3926 
3927 	debugfs_remove_recursive(priv->dbgfs_dir);
3928 }
3929 #endif /* CONFIG_DEBUG_FS */
3930 
3931 static const struct net_device_ops stmmac_netdev_ops = {
3932 	.ndo_open = stmmac_open,
3933 	.ndo_start_xmit = stmmac_xmit,
3934 	.ndo_stop = stmmac_release,
3935 	.ndo_change_mtu = stmmac_change_mtu,
3936 	.ndo_fix_features = stmmac_fix_features,
3937 	.ndo_set_features = stmmac_set_features,
3938 	.ndo_set_rx_mode = stmmac_set_rx_mode,
3939 	.ndo_tx_timeout = stmmac_tx_timeout,
3940 	.ndo_do_ioctl = stmmac_ioctl,
3941 #ifdef CONFIG_NET_POLL_CONTROLLER
3942 	.ndo_poll_controller = stmmac_poll_controller,
3943 #endif
3944 	.ndo_set_mac_address = eth_mac_addr,
3945 };
3946 
3947 /**
3948  *  stmmac_hw_init - Init the MAC device
3949  *  @priv: driver private structure
3950  *  Description: this function is to configure the MAC device according to
3951  *  some platform parameters or the HW capability register. It prepares the
3952  *  driver to use either ring or chain modes and to setup either enhanced or
3953  *  normal descriptors.
3954  */
3955 static int stmmac_hw_init(struct stmmac_priv *priv)
3956 {
3957 	struct mac_device_info *mac;
3958 
3959 	/* Identify the MAC HW device */
3960 	if (priv->plat->has_gmac) {
3961 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
3962 		mac = dwmac1000_setup(priv->ioaddr,
3963 				      priv->plat->multicast_filter_bins,
3964 				      priv->plat->unicast_filter_entries,
3965 				      &priv->synopsys_id);
3966 	} else if (priv->plat->has_gmac4) {
3967 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
3968 		mac = dwmac4_setup(priv->ioaddr,
3969 				   priv->plat->multicast_filter_bins,
3970 				   priv->plat->unicast_filter_entries,
3971 				   &priv->synopsys_id);
3972 	} else {
3973 		mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3974 	}
3975 	if (!mac)
3976 		return -ENOMEM;
3977 
3978 	priv->hw = mac;
3979 
3980 	/* To use the chained or ring mode */
3981 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3982 		priv->hw->mode = &dwmac4_ring_mode_ops;
3983 	} else {
3984 		if (chain_mode) {
3985 			priv->hw->mode = &chain_mode_ops;
3986 			dev_info(priv->device, "Chain mode enabled\n");
3987 			priv->mode = STMMAC_CHAIN_MODE;
3988 		} else {
3989 			priv->hw->mode = &ring_mode_ops;
3990 			dev_info(priv->device, "Ring mode enabled\n");
3991 			priv->mode = STMMAC_RING_MODE;
3992 		}
3993 	}
3994 
3995 	/* Get the HW capability (new GMAC newer than 3.50a) */
3996 	priv->hw_cap_support = stmmac_get_hw_features(priv);
3997 	if (priv->hw_cap_support) {
3998 		dev_info(priv->device, "DMA HW capability register supported\n");
3999 
4000 		/* We can override some gmac/dma configuration fields: e.g.
4001 		 * enh_desc, tx_coe (e.g. that are passed through the
4002 		 * platform) with the values from the HW capability
4003 		 * register (if supported).
4004 		 */
4005 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4006 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4007 		priv->hw->pmt = priv->plat->pmt;
4008 
4009 		/* TXCOE doesn't work in thresh DMA mode */
4010 		if (priv->plat->force_thresh_dma_mode)
4011 			priv->plat->tx_coe = 0;
4012 		else
4013 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4014 
4015 		/* In case of GMAC4 rx_coe is from HW cap register. */
4016 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4017 
4018 		if (priv->dma_cap.rx_coe_type2)
4019 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4020 		else if (priv->dma_cap.rx_coe_type1)
4021 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4022 
4023 	} else {
4024 		dev_info(priv->device, "No HW DMA feature register supported\n");
4025 	}
4026 
4027 	/* To use alternate (extended), normal or GMAC4 descriptor structures */
4028 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
4029 		priv->hw->desc = &dwmac4_desc_ops;
4030 	else
4031 		stmmac_selec_desc_mode(priv);
4032 
4033 	if (priv->plat->rx_coe) {
4034 		priv->hw->rx_csum = priv->plat->rx_coe;
4035 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4036 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4037 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4038 	}
4039 	if (priv->plat->tx_coe)
4040 		dev_info(priv->device, "TX Checksum insertion supported\n");
4041 
4042 	if (priv->plat->pmt) {
4043 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4044 		device_set_wakeup_capable(priv->device, 1);
4045 	}
4046 
4047 	if (priv->dma_cap.tsoen)
4048 		dev_info(priv->device, "TSO supported\n");
4049 
4050 	return 0;
4051 }
4052 
4053 /**
4054  * stmmac_dvr_probe
4055  * @device: device pointer
4056  * @plat_dat: platform data pointer
4057  * @res: stmmac resource pointer
4058  * Description: this is the main probe function used to
4059  * call the alloc_etherdev, allocate the priv structure.
4060  * Return:
4061  * returns 0 on success, otherwise errno.
4062  */
4063 int stmmac_dvr_probe(struct device *device,
4064 		     struct plat_stmmacenet_data *plat_dat,
4065 		     struct stmmac_resources *res)
4066 {
4067 	struct net_device *ndev = NULL;
4068 	struct stmmac_priv *priv;
4069 	int ret = 0;
4070 	u32 queue;
4071 
4072 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4073 				  MTL_MAX_TX_QUEUES,
4074 				  MTL_MAX_RX_QUEUES);
4075 	if (!ndev)
4076 		return -ENOMEM;
4077 
4078 	SET_NETDEV_DEV(ndev, device);
4079 
4080 	priv = netdev_priv(ndev);
4081 	priv->device = device;
4082 	priv->dev = ndev;
4083 
4084 	stmmac_set_ethtool_ops(ndev);
4085 	priv->pause = pause;
4086 	priv->plat = plat_dat;
4087 	priv->ioaddr = res->addr;
4088 	priv->dev->base_addr = (unsigned long)res->addr;
4089 
4090 	priv->dev->irq = res->irq;
4091 	priv->wol_irq = res->wol_irq;
4092 	priv->lpi_irq = res->lpi_irq;
4093 
4094 	if (res->mac)
4095 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4096 
4097 	dev_set_drvdata(device, priv->dev);
4098 
4099 	/* Verify driver arguments */
4100 	stmmac_verify_args();
4101 
4102 	/* Override with kernel parameters if supplied XXX CRS XXX
4103 	 * this needs to have multiple instances
4104 	 */
4105 	if ((phyaddr >= 0) && (phyaddr <= 31))
4106 		priv->plat->phy_addr = phyaddr;
4107 
4108 	if (priv->plat->stmmac_rst)
4109 		reset_control_deassert(priv->plat->stmmac_rst);
4110 
4111 	/* Init MAC and get the capabilities */
4112 	ret = stmmac_hw_init(priv);
4113 	if (ret)
4114 		goto error_hw_init;
4115 
4116 	/* Configure real RX and TX queues */
4117 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4118 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4119 
4120 	ndev->netdev_ops = &stmmac_netdev_ops;
4121 
4122 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4123 			    NETIF_F_RXCSUM;
4124 
4125 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4126 		ndev->hw_features |= NETIF_F_TSO;
4127 		priv->tso = true;
4128 		dev_info(priv->device, "TSO feature enabled\n");
4129 	}
4130 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4131 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4132 #ifdef STMMAC_VLAN_TAG_USED
4133 	/* Both mac100 and gmac support receive VLAN tag detection */
4134 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4135 #endif
4136 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4137 
4138 	/* MTU range: 46 - hw-specific max */
4139 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4140 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4141 		ndev->max_mtu = JUMBO_LEN;
4142 	else
4143 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4144 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4145 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
4146 	 */
4147 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4148 	    (priv->plat->maxmtu >= ndev->min_mtu))
4149 		ndev->max_mtu = priv->plat->maxmtu;
4150 	else if (priv->plat->maxmtu < ndev->min_mtu)
4151 		dev_warn(priv->device,
4152 			 "%s: warning: maxmtu having invalid value (%d)\n",
4153 			 __func__, priv->plat->maxmtu);
4154 
4155 	if (flow_ctrl)
4156 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4157 
4158 	/* Rx Watchdog is available in the COREs newer than the 3.40.
4159 	 * In some case, for example on bugged HW this feature
4160 	 * has to be disable and this can be done by passing the
4161 	 * riwt_off field from the platform.
4162 	 */
4163 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4164 		priv->use_riwt = 1;
4165 		dev_info(priv->device,
4166 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4167 	}
4168 
4169 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4170 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4171 
4172 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4173 			       (8 * priv->plat->rx_queues_to_use));
4174 	}
4175 
4176 	spin_lock_init(&priv->lock);
4177 
4178 	/* If a specific clk_csr value is passed from the platform
4179 	 * this means that the CSR Clock Range selection cannot be
4180 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
4181 	 * set the MDC clock dynamically according to the csr actual
4182 	 * clock input.
4183 	 */
4184 	if (!priv->plat->clk_csr)
4185 		stmmac_clk_csr_set(priv);
4186 	else
4187 		priv->clk_csr = priv->plat->clk_csr;
4188 
4189 	stmmac_check_pcs_mode(priv);
4190 
4191 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4192 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4193 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4194 		/* MDIO bus Registration */
4195 		ret = stmmac_mdio_register(ndev);
4196 		if (ret < 0) {
4197 			dev_err(priv->device,
4198 				"%s: MDIO bus (id: %d) registration failed",
4199 				__func__, priv->plat->bus_id);
4200 			goto error_mdio_register;
4201 		}
4202 	}
4203 
4204 	ret = register_netdev(ndev);
4205 	if (ret) {
4206 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4207 			__func__, ret);
4208 		goto error_netdev_register;
4209 	}
4210 
4211 	return ret;
4212 
4213 error_netdev_register:
4214 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4215 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4216 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4217 		stmmac_mdio_unregister(ndev);
4218 error_mdio_register:
4219 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4220 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4221 
4222 		netif_napi_del(&rx_q->napi);
4223 	}
4224 error_hw_init:
4225 	free_netdev(ndev);
4226 
4227 	return ret;
4228 }
4229 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4230 
4231 /**
4232  * stmmac_dvr_remove
4233  * @dev: device pointer
4234  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
4235  * changes the link status, releases the DMA descriptor rings.
4236  */
4237 int stmmac_dvr_remove(struct device *dev)
4238 {
4239 	struct net_device *ndev = dev_get_drvdata(dev);
4240 	struct stmmac_priv *priv = netdev_priv(ndev);
4241 
4242 	netdev_info(priv->dev, "%s: removing driver", __func__);
4243 
4244 	stmmac_stop_all_dma(priv);
4245 
4246 	priv->hw->mac->set_mac(priv->ioaddr, false);
4247 	netif_carrier_off(ndev);
4248 	unregister_netdev(ndev);
4249 	if (priv->plat->stmmac_rst)
4250 		reset_control_assert(priv->plat->stmmac_rst);
4251 	clk_disable_unprepare(priv->plat->pclk);
4252 	clk_disable_unprepare(priv->plat->stmmac_clk);
4253 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4254 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4255 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4256 		stmmac_mdio_unregister(ndev);
4257 	free_netdev(ndev);
4258 
4259 	return 0;
4260 }
4261 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4262 
4263 /**
4264  * stmmac_suspend - suspend callback
4265  * @dev: device pointer
4266  * Description: this is the function to suspend the device and it is called
4267  * by the platform driver to stop the network queue, release the resources,
4268  * program the PMT register (for WoL), clean and release driver resources.
4269  */
4270 int stmmac_suspend(struct device *dev)
4271 {
4272 	struct net_device *ndev = dev_get_drvdata(dev);
4273 	struct stmmac_priv *priv = netdev_priv(ndev);
4274 	unsigned long flags;
4275 
4276 	if (!ndev || !netif_running(ndev))
4277 		return 0;
4278 
4279 	if (ndev->phydev)
4280 		phy_stop(ndev->phydev);
4281 
4282 	spin_lock_irqsave(&priv->lock, flags);
4283 
4284 	netif_device_detach(ndev);
4285 	stmmac_stop_all_queues(priv);
4286 
4287 	stmmac_disable_all_queues(priv);
4288 
4289 	/* Stop TX/RX DMA */
4290 	stmmac_stop_all_dma(priv);
4291 
4292 	/* Enable Power down mode by programming the PMT regs */
4293 	if (device_may_wakeup(priv->device)) {
4294 		priv->hw->mac->pmt(priv->hw, priv->wolopts);
4295 		priv->irq_wake = 1;
4296 	} else {
4297 		priv->hw->mac->set_mac(priv->ioaddr, false);
4298 		pinctrl_pm_select_sleep_state(priv->device);
4299 		/* Disable clock in case of PWM is off */
4300 		clk_disable(priv->plat->pclk);
4301 		clk_disable(priv->plat->stmmac_clk);
4302 	}
4303 	spin_unlock_irqrestore(&priv->lock, flags);
4304 
4305 	priv->oldlink = 0;
4306 	priv->speed = SPEED_UNKNOWN;
4307 	priv->oldduplex = DUPLEX_UNKNOWN;
4308 	return 0;
4309 }
4310 EXPORT_SYMBOL_GPL(stmmac_suspend);
4311 
4312 /**
4313  * stmmac_reset_queues_param - reset queue parameters
4314  * @dev: device pointer
4315  */
4316 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4317 {
4318 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4319 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4320 	u32 queue;
4321 
4322 	for (queue = 0; queue < rx_cnt; queue++) {
4323 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4324 
4325 		rx_q->cur_rx = 0;
4326 		rx_q->dirty_rx = 0;
4327 	}
4328 
4329 	for (queue = 0; queue < tx_cnt; queue++) {
4330 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4331 
4332 		tx_q->cur_tx = 0;
4333 		tx_q->dirty_tx = 0;
4334 	}
4335 }
4336 
4337 /**
4338  * stmmac_resume - resume callback
4339  * @dev: device pointer
4340  * Description: when resume this function is invoked to setup the DMA and CORE
4341  * in a usable state.
4342  */
4343 int stmmac_resume(struct device *dev)
4344 {
4345 	struct net_device *ndev = dev_get_drvdata(dev);
4346 	struct stmmac_priv *priv = netdev_priv(ndev);
4347 	unsigned long flags;
4348 
4349 	if (!netif_running(ndev))
4350 		return 0;
4351 
4352 	/* Power Down bit, into the PM register, is cleared
4353 	 * automatically as soon as a magic packet or a Wake-up frame
4354 	 * is received. Anyway, it's better to manually clear
4355 	 * this bit because it can generate problems while resuming
4356 	 * from another devices (e.g. serial console).
4357 	 */
4358 	if (device_may_wakeup(priv->device)) {
4359 		spin_lock_irqsave(&priv->lock, flags);
4360 		priv->hw->mac->pmt(priv->hw, 0);
4361 		spin_unlock_irqrestore(&priv->lock, flags);
4362 		priv->irq_wake = 0;
4363 	} else {
4364 		pinctrl_pm_select_default_state(priv->device);
4365 		/* enable the clk previously disabled */
4366 		clk_enable(priv->plat->stmmac_clk);
4367 		clk_enable(priv->plat->pclk);
4368 		/* reset the phy so that it's ready */
4369 		if (priv->mii)
4370 			stmmac_mdio_reset(priv->mii);
4371 	}
4372 
4373 	netif_device_attach(ndev);
4374 
4375 	spin_lock_irqsave(&priv->lock, flags);
4376 
4377 	stmmac_reset_queues_param(priv);
4378 
4379 	/* reset private mss value to force mss context settings at
4380 	 * next tso xmit (only used for gmac4).
4381 	 */
4382 	priv->mss = 0;
4383 
4384 	stmmac_clear_descriptors(priv);
4385 
4386 	stmmac_hw_setup(ndev, false);
4387 	stmmac_init_tx_coalesce(priv);
4388 	stmmac_set_rx_mode(ndev);
4389 
4390 	stmmac_enable_all_queues(priv);
4391 
4392 	stmmac_start_all_queues(priv);
4393 
4394 	spin_unlock_irqrestore(&priv->lock, flags);
4395 
4396 	if (ndev->phydev)
4397 		phy_start(ndev->phydev);
4398 
4399 	return 0;
4400 }
4401 EXPORT_SYMBOL_GPL(stmmac_resume);
4402 
4403 #ifndef MODULE
4404 static int __init stmmac_cmdline_opt(char *str)
4405 {
4406 	char *opt;
4407 
4408 	if (!str || !*str)
4409 		return -EINVAL;
4410 	while ((opt = strsep(&str, ",")) != NULL) {
4411 		if (!strncmp(opt, "debug:", 6)) {
4412 			if (kstrtoint(opt + 6, 0, &debug))
4413 				goto err;
4414 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4415 			if (kstrtoint(opt + 8, 0, &phyaddr))
4416 				goto err;
4417 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4418 			if (kstrtoint(opt + 7, 0, &buf_sz))
4419 				goto err;
4420 		} else if (!strncmp(opt, "tc:", 3)) {
4421 			if (kstrtoint(opt + 3, 0, &tc))
4422 				goto err;
4423 		} else if (!strncmp(opt, "watchdog:", 9)) {
4424 			if (kstrtoint(opt + 9, 0, &watchdog))
4425 				goto err;
4426 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4427 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4428 				goto err;
4429 		} else if (!strncmp(opt, "pause:", 6)) {
4430 			if (kstrtoint(opt + 6, 0, &pause))
4431 				goto err;
4432 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4433 			if (kstrtoint(opt + 10, 0, &eee_timer))
4434 				goto err;
4435 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4436 			if (kstrtoint(opt + 11, 0, &chain_mode))
4437 				goto err;
4438 		}
4439 	}
4440 	return 0;
4441 
4442 err:
4443 	pr_err("%s: ERROR broken module parameter conversion", __func__);
4444 	return -EINVAL;
4445 }
4446 
4447 __setup("stmmaceth=", stmmac_cmdline_opt);
4448 #endif /* MODULE */
4449 
4450 static int __init stmmac_init(void)
4451 {
4452 #ifdef CONFIG_DEBUG_FS
4453 	/* Create debugfs main directory if it doesn't exist yet */
4454 	if (!stmmac_fs_dir) {
4455 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4456 
4457 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4458 			pr_err("ERROR %s, debugfs create directory failed\n",
4459 			       STMMAC_RESOURCE_NAME);
4460 
4461 			return -ENOMEM;
4462 		}
4463 	}
4464 #endif
4465 
4466 	return 0;
4467 }
4468 
4469 static void __exit stmmac_exit(void)
4470 {
4471 #ifdef CONFIG_DEBUG_FS
4472 	debugfs_remove_recursive(stmmac_fs_dir);
4473 #endif
4474 }
4475 
4476 module_init(stmmac_init)
4477 module_exit(stmmac_exit)
4478 
4479 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4480 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4481 MODULE_LICENSE("GPL");
4482