1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 
54 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
55 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
56 
57 /* Module parameters */
58 #define TX_TIMEO	5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62 
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66 
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70 
71 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
73 
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77 
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81 
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86 
87 #define	DEFAULT_BUFSIZE	1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91 
92 #define	STMMAC_RX_COPYBREAK	256
93 
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
96 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97 
98 #define STMMAC_DEFAULT_LPI_TIMER	1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103 
/* By default the driver uses the ring mode to manage tx and rx descriptors,
 * but the user can force the use of the chain mode instead of the ring.
 */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
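
/* Illustrative usage of the parameters above (the module name depends on how
 * the driver is built; a loadable "stmmac" module is assumed here):
 *
 *	insmod stmmac.ko buf_sz=4096 eee_timer=2000 chain_mode=1
 *
 * The writable ones can also be changed at run time via
 * /sys/module/stmmac/parameters/<name>.
 */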
110 
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112 
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117 
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119 
120 /**
121  * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default value in
 * case of errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127 	if (unlikely(watchdog < 0))
128 		watchdog = TX_TIMEO;
129 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130 		buf_sz = DEFAULT_BUFSIZE;
131 	if (unlikely(flow_ctrl > 1))
132 		flow_ctrl = FLOW_AUTO;
133 	else if (likely(flow_ctrl < 0))
134 		flow_ctrl = FLOW_OFF;
135 	if (unlikely((pause < 0) || (pause > 0xffff)))
136 		pause = PAUSE_TIME;
137 	if (eee_timer < 0)
138 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140 
141 /**
142  * stmmac_disable_all_queues - Disable all queues
143  * @priv: driver private structure
144  */
145 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
146 {
147 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
148 	u32 queue;
149 
150 	for (queue = 0; queue < rx_queues_cnt; queue++) {
151 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
152 
153 		napi_disable(&rx_q->napi);
154 	}
155 }
156 
157 /**
158  * stmmac_enable_all_queues - Enable all queues
159  * @priv: driver private structure
160  */
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162 {
163 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164 	u32 queue;
165 
166 	for (queue = 0; queue < rx_queues_cnt; queue++) {
167 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
168 
169 		napi_enable(&rx_q->napi);
170 	}
171 }
172 
173 /**
174  * stmmac_stop_all_queues - Stop all queues
175  * @priv: driver private structure
176  */
177 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
178 {
179 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
180 	u32 queue;
181 
182 	for (queue = 0; queue < tx_queues_cnt; queue++)
183 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
184 }
185 
186 /**
187  * stmmac_start_all_queues - Start all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_start_all_queues(struct stmmac_priv *priv)
191 {
192 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193 	u32 queue;
194 
195 	for (queue = 0; queue < tx_queues_cnt; queue++)
196 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198 
199 /**
200  * stmmac_clk_csr_set - dynamically set the MDC clock
201  * @priv: driver private structure
202  * Description: this is to dynamically set the MDC clock according to the csr
203  * clock input.
204  * Note:
205  *	If a specific clk_csr value is passed from the platform
206  *	this means that the CSR Clock Range selection cannot be
207  *	changed at run-time and it is fixed (as reported in the driver
208  *	documentation). Viceversa the driver will try to set the MDC
209  *	clock dynamically according to the actual clock input.
210  */
211 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
212 {
213 	u32 clk_rate;
214 
215 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
216 
	/* The clk_csr value provided by the platform is assumed valid
	 * for all cases except the ones handled below.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider, as the frequency of
	 * clk_csr_i is not known, so the default divider is kept.
	 */
223 	 */
224 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
225 		if (clk_rate < CSR_F_35M)
226 			priv->clk_csr = STMMAC_CSR_20_35M;
227 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
228 			priv->clk_csr = STMMAC_CSR_35_60M;
229 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
230 			priv->clk_csr = STMMAC_CSR_60_100M;
231 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
232 			priv->clk_csr = STMMAC_CSR_100_150M;
233 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
234 			priv->clk_csr = STMMAC_CSR_150_250M;
235 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
236 			priv->clk_csr = STMMAC_CSR_250_300M;
237 	}
238 }
239 
240 static void print_pkt(unsigned char *buf, int len)
241 {
242 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
243 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
244 }
245 
246 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
247 {
248 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
249 	u32 avail;
250 
251 	if (tx_q->dirty_tx > tx_q->cur_tx)
252 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
253 	else
254 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
255 
256 	return avail;
257 }
258 
259 /**
260  * stmmac_rx_dirty - Get RX queue dirty
261  * @priv: driver private structure
262  * @queue: RX queue index
263  */
264 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
265 {
266 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
267 	u32 dirty;
268 
269 	if (rx_q->dirty_rx <= rx_q->cur_rx)
270 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
271 	else
272 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
273 
274 	return dirty;
275 }
276 
277 /**
278  * stmmac_hw_fix_mac_speed - callback for speed selection
279  * @priv: driver private structure
280  * Description: on some platforms (e.g. ST), some HW system configuration
281  * registers have to be set according to the link speed negotiated.
282  */
283 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
284 {
285 	struct net_device *ndev = priv->dev;
286 	struct phy_device *phydev = ndev->phydev;
287 
288 	if (likely(priv->plat->fix_mac_speed))
289 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
290 }
291 
292 /**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function checks that all TX queues are idle and, if so,
 * enters LPI mode when EEE is enabled.
297  */
298 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
299 {
300 	u32 tx_cnt = priv->plat->tx_queues_to_use;
301 	u32 queue;
302 
303 	/* check if all TX queues have the work finished */
304 	for (queue = 0; queue < tx_cnt; queue++) {
305 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
306 
307 		if (tx_q->dirty_tx != tx_q->cur_tx)
308 			return; /* still unfinished work */
309 	}
310 
311 	/* Check and enter in LPI mode */
312 	if (!priv->tx_path_in_lpi_mode)
313 		priv->hw->mac->set_eee_mode(priv->hw,
314 					    priv->plat->en_tx_lpi_clockgating);
315 }
316 
317 /**
318  * stmmac_disable_eee_mode - disable and exit from LPI mode
319  * @priv: driver private structure
 * Description: this function exits LPI mode and disables EEE. It is called
 * from the xmit path.
322  */
323 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
324 {
325 	priv->hw->mac->reset_eee_mode(priv->hw);
326 	del_timer_sync(&priv->eee_ctrl_timer);
327 	priv->tx_path_in_lpi_mode = false;
328 }
329 
330 /**
331  * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg: data hook (pointer to the driver private structure)
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC transmitter can be moved to LPI state.
336  */
337 static void stmmac_eee_ctrl_timer(unsigned long arg)
338 {
339 	struct stmmac_priv *priv = (struct stmmac_priv *)arg;
340 
341 	stmmac_enable_eee_mode(priv);
342 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
343 }
344 
345 /**
346  * stmmac_eee_init - init EEE
347  * @priv: driver private structure
348  * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
352  */
353 bool stmmac_eee_init(struct stmmac_priv *priv)
354 {
355 	struct net_device *ndev = priv->dev;
356 	unsigned long flags;
357 	bool ret = false;
358 
	/* When using the PCS we cannot deal with the PHY registers at this
	 * stage, so extra features like EEE are not supported.
	 */
362 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
363 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
364 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
365 		goto out;
366 
367 	/* MAC core supports the EEE feature. */
368 	if (priv->dma_cap.eee) {
369 		int tx_lpi_timer = priv->tx_lpi_timer;
370 
371 		/* Check if the PHY supports EEE */
372 		if (phy_init_eee(ndev->phydev, 1)) {
			/* Handle the case where EEE can no longer be
			 * supported at run-time (for example because the
			 * link partner caps have changed): the driver then
			 * disables its own timers.
			 */
378 			spin_lock_irqsave(&priv->lock, flags);
379 			if (priv->eee_active) {
380 				netdev_dbg(priv->dev, "disable EEE\n");
381 				del_timer_sync(&priv->eee_ctrl_timer);
382 				priv->hw->mac->set_eee_timer(priv->hw, 0,
383 							     tx_lpi_timer);
384 			}
385 			priv->eee_active = 0;
386 			spin_unlock_irqrestore(&priv->lock, flags);
387 			goto out;
388 		}
389 		/* Activate the EEE and start timers */
390 		spin_lock_irqsave(&priv->lock, flags);
391 		if (!priv->eee_active) {
392 			priv->eee_active = 1;
393 			setup_timer(&priv->eee_ctrl_timer,
394 				    stmmac_eee_ctrl_timer,
395 				    (unsigned long)priv);
396 			mod_timer(&priv->eee_ctrl_timer,
397 				  STMMAC_LPI_T(eee_timer));
398 
399 			priv->hw->mac->set_eee_timer(priv->hw,
400 						     STMMAC_DEFAULT_LIT_LS,
401 						     tx_lpi_timer);
402 		}
403 		/* Set HW EEE according to the speed */
404 		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
405 
406 		ret = true;
407 		spin_unlock_irqrestore(&priv->lock, flags);
408 
409 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
410 	}
411 out:
412 	return ret;
413 }
414 
415 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
416  * @priv: driver private structure
417  * @p : descriptor pointer
418  * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor and passes it to
 * the stack, after performing some sanity checks.
422  */
423 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
424 				   struct dma_desc *p, struct sk_buff *skb)
425 {
426 	struct skb_shared_hwtstamps shhwtstamp;
427 	u64 ns;
428 
429 	if (!priv->hwts_tx_en)
430 		return;
431 
432 	/* exit if skb doesn't support hw tstamp */
433 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
434 		return;
435 
436 	/* check tx tstamp status */
437 	if (!priv->hw->desc->get_tx_timestamp_status(p)) {
438 		/* get the valid tstamp */
439 		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
440 
441 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
442 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
443 
444 		netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
445 		/* pass tstamp to stack */
446 		skb_tstamp_tx(skb, &shhwtstamp);
447 	}
448 
449 	return;
450 }
451 
452 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
453  * @priv: driver private structure
454  * @p : descriptor pointer
455  * @np : next descriptor pointer
456  * @skb : the socket buffer
457  * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
460  */
461 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
462 				   struct dma_desc *np, struct sk_buff *skb)
463 {
464 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
465 	u64 ns;
466 
467 	if (!priv->hwts_rx_en)
468 		return;
469 
470 	/* Check if timestamp is available */
471 	if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
472 		/* For GMAC4, the valid timestamp is from CTX next desc. */
473 		if (priv->plat->has_gmac4)
474 			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
475 		else
476 			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
477 
478 		netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
479 		shhwtstamp = skb_hwtstamps(skb);
480 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
481 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
482 	} else  {
483 		netdev_err(priv->dev, "cannot get RX hw timestamp\n");
484 	}
485 }
486 
487 /**
488  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
489  *  @dev: device pointer.
490  *  @ifr: An IOCTL specific structure, that can contain a pointer to
491  *  a proprietary structure used to pass information to the driver.
492  *  Description:
493  *  This function configures the MAC to enable/disable both outgoing(TX)
494  *  and incoming(RX) packets time stamping based on user input.
495  *  Return Value:
496  *  0 on success and an appropriate -ve integer on failure.
497  */
498 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
499 {
500 	struct stmmac_priv *priv = netdev_priv(dev);
501 	struct hwtstamp_config config;
502 	struct timespec64 now;
503 	u64 temp = 0;
504 	u32 ptp_v2 = 0;
505 	u32 tstamp_all = 0;
506 	u32 ptp_over_ipv4_udp = 0;
507 	u32 ptp_over_ipv6_udp = 0;
508 	u32 ptp_over_ethernet = 0;
509 	u32 snap_type_sel = 0;
510 	u32 ts_master_en = 0;
511 	u32 ts_event_en = 0;
512 	u32 value = 0;
513 	u32 sec_inc;
514 
515 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
516 		netdev_alert(priv->dev, "No support for HW time stamping\n");
517 		priv->hwts_tx_en = 0;
518 		priv->hwts_rx_en = 0;
519 
520 		return -EOPNOTSUPP;
521 	}
522 
523 	if (copy_from_user(&config, ifr->ifr_data,
524 			   sizeof(struct hwtstamp_config)))
525 		return -EFAULT;
526 
527 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
528 		   __func__, config.flags, config.tx_type, config.rx_filter);
529 
530 	/* reserved for future extensions */
531 	if (config.flags)
532 		return -EINVAL;
533 
534 	if (config.tx_type != HWTSTAMP_TX_OFF &&
535 	    config.tx_type != HWTSTAMP_TX_ON)
536 		return -ERANGE;
537 
538 	if (priv->adv_ts) {
539 		switch (config.rx_filter) {
540 		case HWTSTAMP_FILTER_NONE:
541 			/* time stamp no incoming packet at all */
542 			config.rx_filter = HWTSTAMP_FILTER_NONE;
543 			break;
544 
545 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
546 			/* PTP v1, UDP, any kind of event packet */
547 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
548 			/* take time stamp for all event messages */
549 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
550 
551 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
552 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
553 			break;
554 
555 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
556 			/* PTP v1, UDP, Sync packet */
557 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
558 			/* take time stamp for SYNC messages only */
559 			ts_event_en = PTP_TCR_TSEVNTENA;
560 
561 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
562 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
563 			break;
564 
565 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
566 			/* PTP v1, UDP, Delay_req packet */
567 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
568 			/* take time stamp for Delay_Req messages only */
569 			ts_master_en = PTP_TCR_TSMSTRENA;
570 			ts_event_en = PTP_TCR_TSEVNTENA;
571 
572 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
573 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
574 			break;
575 
576 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
577 			/* PTP v2, UDP, any kind of event packet */
578 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
579 			ptp_v2 = PTP_TCR_TSVER2ENA;
580 			/* take time stamp for all event messages */
581 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
582 
583 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
584 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
585 			break;
586 
587 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
588 			/* PTP v2, UDP, Sync packet */
589 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
590 			ptp_v2 = PTP_TCR_TSVER2ENA;
591 			/* take time stamp for SYNC messages only */
592 			ts_event_en = PTP_TCR_TSEVNTENA;
593 
594 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
595 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
596 			break;
597 
598 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
599 			/* PTP v2, UDP, Delay_req packet */
600 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
601 			ptp_v2 = PTP_TCR_TSVER2ENA;
602 			/* take time stamp for Delay_Req messages only */
603 			ts_master_en = PTP_TCR_TSMSTRENA;
604 			ts_event_en = PTP_TCR_TSEVNTENA;
605 
606 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
607 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
608 			break;
609 
610 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.1AS, any layer, any kind of event packet */
612 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
613 			ptp_v2 = PTP_TCR_TSVER2ENA;
614 			/* take time stamp for all event messages */
615 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
616 
617 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
618 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
619 			ptp_over_ethernet = PTP_TCR_TSIPENA;
620 			break;
621 
622 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.1AS, any layer, Sync packet */
624 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
625 			ptp_v2 = PTP_TCR_TSVER2ENA;
626 			/* take time stamp for SYNC messages only */
627 			ts_event_en = PTP_TCR_TSEVNTENA;
628 
629 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
630 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
631 			ptp_over_ethernet = PTP_TCR_TSIPENA;
632 			break;
633 
634 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.1AS, any layer, Delay_req packet */
636 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
637 			ptp_v2 = PTP_TCR_TSVER2ENA;
638 			/* take time stamp for Delay_Req messages only */
639 			ts_master_en = PTP_TCR_TSMSTRENA;
640 			ts_event_en = PTP_TCR_TSEVNTENA;
641 
642 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
643 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
644 			ptp_over_ethernet = PTP_TCR_TSIPENA;
645 			break;
646 
647 		case HWTSTAMP_FILTER_ALL:
648 			/* time stamp any incoming packet */
649 			config.rx_filter = HWTSTAMP_FILTER_ALL;
650 			tstamp_all = PTP_TCR_TSENALL;
651 			break;
652 
653 		default:
654 			return -ERANGE;
655 		}
656 	} else {
657 		switch (config.rx_filter) {
658 		case HWTSTAMP_FILTER_NONE:
659 			config.rx_filter = HWTSTAMP_FILTER_NONE;
660 			break;
661 		default:
662 			/* PTP v1, UDP, any kind of event packet */
663 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
664 			break;
665 		}
666 	}
667 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
668 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
669 
670 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
671 		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
672 	else {
673 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
674 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
675 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
676 			 ts_master_en | snap_type_sel);
677 		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
678 
679 		/* program Sub Second Increment reg */
680 		sec_inc = priv->hw->ptp->config_sub_second_increment(
681 			priv->ptpaddr, priv->plat->clk_ptp_rate,
682 			priv->plat->has_gmac4);
683 		temp = div_u64(1000000000ULL, sec_inc);
684 
		/* calculate the default addend value:
		 * formula is:
		 * addend = (2^32)/freq_div_ratio
		 * where freq_div_ratio = clk_ptp_rate/(1e9ns/sec_inc),
		 * i.e. addend = ((1e9/sec_inc) << 32)/clk_ptp_rate
		 */
690 		temp = (u64)(temp << 32);
691 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
692 		priv->hw->ptp->config_addend(priv->ptpaddr,
693 					     priv->default_addend);
694 
695 		/* initialize system time */
696 		ktime_get_real_ts64(&now);
697 
698 		/* lower 32 bits of tv_sec are safe until y2106 */
699 		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
700 					    now.tv_nsec);
701 	}
702 
703 	return copy_to_user(ifr->ifr_data, &config,
704 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
705 }
706 
707 /**
708  * stmmac_init_ptp - init PTP
709  * @priv: driver private structure
710  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
711  * This is done by looking at the HW cap. register.
712  * This function also registers the ptp driver.
713  */
714 static int stmmac_init_ptp(struct stmmac_priv *priv)
715 {
716 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
717 		return -EOPNOTSUPP;
718 
719 	priv->adv_ts = 0;
720 	/* Check if adv_ts can be enabled for dwmac 4.x core */
721 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
722 		priv->adv_ts = 1;
723 	/* Dwmac 3.x core with extend_desc can support adv_ts */
724 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
725 		priv->adv_ts = 1;
726 
727 	if (priv->dma_cap.time_stamp)
728 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
729 
730 	if (priv->adv_ts)
731 		netdev_info(priv->dev,
732 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
733 
734 	priv->hw->ptp = &stmmac_ptp;
735 	priv->hwts_tx_en = 0;
736 	priv->hwts_rx_en = 0;
737 
738 	stmmac_ptp_register(priv);
739 
740 	return 0;
741 }
742 
743 static void stmmac_release_ptp(struct stmmac_priv *priv)
744 {
745 	if (priv->plat->clk_ptp_ref)
746 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
747 	stmmac_ptp_unregister(priv);
748 }
749 
750 /**
751  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode negotiated by the PHY
 *  Description: It is used for configuring the flow control in all queues
754  */
755 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
756 {
757 	u32 tx_cnt = priv->plat->tx_queues_to_use;
758 
759 	priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
760 				 priv->pause, tx_cnt);
761 }
762 
763 /**
764  * stmmac_adjust_link - adjusts the link parameters
765  * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the eee initialization because the link can switch
 * between different (eee capable) networks.
771  */
772 static void stmmac_adjust_link(struct net_device *dev)
773 {
774 	struct stmmac_priv *priv = netdev_priv(dev);
775 	struct phy_device *phydev = dev->phydev;
776 	unsigned long flags;
777 	int new_state = 0;
778 
779 	if (!phydev)
780 		return;
781 
782 	spin_lock_irqsave(&priv->lock, flags);
783 
784 	if (phydev->link) {
785 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
786 
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
789 		if (phydev->duplex != priv->oldduplex) {
790 			new_state = 1;
791 			if (!(phydev->duplex))
792 				ctrl &= ~priv->hw->link.duplex;
793 			else
794 				ctrl |= priv->hw->link.duplex;
795 			priv->oldduplex = phydev->duplex;
796 		}
797 		/* Flow Control operation */
798 		if (phydev->pause)
799 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
800 
801 		if (phydev->speed != priv->speed) {
802 			new_state = 1;
803 			switch (phydev->speed) {
804 			case 1000:
805 				if (priv->plat->has_gmac ||
806 				    priv->plat->has_gmac4)
807 					ctrl &= ~priv->hw->link.port;
808 				break;
809 			case 100:
810 				if (priv->plat->has_gmac ||
811 				    priv->plat->has_gmac4) {
812 					ctrl |= priv->hw->link.port;
813 					ctrl |= priv->hw->link.speed;
814 				} else {
815 					ctrl &= ~priv->hw->link.port;
816 				}
817 				break;
818 			case 10:
819 				if (priv->plat->has_gmac ||
820 				    priv->plat->has_gmac4) {
821 					ctrl |= priv->hw->link.port;
822 					ctrl &= ~(priv->hw->link.speed);
823 				} else {
824 					ctrl &= ~priv->hw->link.port;
825 				}
826 				break;
827 			default:
828 				netif_warn(priv, link, priv->dev,
829 					   "broken speed: %d\n", phydev->speed);
830 				phydev->speed = SPEED_UNKNOWN;
831 				break;
832 			}
833 			if (phydev->speed != SPEED_UNKNOWN)
834 				stmmac_hw_fix_mac_speed(priv);
835 			priv->speed = phydev->speed;
836 		}
837 
838 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
839 
840 		if (!priv->oldlink) {
841 			new_state = 1;
842 			priv->oldlink = 1;
843 		}
844 	} else if (priv->oldlink) {
845 		new_state = 1;
846 		priv->oldlink = 0;
847 		priv->speed = SPEED_UNKNOWN;
848 		priv->oldduplex = DUPLEX_UNKNOWN;
849 	}
850 
851 	if (new_state && netif_msg_link(priv))
852 		phy_print_status(phydev);
853 
854 	spin_unlock_irqrestore(&priv->lock, flags);
855 
856 	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the hook to adjust the
		 * link in case a switch is attached to the stmmac driver.
		 */
860 		phydev->irq = PHY_IGNORE_INTERRUPT;
861 	else
862 		/* At this stage, init the EEE if supported.
863 		 * Never called in case of fixed_link.
864 		 */
865 		priv->eee_enabled = stmmac_eee_init(priv);
866 }
867 
868 /**
869  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
870  * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
874  */
875 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
876 {
877 	int interface = priv->plat->interface;
878 
879 	if (priv->dma_cap.pcs) {
880 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
881 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
882 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
883 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
884 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
885 			priv->hw->pcs = STMMAC_PCS_RGMII;
886 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
887 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
888 			priv->hw->pcs = STMMAC_PCS_SGMII;
889 		}
890 	}
891 }
892 
893 /**
894  * stmmac_init_phy - PHY initialization
895  * @dev: net device structure
896  * Description: it initializes the driver's PHY state, and attaches the PHY
897  * to the mac driver.
898  *  Return value:
899  *  0 on success
900  */
901 static int stmmac_init_phy(struct net_device *dev)
902 {
903 	struct stmmac_priv *priv = netdev_priv(dev);
904 	struct phy_device *phydev;
905 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
906 	char bus_id[MII_BUS_ID_SIZE];
907 	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

	priv->oldlink = 0;
910 	priv->speed = SPEED_UNKNOWN;
911 	priv->oldduplex = DUPLEX_UNKNOWN;
912 
913 	if (priv->plat->phy_node) {
914 		phydev = of_phy_connect(dev, priv->plat->phy_node,
915 					&stmmac_adjust_link, 0, interface);
916 	} else {
917 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
918 			 priv->plat->bus_id);
919 
920 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
921 			 priv->plat->phy_addr);
922 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
923 			   phy_id_fmt);
924 
925 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
926 				     interface);
927 	}
928 
929 	if (IS_ERR_OR_NULL(phydev)) {
930 		netdev_err(priv->dev, "Could not attach to PHY\n");
931 		if (!phydev)
932 			return -ENODEV;
933 
934 		return PTR_ERR(phydev);
935 	}
936 
937 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
938 	if ((interface == PHY_INTERFACE_MODE_MII) ||
939 	    (interface == PHY_INTERFACE_MODE_RMII) ||
940 		(max_speed < 1000 && max_speed > 0))
941 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
942 					 SUPPORTED_1000baseT_Full);
943 
944 	/*
945 	 * Broken HW is sometimes missing the pull-up resistor on the
946 	 * MDIO line, which results in reads to non-existent devices returning
947 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
948 	 * device as well.
949 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
950 	 */
951 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
952 		phy_disconnect(phydev);
953 		return -ENODEV;
954 	}
955 
956 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling; make sure we force a link transition if
	 * we have an UP/DOWN/UP transition
959 	 */
960 	if (phydev->is_pseudo_fixed_link)
961 		phydev->irq = PHY_POLL;
962 
963 	phy_attached_info(phydev);
964 	return 0;
965 }
966 
967 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
968 {
969 	u32 rx_cnt = priv->plat->rx_queues_to_use;
970 	void *head_rx;
971 	u32 queue;
972 
973 	/* Display RX rings */
974 	for (queue = 0; queue < rx_cnt; queue++) {
975 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
976 
977 		pr_info("\tRX Queue %u rings\n", queue);
978 
979 		if (priv->extend_desc)
980 			head_rx = (void *)rx_q->dma_erx;
981 		else
982 			head_rx = (void *)rx_q->dma_rx;
983 
984 		/* Display RX ring */
985 		priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
986 	}
987 }
988 
989 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
990 {
991 	u32 tx_cnt = priv->plat->tx_queues_to_use;
992 	void *head_tx;
993 	u32 queue;
994 
995 	/* Display TX rings */
996 	for (queue = 0; queue < tx_cnt; queue++) {
997 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
998 
999 		pr_info("\tTX Queue %d rings\n", queue);
1000 
1001 		if (priv->extend_desc)
1002 			head_tx = (void *)tx_q->dma_etx;
1003 		else
1004 			head_tx = (void *)tx_q->dma_tx;
1005 
1006 		priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1007 	}
1008 }
1009 
1010 static void stmmac_display_rings(struct stmmac_priv *priv)
1011 {
1012 	/* Display RX ring */
1013 	stmmac_display_rx_rings(priv);
1014 
1015 	/* Display TX ring */
1016 	stmmac_display_tx_rings(priv);
1017 }
1018 
1019 static int stmmac_set_bfsize(int mtu, int bufsize)
1020 {
1021 	int ret = bufsize;
1022 
1023 	if (mtu >= BUF_SIZE_4KiB)
1024 		ret = BUF_SIZE_8KiB;
1025 	else if (mtu >= BUF_SIZE_2KiB)
1026 		ret = BUF_SIZE_4KiB;
1027 	else if (mtu > DEFAULT_BUFSIZE)
1028 		ret = BUF_SIZE_2KiB;
1029 	else
1030 		ret = DEFAULT_BUFSIZE;
1031 
1032 	return ret;
1033 }
1034 
1035 /**
1036  * stmmac_clear_rx_descriptors - clear RX descriptors
1037  * @priv: driver private structure
1038  * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors;
 * it handles both basic and extended descriptors.
1041  */
1042 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1043 {
1044 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1045 	int i;
1046 
1047 	/* Clear the RX descriptors */
1048 	for (i = 0; i < DMA_RX_SIZE; i++)
1049 		if (priv->extend_desc)
1050 			priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1051 						     priv->use_riwt, priv->mode,
1052 						     (i == DMA_RX_SIZE - 1));
1053 		else
1054 			priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1055 						     priv->use_riwt, priv->mode,
1056 						     (i == DMA_RX_SIZE - 1));
1057 }
1058 
1059 /**
1060  * stmmac_clear_tx_descriptors - clear tx descriptors
1061  * @priv: driver private structure
1062  * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors;
 * it handles both basic and extended descriptors.
1065  */
1066 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1067 {
1068 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1069 	int i;
1070 
1071 	/* Clear the TX descriptors */
1072 	for (i = 0; i < DMA_TX_SIZE; i++)
1073 		if (priv->extend_desc)
1074 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1075 						     priv->mode,
1076 						     (i == DMA_TX_SIZE - 1));
1077 		else
1078 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1079 						     priv->mode,
1080 						     (i == DMA_TX_SIZE - 1));
1081 }
1082 
1083 /**
1084  * stmmac_clear_descriptors - clear descriptors
1085  * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors;
 * it handles both basic and extended descriptors.
1088  */
1089 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1090 {
1091 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1092 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1093 	u32 queue;
1094 
1095 	/* Clear the RX descriptors */
1096 	for (queue = 0; queue < rx_queue_cnt; queue++)
1097 		stmmac_clear_rx_descriptors(priv, queue);
1098 
1099 	/* Clear the TX descriptors */
1100 	for (queue = 0; queue < tx_queue_cnt; queue++)
1101 		stmmac_clear_tx_descriptors(priv, queue);
1102 }
1103 
1104 /**
1105  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1106  * @priv: driver private structure
1107  * @p: descriptor pointer
1108  * @i: descriptor index
1109  * @flags: gfp flag
1110  * @queue: RX queue index
1111  * Description: this function is called to allocate a receive buffer, perform
1112  * the DMA mapping and init the descriptor.
1113  */
1114 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1115 				  int i, gfp_t flags, u32 queue)
1116 {
1117 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1118 	struct sk_buff *skb;
1119 
1120 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1121 	if (!skb) {
1122 		netdev_err(priv->dev,
1123 			   "%s: Rx init fails; skb is NULL\n", __func__);
1124 		return -ENOMEM;
1125 	}
1126 	rx_q->rx_skbuff[i] = skb;
1127 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1128 						priv->dma_buf_sz,
1129 						DMA_FROM_DEVICE);
1130 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1131 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1132 		dev_kfree_skb_any(skb);
1133 		return -EINVAL;
1134 	}
1135 
1136 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
1137 		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1138 	else
1139 		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1140 
1141 	if ((priv->hw->mode->init_desc3) &&
1142 	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
1143 		priv->hw->mode->init_desc3(p);
1144 
1145 	return 0;
1146 }
1147 
1148 /**
1149  * stmmac_free_rx_buffer - free RX dma buffers
1150  * @priv: private structure
1151  * @queue: RX queue index
1152  * @i: buffer index.
1153  */
1154 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1155 {
1156 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1157 
1158 	if (rx_q->rx_skbuff[i]) {
1159 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1160 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1161 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1162 	}
1163 	rx_q->rx_skbuff[i] = NULL;
1164 }
1165 
1166 /**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
1170  * @i: buffer index.
1171  */
1172 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1173 {
1174 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1175 
1176 	if (tx_q->tx_skbuff_dma[i].buf) {
1177 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1178 			dma_unmap_page(priv->device,
1179 				       tx_q->tx_skbuff_dma[i].buf,
1180 				       tx_q->tx_skbuff_dma[i].len,
1181 				       DMA_TO_DEVICE);
1182 		else
1183 			dma_unmap_single(priv->device,
1184 					 tx_q->tx_skbuff_dma[i].buf,
1185 					 tx_q->tx_skbuff_dma[i].len,
1186 					 DMA_TO_DEVICE);
1187 	}
1188 
1189 	if (tx_q->tx_skbuff[i]) {
1190 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1191 		tx_q->tx_skbuff[i] = NULL;
1192 		tx_q->tx_skbuff_dma[i].buf = 0;
1193 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1194 	}
1195 }
1196 
1197 /**
1198  * init_dma_rx_desc_rings - init the RX descriptor rings
1199  * @dev: net device structure
1200  * @flags: gfp flag.
1201  * Description: this function initializes the DMA RX descriptors
1202  * and allocates the socket buffers. It supports the chained and ring
1203  * modes.
1204  */
1205 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1206 {
1207 	struct stmmac_priv *priv = netdev_priv(dev);
1208 	u32 rx_count = priv->plat->rx_queues_to_use;
1209 	unsigned int bfsize = 0;
1210 	int ret = -ENOMEM;
1211 	int queue;
1212 	int i;
1213 
1214 	if (priv->hw->mode->set_16kib_bfsize)
1215 		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1216 
1217 	if (bfsize < BUF_SIZE_16KiB)
1218 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1219 
1220 	priv->dma_buf_sz = bfsize;
1221 
1222 	/* RX INITIALIZATION */
1223 	netif_dbg(priv, probe, priv->dev,
1224 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1225 
1226 	for (queue = 0; queue < rx_count; queue++) {
1227 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1228 
1229 		netif_dbg(priv, probe, priv->dev,
1230 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1231 			  (u32)rx_q->dma_rx_phy);
1232 
1233 		for (i = 0; i < DMA_RX_SIZE; i++) {
1234 			struct dma_desc *p;
1235 
1236 			if (priv->extend_desc)
1237 				p = &((rx_q->dma_erx + i)->basic);
1238 			else
1239 				p = rx_q->dma_rx + i;
1240 
1241 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1242 						     queue);
1243 			if (ret)
1244 				goto err_init_rx_buffers;
1245 
1246 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1247 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1248 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1249 		}
1250 
1251 		rx_q->cur_rx = 0;
1252 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1253 
1254 		stmmac_clear_rx_descriptors(priv, queue);
1255 
1256 		/* Setup the chained descriptor addresses */
1257 		if (priv->mode == STMMAC_CHAIN_MODE) {
1258 			if (priv->extend_desc)
1259 				priv->hw->mode->init(rx_q->dma_erx,
1260 						     rx_q->dma_rx_phy,
1261 						     DMA_RX_SIZE, 1);
1262 			else
1263 				priv->hw->mode->init(rx_q->dma_rx,
1264 						     rx_q->dma_rx_phy,
1265 						     DMA_RX_SIZE, 0);
1266 		}
1267 	}
1268 
1269 	buf_sz = bfsize;
1270 
1271 	return 0;
1272 
1273 err_init_rx_buffers:
1274 	while (queue >= 0) {
1275 		while (--i >= 0)
1276 			stmmac_free_rx_buffer(priv, queue, i);
1277 
1278 		if (queue == 0)
1279 			break;
1280 
1281 		i = DMA_RX_SIZE;
1282 		queue--;
1283 	}
1284 
1285 	return ret;
1286 }
1287 
1288 /**
1289  * init_dma_tx_desc_rings - init the TX descriptor rings
1290  * @dev: net device structure.
1291  * Description: this function initializes the DMA TX descriptors
1292  * and allocates the socket buffers. It supports the chained and ring
1293  * modes.
1294  */
1295 static int init_dma_tx_desc_rings(struct net_device *dev)
1296 {
1297 	struct stmmac_priv *priv = netdev_priv(dev);
1298 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1299 	u32 queue;
1300 	int i;
1301 
1302 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1303 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1304 
1305 		netif_dbg(priv, probe, priv->dev,
1306 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1307 			 (u32)tx_q->dma_tx_phy);
1308 
1309 		/* Setup the chained descriptor addresses */
1310 		if (priv->mode == STMMAC_CHAIN_MODE) {
1311 			if (priv->extend_desc)
1312 				priv->hw->mode->init(tx_q->dma_etx,
1313 						     tx_q->dma_tx_phy,
1314 						     DMA_TX_SIZE, 1);
1315 			else
1316 				priv->hw->mode->init(tx_q->dma_tx,
1317 						     tx_q->dma_tx_phy,
1318 						     DMA_TX_SIZE, 0);
1319 		}
1320 
1321 		for (i = 0; i < DMA_TX_SIZE; i++) {
1322 			struct dma_desc *p;
1323 			if (priv->extend_desc)
1324 				p = &((tx_q->dma_etx + i)->basic);
1325 			else
1326 				p = tx_q->dma_tx + i;
1327 
1328 			if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1329 				p->des0 = 0;
1330 				p->des1 = 0;
1331 				p->des2 = 0;
1332 				p->des3 = 0;
1333 			} else {
1334 				p->des2 = 0;
1335 			}
1336 
1337 			tx_q->tx_skbuff_dma[i].buf = 0;
1338 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1339 			tx_q->tx_skbuff_dma[i].len = 0;
1340 			tx_q->tx_skbuff_dma[i].last_segment = false;
1341 			tx_q->tx_skbuff[i] = NULL;
1342 		}
1343 
1344 		tx_q->dirty_tx = 0;
1345 		tx_q->cur_tx = 0;
1346 
1347 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1348 	}
1349 
1350 	return 0;
1351 }
1352 
1353 /**
1354  * init_dma_desc_rings - init the RX/TX descriptor rings
1355  * @dev: net device structure
1356  * @flags: gfp flag.
1357  * Description: this function initializes the DMA RX/TX descriptors
1358  * and allocates the socket buffers. It supports the chained and ring
1359  * modes.
1360  */
1361 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1362 {
1363 	struct stmmac_priv *priv = netdev_priv(dev);
1364 	int ret;
1365 
1366 	ret = init_dma_rx_desc_rings(dev, flags);
1367 	if (ret)
1368 		return ret;
1369 
1370 	ret = init_dma_tx_desc_rings(dev);
1371 
1372 	stmmac_clear_descriptors(priv);
1373 
1374 	if (netif_msg_hw(priv))
1375 		stmmac_display_rings(priv);
1376 
1377 	return ret;
1378 }
1379 
1380 /**
1381  * dma_free_rx_skbufs - free RX dma buffers
1382  * @priv: private structure
1383  * @queue: RX queue index
1384  */
1385 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1386 {
1387 	int i;
1388 
1389 	for (i = 0; i < DMA_RX_SIZE; i++)
1390 		stmmac_free_rx_buffer(priv, queue, i);
1391 }
1392 
1393 /**
1394  * dma_free_tx_skbufs - free TX dma buffers
1395  * @priv: private structure
1396  * @queue: TX queue index
1397  */
1398 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1399 {
1400 	int i;
1401 
1402 	for (i = 0; i < DMA_TX_SIZE; i++)
1403 		stmmac_free_tx_buffer(priv, queue, i);
1404 }
1405 
1406 /**
1407  * free_dma_rx_desc_resources - free RX dma desc resources
1408  * @priv: private structure
1409  */
1410 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1411 {
1412 	u32 rx_count = priv->plat->rx_queues_to_use;
1413 	u32 queue;
1414 
1415 	/* Free RX queue resources */
1416 	for (queue = 0; queue < rx_count; queue++) {
1417 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1418 
1419 		/* Release the DMA RX socket buffers */
1420 		dma_free_rx_skbufs(priv, queue);
1421 
1422 		/* Free DMA regions of consistent memory previously allocated */
1423 		if (!priv->extend_desc)
1424 			dma_free_coherent(priv->device,
1425 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1426 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1427 		else
1428 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1429 					  sizeof(struct dma_extended_desc),
1430 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1431 
1432 		kfree(rx_q->rx_skbuff_dma);
1433 		kfree(rx_q->rx_skbuff);
1434 	}
1435 }
1436 
1437 /**
1438  * free_dma_tx_desc_resources - free TX dma desc resources
1439  * @priv: private structure
1440  */
1441 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1442 {
1443 	u32 tx_count = priv->plat->tx_queues_to_use;
1444 	u32 queue = 0;
1445 
1446 	/* Free TX queue resources */
1447 	for (queue = 0; queue < tx_count; queue++) {
1448 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1449 
1450 		/* Release the DMA TX socket buffers */
1451 		dma_free_tx_skbufs(priv, queue);
1452 
1453 		/* Free DMA regions of consistent memory previously allocated */
1454 		if (!priv->extend_desc)
1455 			dma_free_coherent(priv->device,
1456 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1457 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1458 		else
1459 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1460 					  sizeof(struct dma_extended_desc),
1461 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1462 
1463 		kfree(tx_q->tx_skbuff_dma);
1464 		kfree(tx_q->tx_skbuff);
1465 	}
1466 }
1467 
1468 /**
1469  * alloc_dma_rx_desc_resources - alloc RX resources.
1470  * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the RX path. It also
 * pre-allocates the RX socket buffers in order to allow the zero-copy
 * mechanism.
1475  */
1476 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1477 {
1478 	u32 rx_count = priv->plat->rx_queues_to_use;
1479 	int ret = -ENOMEM;
1480 	u32 queue;
1481 
1482 	/* RX queues buffers and DMA */
1483 	for (queue = 0; queue < rx_count; queue++) {
1484 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1485 
1486 		rx_q->queue_index = queue;
1487 		rx_q->priv_data = priv;
1488 
1489 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1490 						    sizeof(dma_addr_t),
1491 						    GFP_KERNEL);
1492 		if (!rx_q->rx_skbuff_dma)
1493 			return -ENOMEM;
1494 
1495 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1496 						sizeof(struct sk_buff *),
1497 						GFP_KERNEL);
1498 		if (!rx_q->rx_skbuff)
1499 			goto err_dma;
1500 
1501 		if (priv->extend_desc) {
1502 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1503 							    DMA_RX_SIZE *
1504 							    sizeof(struct
1505 							    dma_extended_desc),
1506 							    &rx_q->dma_rx_phy,
1507 							    GFP_KERNEL);
1508 			if (!rx_q->dma_erx)
1509 				goto err_dma;
1510 
1511 		} else {
1512 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1513 							   DMA_RX_SIZE *
1514 							   sizeof(struct
1515 							   dma_desc),
1516 							   &rx_q->dma_rx_phy,
1517 							   GFP_KERNEL);
1518 			if (!rx_q->dma_rx)
1519 				goto err_dma;
1520 		}
1521 	}
1522 
1523 	return 0;
1524 
1525 err_dma:
1526 	free_dma_rx_desc_resources(priv);
1527 
1528 	return ret;
1529 }
1530 
1531 /**
1532  * alloc_dma_tx_desc_resources - alloc TX resources.
1533  * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources (descriptors and buffer bookkeeping)
 * for the TX path.
1538  */
1539 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1540 {
1541 	u32 tx_count = priv->plat->tx_queues_to_use;
1542 	int ret = -ENOMEM;
1543 	u32 queue;
1544 
1545 	/* TX queues buffers and DMA */
1546 	for (queue = 0; queue < tx_count; queue++) {
1547 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1548 
1549 		tx_q->queue_index = queue;
1550 		tx_q->priv_data = priv;
1551 
1552 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1553 						    sizeof(*tx_q->tx_skbuff_dma),
1554 						    GFP_KERNEL);
1555 		if (!tx_q->tx_skbuff_dma)
1556 			return -ENOMEM;
1557 
1558 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1559 						sizeof(struct sk_buff *),
1560 						GFP_KERNEL);
1561 		if (!tx_q->tx_skbuff)
1562 			goto err_dma_buffers;
1563 
1564 		if (priv->extend_desc) {
1565 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1566 							    DMA_TX_SIZE *
1567 							    sizeof(struct
1568 							    dma_extended_desc),
1569 							    &tx_q->dma_tx_phy,
1570 							    GFP_KERNEL);
1571 			if (!tx_q->dma_etx)
1572 				goto err_dma_buffers;
1573 		} else {
1574 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1575 							   DMA_TX_SIZE *
1576 							   sizeof(struct
1577 								  dma_desc),
1578 							   &tx_q->dma_tx_phy,
1579 							   GFP_KERNEL);
1580 			if (!tx_q->dma_tx)
1581 				goto err_dma_buffers;
1582 		}
1583 	}
1584 
1585 	return 0;
1586 
1587 err_dma_buffers:
1588 	free_dma_tx_desc_resources(priv);
1589 
1590 	return ret;
1591 }
1592 
1593 /**
1594  * alloc_dma_desc_resources - alloc TX/RX resources.
1595  * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow the zero-copy mechanism.
1600  */
1601 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1602 {
1603 	/* RX Allocation */
1604 	int ret = alloc_dma_rx_desc_resources(priv);
1605 
1606 	if (ret)
1607 		return ret;
1608 
1609 	ret = alloc_dma_tx_desc_resources(priv);
1610 
1611 	return ret;
1612 }
1613 
1614 /**
1615  * free_dma_desc_resources - free dma desc resources
1616  * @priv: private structure
1617  */
1618 static void free_dma_desc_resources(struct stmmac_priv *priv)
1619 {
1620 	/* Release the DMA RX socket buffers */
1621 	free_dma_rx_desc_resources(priv);
1622 
1623 	/* Release the DMA TX socket buffers */
1624 	free_dma_tx_desc_resources(priv);
1625 }
1626 
1627 /**
1628  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1629  *  @priv: driver private structure
1630  *  Description: It is used for enabling the rx queues in the MAC
1631  */
1632 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1633 {
1634 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1635 	int queue;
1636 	u8 mode;
1637 
1638 	for (queue = 0; queue < rx_queues_count; queue++) {
1639 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1640 		priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1641 	}
1642 }
1643 
1644 /**
1645  * stmmac_start_rx_dma - start RX DMA channel
1646  * @priv: driver private structure
1647  * @chan: RX channel index
1648  * Description:
1649  * This starts a RX DMA channel
1650  */
1651 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1652 {
1653 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1654 	priv->hw->dma->start_rx(priv->ioaddr, chan);
1655 }
1656 
1657 /**
1658  * stmmac_start_tx_dma - start TX DMA channel
1659  * @priv: driver private structure
1660  * @chan: TX channel index
1661  * Description:
1662  * This starts a TX DMA channel
1663  */
1664 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1665 {
1666 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1667 	priv->hw->dma->start_tx(priv->ioaddr, chan);
1668 }
1669 
1670 /**
1671  * stmmac_stop_rx_dma - stop RX DMA channel
1672  * @priv: driver private structure
1673  * @chan: RX channel index
1674  * Description:
1675  * This stops a RX DMA channel
1676  */
1677 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1678 {
1679 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1680 	priv->hw->dma->stop_rx(priv->ioaddr, chan);
1681 }
1682 
1683 /**
1684  * stmmac_stop_tx_dma - stop TX DMA channel
1685  * @priv: driver private structure
1686  * @chan: TX channel index
1687  * Description:
1688  * This stops a TX DMA channel
1689  */
1690 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1691 {
1692 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1693 	priv->hw->dma->stop_tx(priv->ioaddr, chan);
1694 }
1695 
1696 /**
1697  * stmmac_start_all_dma - start all RX and TX DMA channels
1698  * @priv: driver private structure
1699  * Description:
1700  * This starts all the RX and TX DMA channels
1701  */
1702 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1703 {
1704 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1705 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1706 	u32 chan = 0;
1707 
1708 	for (chan = 0; chan < rx_channels_count; chan++)
1709 		stmmac_start_rx_dma(priv, chan);
1710 
1711 	for (chan = 0; chan < tx_channels_count; chan++)
1712 		stmmac_start_tx_dma(priv, chan);
1713 }
1714 
1715 /**
1716  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1717  * @priv: driver private structure
1718  * Description:
1719  * This stops the RX and TX DMA channels
1720  */
1721 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1722 {
1723 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1724 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1725 	u32 chan = 0;
1726 
1727 	for (chan = 0; chan < rx_channels_count; chan++)
1728 		stmmac_stop_rx_dma(priv, chan);
1729 
1730 	for (chan = 0; chan < tx_channels_count; chan++)
1731 		stmmac_stop_tx_dma(priv, chan);
1732 }
1733 
1734 /**
1735  *  stmmac_dma_operation_mode - HW DMA operation mode
1736  *  @priv: driver private structure
1737  *  Description: it is used for configuring the DMA operation mode register in
1738  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1739  */
1740 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1741 {
1742 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1743 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1744 	int rxfifosz = priv->plat->rx_fifo_size;
1745 	u32 txmode = 0;
1746 	u32 rxmode = 0;
1747 	u32 chan = 0;
1748 
1749 	if (rxfifosz == 0)
1750 		rxfifosz = priv->dma_cap.rx_fifo_size;
1751 
1752 	if (priv->plat->force_thresh_dma_mode) {
1753 		txmode = tc;
1754 		rxmode = tc;
1755 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1756 		/*
1757 		 * In case of GMAC, SF mode can be enabled
1758 		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE being actually supported;
		 * 2) there being no buggy Jumbo frame support
		 *    that requires not inserting the csum in the TDES.
1762 		 */
1763 		txmode = SF_DMA_MODE;
1764 		rxmode = SF_DMA_MODE;
1765 		priv->xstats.threshold = SF_DMA_MODE;
1766 	} else {
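		/* Default: TX in threshold mode, RX in Store-And-Forward mode */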
1767 		txmode = tc;
1768 		rxmode = SF_DMA_MODE;
1769 	}
1770 
1771 	/* configure all channels */
1772 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1773 		for (chan = 0; chan < rx_channels_count; chan++)
1774 			priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1775 						   rxfifosz);
1776 
1777 		for (chan = 0; chan < tx_channels_count; chan++)
1778 			priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1779 	} else {
1780 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1781 					rxfifosz);
1782 	}
1783 }
1784 
1785 /**
1786  * stmmac_tx_clean - to manage the transmission completion
1787  * @priv: driver private structure
1788  * @queue: TX queue index
1789  * Description: it reclaims the transmit resources after transmission completes.
1790  */
1791 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1792 {
1793 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1794 	unsigned int bytes_compl = 0, pkts_compl = 0;
1795 	unsigned int entry = tx_q->dirty_tx;
1796 
1797 	netif_tx_lock(priv->dev);
1798 
1799 	priv->xstats.tx_clean++;
1800 
1801 	while (entry != tx_q->cur_tx) {
1802 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1803 		struct dma_desc *p;
1804 		int status;
1805 
1806 		if (priv->extend_desc)
1807 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1808 		else
1809 			p = tx_q->dma_tx + entry;
1810 
1811 		status = priv->hw->desc->tx_status(&priv->dev->stats,
1812 						      &priv->xstats, p,
1813 						      priv->ioaddr);
1814 		/* Check if the descriptor is owned by the DMA */
1815 		if (unlikely(status & tx_dma_own))
1816 			break;
1817 
1818 		/* Just consider the last segment and ...*/
1819 		if (likely(!(status & tx_not_ls))) {
1820 			/* ... verify the status error condition */
1821 			if (unlikely(status & tx_err)) {
1822 				priv->dev->stats.tx_errors++;
1823 			} else {
1824 				priv->dev->stats.tx_packets++;
1825 				priv->xstats.tx_pkt_n++;
1826 			}
1827 			stmmac_get_tx_hwtstamp(priv, p, skb);
1828 		}
1829 
1830 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1831 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1832 				dma_unmap_page(priv->device,
1833 					       tx_q->tx_skbuff_dma[entry].buf,
1834 					       tx_q->tx_skbuff_dma[entry].len,
1835 					       DMA_TO_DEVICE);
1836 			else
1837 				dma_unmap_single(priv->device,
1838 						 tx_q->tx_skbuff_dma[entry].buf,
1839 						 tx_q->tx_skbuff_dma[entry].len,
1840 						 DMA_TO_DEVICE);
1841 			tx_q->tx_skbuff_dma[entry].buf = 0;
1842 			tx_q->tx_skbuff_dma[entry].len = 0;
1843 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1844 		}
1845 
1846 		if (priv->hw->mode->clean_desc3)
1847 			priv->hw->mode->clean_desc3(tx_q, p);
1848 
1849 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1850 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1851 
1852 		if (likely(skb != NULL)) {
1853 			pkts_compl++;
1854 			bytes_compl += skb->len;
1855 			dev_consume_skb_any(skb);
1856 			tx_q->tx_skbuff[entry] = NULL;
1857 		}
1858 
1859 		priv->hw->desc->release_tx_desc(p, priv->mode);
1860 
1861 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1862 	}
1863 	tx_q->dirty_tx = entry;
1864 
1865 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1866 				  pkts_compl, bytes_compl);
1867 
1868 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1869 								queue))) &&
1870 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1871 
1872 		netif_dbg(priv, tx_done, priv->dev,
1873 			  "%s: restart transmit\n", __func__);
1874 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1875 	}
1876 
1877 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1878 		stmmac_enable_eee_mode(priv);
1879 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1880 	}
1881 	netif_tx_unlock(priv->dev);
1882 }
1883 
1884 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1885 {
1886 	priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1887 }
1888 
1889 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1890 {
1891 	priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1892 }
1893 
1894 /**
1895  * stmmac_tx_err - to manage the tx error
1896  * @priv: driver private structure
1897  * @chan: channel index
1898  * Description: it cleans the descriptors and restarts the transmission
1899  * in case of transmission errors.
1900  */
1901 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1902 {
1903 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1904 	int i;
1905 
1906 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1907 
1908 	stmmac_stop_tx_dma(priv, chan);
1909 	dma_free_tx_skbufs(priv, chan);
1910 	for (i = 0; i < DMA_TX_SIZE; i++)
1911 		if (priv->extend_desc)
1912 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1913 						     priv->mode,
1914 						     (i == DMA_TX_SIZE - 1));
1915 		else
1916 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1917 						     priv->mode,
1918 						     (i == DMA_TX_SIZE - 1));
1919 	tx_q->dirty_tx = 0;
1920 	tx_q->cur_tx = 0;
1921 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1922 	stmmac_start_tx_dma(priv, chan);
1923 
1924 	priv->dev->stats.tx_errors++;
1925 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1926 }
1927 
1928 /**
1929  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1930  *  @priv: driver private structure
1931  *  @txmode: TX operating mode
1932  *  @rxmode: RX operating mode
1933  *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1936  *  mode.
1937  */
1938 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1939 					  u32 rxmode, u32 chan)
1940 {
1941 	int rxfifosz = priv->plat->rx_fifo_size;
1942 
1943 	if (rxfifosz == 0)
1944 		rxfifosz = priv->dma_cap.rx_fifo_size;
1945 
1946 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1947 		priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1948 					   rxfifosz);
1949 		priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1950 	} else {
1951 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1952 					rxfifosz);
1953 	}
1954 }
1955 
1956 /**
1957  * stmmac_dma_interrupt - DMA ISR
1958  * @priv: driver private structure
1959  * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method if there
 * is work to be done.
1962  */
1963 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1964 {
1965 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
1966 	int status;
1967 	u32 chan;
1968 
1969 	for (chan = 0; chan < tx_channel_count; chan++) {
1970 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
1971 
1972 		status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1973 						      &priv->xstats, chan);
1974 		if (likely((status & handle_rx)) || (status & handle_tx)) {
1975 			if (likely(napi_schedule_prep(&rx_q->napi))) {
1976 				stmmac_disable_dma_irq(priv, chan);
1977 				__napi_schedule(&rx_q->napi);
1978 			}
1979 		}
1980 
1981 		if (unlikely(status & tx_hard_error_bump_tc)) {
1982 			/* Try to bump up the dma threshold on this failure */
1983 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1984 			    (tc <= 256)) {
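				/* Bump the threshold by 64 while it is at most 256 */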
1985 				tc += 64;
1986 				if (priv->plat->force_thresh_dma_mode)
1987 					stmmac_set_dma_operation_mode(priv,
1988 								      tc,
1989 								      tc,
1990 								      chan);
1991 				else
1992 					stmmac_set_dma_operation_mode(priv,
1993 								    tc,
1994 								    SF_DMA_MODE,
1995 								    chan);
1996 				priv->xstats.threshold = tc;
1997 			}
1998 		} else if (unlikely(status == tx_hard_error)) {
1999 			stmmac_tx_err(priv, chan);
2000 		}
2001 	}
2002 }
2003 
2004 /**
 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq because the counters are managed in SW.
2008  */
2009 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2010 {
2011 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2012 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2013 
2014 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2015 		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2016 		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2017 	} else {
2018 		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2019 		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2020 	}
2021 
2022 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2023 
2024 	if (priv->dma_cap.rmon) {
2025 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2026 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2027 	} else
2028 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2029 }
2030 
2031 /**
2032  * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
2033  * @priv: driver private structure
2034  * Description: select the Enhanced/Alternate or Normal descriptors.
2035  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2036  * supported by the HW capability register.
2037  */
2038 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2039 {
2040 	if (priv->plat->enh_desc) {
2041 		dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2042 
2043 		/* GMAC older than 3.50 has no extended descriptors */
2044 		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2045 			dev_info(priv->device, "Enabled extended descriptors\n");
2046 			priv->extend_desc = 1;
2047 		} else
2048 			dev_warn(priv->device, "Extended descriptors not supported\n");
2049 
2050 		priv->hw->desc = &enh_desc_ops;
2051 	} else {
2052 		dev_info(priv->device, "Normal descriptors\n");
2053 		priv->hw->desc = &ndesc_ops;
2054 	}
2055 }
2056 
2057 /**
2058  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2059  * @priv: driver private structure
2060  * Description:
 *  newer GMAC chip generations have a register that indicates the
 *  presence of the optional features/functions.
 *  This can also be used to override the values passed through the
 *  platform, which remain necessary for old MAC10/100 and GMAC chips.
2065  */
2066 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2067 {
2068 	u32 ret = 0;
2069 
2070 	if (priv->hw->dma->get_hw_feature) {
2071 		priv->hw->dma->get_hw_feature(priv->ioaddr,
2072 					      &priv->dma_cap);
2073 		ret = 1;
2074 	}
2075 
2076 	return ret;
2077 }
2078 
2079 /**
2080  * stmmac_check_ether_addr - check if the MAC addr is valid
2081  * @priv: driver private structure
2082  * Description:
 * it verifies that the MAC address is valid; if it is not, it
 * generates a random MAC address
2085  */
2086 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2087 {
2088 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2089 		priv->hw->mac->get_umac_addr(priv->hw,
2090 					     priv->dev->dev_addr, 0);
2091 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2092 			eth_hw_addr_random(priv->dev);
2093 		netdev_info(priv->dev, "device MAC address %pM\n",
2094 			    priv->dev->dev_addr);
2095 	}
2096 }
2097 
2098 /**
2099  * stmmac_init_dma_engine - DMA init.
2100  * @priv: driver private structure
2101  * Description:
 * It inits the DMA by invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is used for the MAC or GMAC.
2105  */
2106 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2107 {
2108 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2109 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2110 	struct stmmac_rx_queue *rx_q;
2111 	struct stmmac_tx_queue *tx_q;
2112 	u32 dummy_dma_rx_phy = 0;
2113 	u32 dummy_dma_tx_phy = 0;
2114 	u32 chan = 0;
2115 	int atds = 0;
2116 	int ret = 0;
2117 
2118 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2119 		dev_err(priv->device, "Invalid DMA configuration\n");
2120 		return -EINVAL;
2121 	}
2122 
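	/* ATDS tells the DMA to use the alternate (extended) descriptor
	 * size when extended descriptors are used in ring mode.
	 */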
2123 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2124 		atds = 1;
2125 
2126 	ret = priv->hw->dma->reset(priv->ioaddr);
2127 	if (ret) {
2128 		dev_err(priv->device, "Failed to reset the dma\n");
2129 		return ret;
2130 	}
2131 
2132 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2133 		/* DMA Configuration */
2134 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2135 				    dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2136 
2137 		/* DMA RX Channel Configuration */
2138 		for (chan = 0; chan < rx_channels_count; chan++) {
2139 			rx_q = &priv->rx_queue[chan];
2140 
2141 			priv->hw->dma->init_rx_chan(priv->ioaddr,
2142 						    priv->plat->dma_cfg,
2143 						    rx_q->dma_rx_phy, chan);
2144 
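			/* The tail pointer initially points just past the
			 * last descriptor in the ring.
			 */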
2145 			rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2146 				    (DMA_RX_SIZE * sizeof(struct dma_desc));
2147 			priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2148 						       rx_q->rx_tail_addr,
2149 						       chan);
2150 		}
2151 
2152 		/* DMA TX Channel Configuration */
2153 		for (chan = 0; chan < tx_channels_count; chan++) {
2154 			tx_q = &priv->tx_queue[chan];
2155 
2156 			priv->hw->dma->init_chan(priv->ioaddr,
2157 						 priv->plat->dma_cfg,
2158 						 chan);
2159 
2160 			priv->hw->dma->init_tx_chan(priv->ioaddr,
2161 						    priv->plat->dma_cfg,
2162 						    tx_q->dma_tx_phy, chan);
2163 
2164 			tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2165 				    (DMA_TX_SIZE * sizeof(struct dma_desc));
2166 			priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2167 						       tx_q->tx_tail_addr,
2168 						       chan);
2169 		}
2170 	} else {
2171 		rx_q = &priv->rx_queue[chan];
2172 		tx_q = &priv->tx_queue[chan];
2173 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2174 				    tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2175 	}
2176 
2177 	if (priv->plat->axi && priv->hw->dma->axi)
2178 		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2179 
2180 	return ret;
2181 }
2182 
2183 /**
2184  * stmmac_tx_timer - mitigation sw timer for tx.
2185  * @data: data pointer
2186  * Description:
2187  * This is the timer handler to directly invoke the stmmac_tx_clean.
2188  */
2189 static void stmmac_tx_timer(unsigned long data)
2190 {
2191 	struct stmmac_priv *priv = (struct stmmac_priv *)data;
2192 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2193 	u32 queue;
2194 
2195 	/* let's scan all the tx queues */
2196 	for (queue = 0; queue < tx_queues_count; queue++)
2197 		stmmac_tx_clean(priv, queue);
2198 }
2199 
2200 /**
2201  * stmmac_init_tx_coalesce - init tx mitigation options.
2202  * @priv: driver private structure
2203  * Description:
2204  * This inits the transmit coalesce parameters: i.e. timer rate,
2205  * timer handler and default threshold used for enabling the
2206  * interrupt on completion bit.
2207  */
2208 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2209 {
2210 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2211 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2212 	init_timer(&priv->txtimer);
2213 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2214 	priv->txtimer.data = (unsigned long)priv;
2215 	priv->txtimer.function = stmmac_tx_timer;
2216 	add_timer(&priv->txtimer);
2217 }
2218 
2219 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2220 {
2221 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2222 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2223 	u32 chan;
2224 
2225 	/* set TX ring length */
2226 	if (priv->hw->dma->set_tx_ring_len) {
2227 		for (chan = 0; chan < tx_channels_count; chan++)
2228 			priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2229 						       (DMA_TX_SIZE - 1), chan);
2230 	}
2231 
2232 	/* set RX ring length */
2233 	if (priv->hw->dma->set_rx_ring_len) {
2234 		for (chan = 0; chan < rx_channels_count; chan++)
2235 			priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2236 						       (DMA_RX_SIZE - 1), chan);
2237 	}
2238 }
2239 
2240 /**
2241  *  stmmac_set_tx_queue_weight - Set TX queue weight
2242  *  @priv: driver private structure
 *  Description: It is used for setting the TX queue weights
2244  */
2245 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2246 {
2247 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2248 	u32 weight;
2249 	u32 queue;
2250 
2251 	for (queue = 0; queue < tx_queues_count; queue++) {
2252 		weight = priv->plat->tx_queues_cfg[queue].weight;
2253 		priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2254 	}
2255 }
2256 
2257 /**
2258  *  stmmac_configure_cbs - Configure CBS in TX queue
2259  *  @priv: driver private structure
2260  *  Description: It is used for configuring CBS in AVB TX queues
2261  */
2262 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2263 {
2264 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2265 	u32 mode_to_use;
2266 	u32 queue;
2267 
2268 	/* queue 0 is reserved for legacy traffic */
2269 	for (queue = 1; queue < tx_queues_count; queue++) {
2270 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2271 		if (mode_to_use == MTL_QUEUE_DCB)
2272 			continue;
2273 
2274 		priv->hw->mac->config_cbs(priv->hw,
2275 				priv->plat->tx_queues_cfg[queue].send_slope,
2276 				priv->plat->tx_queues_cfg[queue].idle_slope,
2277 				priv->plat->tx_queues_cfg[queue].high_credit,
2278 				priv->plat->tx_queues_cfg[queue].low_credit,
2279 				queue);
2280 	}
2281 }
2282 
2283 /**
2284  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2285  *  @priv: driver private structure
2286  *  Description: It is used for mapping RX queues to RX dma channels
2287  */
2288 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2289 {
2290 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2291 	u32 queue;
2292 	u32 chan;
2293 
2294 	for (queue = 0; queue < rx_queues_count; queue++) {
2295 		chan = priv->plat->rx_queues_cfg[queue].chan;
2296 		priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2297 	}
2298 }
2299 
2300 /**
2301  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2302  *  @priv: driver private structure
2303  *  Description: It is used for configuring the RX Queue Priority
2304  */
2305 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2306 {
2307 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2308 	u32 queue;
2309 	u32 prio;
2310 
2311 	for (queue = 0; queue < rx_queues_count; queue++) {
2312 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2313 			continue;
2314 
2315 		prio = priv->plat->rx_queues_cfg[queue].prio;
2316 		priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2317 	}
2318 }
2319 
2320 /**
2321  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2322  *  @priv: driver private structure
2323  *  Description: It is used for configuring the TX Queue Priority
2324  */
2325 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2326 {
2327 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2328 	u32 queue;
2329 	u32 prio;
2330 
2331 	for (queue = 0; queue < tx_queues_count; queue++) {
2332 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2333 			continue;
2334 
2335 		prio = priv->plat->tx_queues_cfg[queue].prio;
2336 		priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2337 	}
2338 }
2339 
2340 /**
2341  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2342  *  @priv: driver private structure
2343  *  Description: It is used for configuring the RX queue routing
2344  */
2345 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2346 {
2347 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2348 	u32 queue;
2349 	u8 packet;
2350 
2351 	for (queue = 0; queue < rx_queues_count; queue++) {
2352 		/* no specific packet type routing specified for the queue */
2353 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2354 			continue;
2355 
2356 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
		priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2358 	}
2359 }
2360 
2361 /**
2362  *  stmmac_mtl_configuration - Configure MTL
2363  *  @priv: driver private structure
 *  Description: It is used for configuring the MTL
2365  */
2366 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2367 {
2368 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2369 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2370 
2371 	if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2372 		stmmac_set_tx_queue_weight(priv);
2373 
2374 	/* Configure MTL RX algorithms */
2375 	if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2376 		priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2377 						priv->plat->rx_sched_algorithm);
2378 
2379 	/* Configure MTL TX algorithms */
2380 	if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2381 		priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2382 						priv->plat->tx_sched_algorithm);
2383 
2384 	/* Configure CBS in AVB TX queues */
2385 	if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2386 		stmmac_configure_cbs(priv);
2387 
2388 	/* Map RX MTL to DMA channels */
2389 	if (priv->hw->mac->map_mtl_to_dma)
2390 		stmmac_rx_queue_dma_chan_map(priv);
2391 
2392 	/* Enable MAC RX Queues */
2393 	if (priv->hw->mac->rx_queue_enable)
2394 		stmmac_mac_enable_rx_queues(priv);
2395 
2396 	/* Set RX priorities */
2397 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2398 		stmmac_mac_config_rx_queues_prio(priv);
2399 
2400 	/* Set TX priorities */
2401 	if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2402 		stmmac_mac_config_tx_queues_prio(priv);
2403 
2404 	/* Set RX routing */
2405 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2406 		stmmac_mac_config_rx_queues_routing(priv);
2407 }
2408 
2409 /**
2410  * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  @init_ptp: initialize PTP if set
 *  Description:
 *  this is the main function to setup the HW in a usable state: the
2414  *  dma engine is reset, the core registers are configured (e.g. AXI,
2415  *  Checksum features, timers). The DMA is ready to start receiving and
2416  *  transmitting.
2417  *  Return value:
2418  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2419  *  file on failure.
2420  */
2421 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2422 {
2423 	struct stmmac_priv *priv = netdev_priv(dev);
2424 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2425 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2426 	u32 chan;
2427 	int ret;
2428 
2429 	/* DMA initialization and SW reset */
2430 	ret = stmmac_init_dma_engine(priv);
2431 	if (ret < 0) {
2432 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2433 			   __func__);
2434 		return ret;
2435 	}
2436 
2437 	/* Copy the MAC addr into the HW  */
2438 	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2439 
2440 	/* PS and related bits will be programmed according to the speed */
2441 	if (priv->hw->pcs) {
2442 		int speed = priv->plat->mac_port_sel_speed;
2443 
2444 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2445 		    (speed == SPEED_1000)) {
2446 			priv->hw->ps = speed;
2447 		} else {
2448 			dev_warn(priv->device, "invalid port speed\n");
2449 			priv->hw->ps = 0;
2450 		}
2451 	}
2452 
2453 	/* Initialize the MAC Core */
2454 	priv->hw->mac->core_init(priv->hw, dev->mtu);
2455 
	/* Initialize MTL */
2457 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
2458 		stmmac_mtl_configuration(priv);
2459 
2460 	ret = priv->hw->mac->rx_ipc(priv->hw);
2461 	if (!ret) {
2462 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2463 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2464 		priv->hw->rx_csum = 0;
2465 	}
2466 
2467 	/* Enable the MAC Rx/Tx */
2468 	priv->hw->mac->set_mac(priv->ioaddr, true);
2469 
2470 	/* Set the HW DMA mode and the COE */
2471 	stmmac_dma_operation_mode(priv);
2472 
2473 	stmmac_mmc_setup(priv);
2474 
2475 	if (init_ptp) {
2476 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2477 		if (ret < 0)
2478 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2479 
2480 		ret = stmmac_init_ptp(priv);
2481 		if (ret == -EOPNOTSUPP)
2482 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2483 		else if (ret)
2484 			netdev_warn(priv->dev, "PTP init failed\n");
2485 	}
2486 
2487 #ifdef CONFIG_DEBUG_FS
2488 	ret = stmmac_init_fs(dev);
2489 	if (ret < 0)
2490 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2491 			    __func__);
2492 #endif
2493 	/* Start the ball rolling... */
2494 	stmmac_start_all_dma(priv);
2495 
2496 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2497 
2498 	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2499 		priv->rx_riwt = MAX_DMA_RIWT;
2500 		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2501 	}
2502 
2503 	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2504 		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2505 
2506 	/* set TX and RX rings length */
2507 	stmmac_set_rings_length(priv);
2508 
2509 	/* Enable TSO */
2510 	if (priv->tso) {
2511 		for (chan = 0; chan < tx_cnt; chan++)
2512 			priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2513 	}
2514 
2515 	return 0;
2516 }
2517 
2518 static void stmmac_hw_teardown(struct net_device *dev)
2519 {
2520 	struct stmmac_priv *priv = netdev_priv(dev);
2521 
2522 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2523 }
2524 
2525 /**
2526  *  stmmac_open - open entry point of the driver
2527  *  @dev : pointer to the device structure.
2528  *  Description:
2529  *  This function is the open entry point of the driver.
2530  *  Return value:
2531  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2532  *  file on failure.
2533  */
2534 static int stmmac_open(struct net_device *dev)
2535 {
2536 	struct stmmac_priv *priv = netdev_priv(dev);
2537 	int ret;
2538 
2539 	stmmac_check_ether_addr(priv);
2540 
2541 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2542 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2543 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2544 		ret = stmmac_init_phy(dev);
2545 		if (ret) {
2546 			netdev_err(priv->dev,
2547 				   "%s: Cannot attach to PHY (error: %d)\n",
2548 				   __func__, ret);
2549 			return ret;
2550 		}
2551 	}
2552 
2553 	/* Extra statistics */
2554 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2555 	priv->xstats.threshold = tc;
2556 
2557 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2558 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2559 
2560 	ret = alloc_dma_desc_resources(priv);
2561 	if (ret < 0) {
2562 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2563 			   __func__);
2564 		goto dma_desc_error;
2565 	}
2566 
2567 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2568 	if (ret < 0) {
2569 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2570 			   __func__);
2571 		goto init_error;
2572 	}
2573 
2574 	ret = stmmac_hw_setup(dev, true);
2575 	if (ret < 0) {
2576 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2577 		goto init_error;
2578 	}
2579 
2580 	stmmac_init_tx_coalesce(priv);
2581 
2582 	if (dev->phydev)
2583 		phy_start(dev->phydev);
2584 
2585 	/* Request the IRQ lines */
2586 	ret = request_irq(dev->irq, stmmac_interrupt,
2587 			  IRQF_SHARED, dev->name, dev);
2588 	if (unlikely(ret < 0)) {
2589 		netdev_err(priv->dev,
2590 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2591 			   __func__, dev->irq, ret);
2592 		goto irq_error;
2593 	}
2594 
	/* Request the Wake IRQ in case another line is used for WoL */
2596 	if (priv->wol_irq != dev->irq) {
2597 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2598 				  IRQF_SHARED, dev->name, dev);
2599 		if (unlikely(ret < 0)) {
2600 			netdev_err(priv->dev,
2601 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2602 				   __func__, priv->wol_irq, ret);
2603 			goto wolirq_error;
2604 		}
2605 	}
2606 
	/* Request the LPI IRQ in case a dedicated line is used for it */
2608 	if (priv->lpi_irq > 0) {
2609 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2610 				  dev->name, dev);
2611 		if (unlikely(ret < 0)) {
2612 			netdev_err(priv->dev,
2613 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2614 				   __func__, priv->lpi_irq, ret);
2615 			goto lpiirq_error;
2616 		}
2617 	}
2618 
2619 	stmmac_enable_all_queues(priv);
2620 	stmmac_start_all_queues(priv);
2621 
2622 	return 0;
2623 
2624 lpiirq_error:
2625 	if (priv->wol_irq != dev->irq)
2626 		free_irq(priv->wol_irq, dev);
2627 wolirq_error:
2628 	free_irq(dev->irq, dev);
2629 irq_error:
2630 	if (dev->phydev)
2631 		phy_stop(dev->phydev);
2632 
2633 	del_timer_sync(&priv->txtimer);
2634 	stmmac_hw_teardown(dev);
2635 init_error:
2636 	free_dma_desc_resources(priv);
2637 dma_desc_error:
2638 	if (dev->phydev)
2639 		phy_disconnect(dev->phydev);
2640 
2641 	return ret;
2642 }
2643 
2644 /**
2645  *  stmmac_release - close entry point of the driver
2646  *  @dev : device pointer.
2647  *  Description:
2648  *  This is the stop entry point of the driver.
2649  */
2650 static int stmmac_release(struct net_device *dev)
2651 {
2652 	struct stmmac_priv *priv = netdev_priv(dev);
2653 
2654 	if (priv->eee_enabled)
2655 		del_timer_sync(&priv->eee_ctrl_timer);
2656 
2657 	/* Stop and disconnect the PHY */
2658 	if (dev->phydev) {
2659 		phy_stop(dev->phydev);
2660 		phy_disconnect(dev->phydev);
2661 	}
2662 
2663 	stmmac_stop_all_queues(priv);
2664 
2665 	stmmac_disable_all_queues(priv);
2666 
2667 	del_timer_sync(&priv->txtimer);
2668 
2669 	/* Free the IRQ lines */
2670 	free_irq(dev->irq, dev);
2671 	if (priv->wol_irq != dev->irq)
2672 		free_irq(priv->wol_irq, dev);
2673 	if (priv->lpi_irq > 0)
2674 		free_irq(priv->lpi_irq, dev);
2675 
2676 	/* Stop TX/RX DMA and clear the descriptors */
2677 	stmmac_stop_all_dma(priv);
2678 
2679 	/* Release and free the Rx/Tx resources */
2680 	free_dma_desc_resources(priv);
2681 
2682 	/* Disable the MAC Rx/Tx */
2683 	priv->hw->mac->set_mac(priv->ioaddr, false);
2684 
2685 	netif_carrier_off(dev);
2686 
2687 #ifdef CONFIG_DEBUG_FS
2688 	stmmac_exit_fs(dev);
2689 #endif
2690 
2691 	stmmac_release_ptp(priv);
2692 
2693 	return 0;
2694 }
2695 
2696 /**
 *  stmmac_tso_allocator - fill the TSO descriptors for a buffer
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills descriptors and requests new descriptors according
 *  to the buffer length to fill
2706  */
2707 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2708 				 int total_len, bool last_segment, u32 queue)
2709 {
2710 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2711 	struct dma_desc *desc;
2712 	u32 buff_size;
2713 	int tmp_len;
2714 
2715 	tmp_len = total_len;
2716 
2717 	while (tmp_len > 0) {
2718 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2719 		desc = tx_q->dma_tx + tx_q->cur_tx;
2720 
2721 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2722 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2723 			    TSO_MAX_BUFF_SIZE : tmp_len;
2724 
2725 		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2726 			0, 1,
2727 			(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2728 			0, 0);
2729 
2730 		tmp_len -= TSO_MAX_BUFF_SIZE;
2731 	}
2732 }
2733 
2734 /**
2735  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2736  *  @skb : the socket buffer
2737  *  @dev : device pointer
2738  *  Description: this is the transmit function that is called on TSO frames
2739  *  (support available on GMAC4 and newer chips).
 *  Diagram below shows the ring programming in case of TSO frames:
2741  *
2742  *  First Descriptor
2743  *   --------
2744  *   | DES0 |---> buffer1 = L2/L3/L4 header
2745  *   | DES1 |---> TCP Payload (can continue on next descr...)
2746  *   | DES2 |---> buffer 1 and 2 len
2747  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2748  *   --------
2749  *	|
2750  *     ...
2751  *	|
2752  *   --------
2753  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2754  *   | DES1 | --|
2755  *   | DES2 | --> buffer 1 and 2 len
2756  *   | DES3 |
2757  *   --------
2758  *
 * mss is fixed when TSO is enabled, so the TDES3 ctx field need not be reprogrammed per frame.
2760  */
2761 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2762 {
2763 	struct dma_desc *desc, *first, *mss_desc = NULL;
2764 	struct stmmac_priv *priv = netdev_priv(dev);
2765 	int nfrags = skb_shinfo(skb)->nr_frags;
2766 	u32 queue = skb_get_queue_mapping(skb);
2767 	unsigned int first_entry, des;
2768 	struct stmmac_tx_queue *tx_q;
2769 	int tmp_pay_len = 0;
2770 	u32 pay_len, mss;
2771 	u8 proto_hdr_len;
2772 	int i;
2773 
2774 	tx_q = &priv->tx_queue[queue];
2775 
2776 	/* Compute header lengths */
2777 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2778 
	/* Desc availability based on threshold should be safe enough */
2780 	if (unlikely(stmmac_tx_avail(priv, queue) <
2781 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2782 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2783 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2784 								queue));
2785 			/* This is a hard error, log it. */
2786 			netdev_err(priv->dev,
2787 				   "%s: Tx Ring full when queue awake\n",
2788 				   __func__);
2789 		}
2790 		return NETDEV_TX_BUSY;
2791 	}
2792 
2793 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2794 
2795 	mss = skb_shinfo(skb)->gso_size;
2796 
2797 	/* set new MSS value if needed */
2798 	if (mss != priv->mss) {
2799 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2800 		priv->hw->desc->set_mss(mss_desc, mss);
2801 		priv->mss = mss;
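		/* The context descriptor carrying the MSS takes up its own
		 * entry in the ring.
		 */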
2802 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2803 	}
2804 
2805 	if (netif_msg_tx_queued(priv)) {
2806 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2807 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2808 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2809 			skb->data_len);
2810 	}
2811 
2812 	first_entry = tx_q->cur_tx;
2813 
2814 	desc = tx_q->dma_tx + first_entry;
2815 	first = desc;
2816 
2817 	/* first descriptor: fill Headers on Buf1 */
2818 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2819 			     DMA_TO_DEVICE);
2820 	if (dma_mapping_error(priv->device, des))
2821 		goto dma_map_err;
2822 
2823 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2824 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2825 	tx_q->tx_skbuff[first_entry] = skb;
2826 
2827 	first->des0 = cpu_to_le32(des);
2828 
2829 	/* Fill start of payload in buff2 of first descriptor */
2830 	if (pay_len)
2831 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2832 
2833 	/* If needed take extra descriptors to fill the remaining payload */
2834 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2835 
2836 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2837 
2838 	/* Prepare fragments */
2839 	for (i = 0; i < nfrags; i++) {
2840 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2841 
2842 		des = skb_frag_dma_map(priv->device, frag, 0,
2843 				       skb_frag_size(frag),
2844 				       DMA_TO_DEVICE);
2845 		if (dma_mapping_error(priv->device, des))
2846 			goto dma_map_err;
2847 
2848 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2849 				     (i == nfrags - 1), queue);
2850 
2851 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2852 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2853 		tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2854 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2855 	}
2856 
2857 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2858 
2859 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2860 
2861 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmit queue\n",
			  __func__);
2864 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2865 	}
2866 
2867 	dev->stats.tx_bytes += skb->len;
2868 	priv->xstats.tx_tso_frames++;
2869 	priv->xstats.tx_tso_nfrags += nfrags;
2870 
2871 	/* Manage tx mitigation */
2872 	priv->tx_count_frames += nfrags + 1;
2873 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2874 		mod_timer(&priv->txtimer,
2875 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2876 	} else {
2877 		priv->tx_count_frames = 0;
2878 		priv->hw->desc->set_tx_ic(desc);
2879 		priv->xstats.tx_set_ic_bit++;
2880 	}
2881 
2882 	if (!priv->hwts_tx_en)
2883 		skb_tx_timestamp(skb);
2884 
2885 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2886 		     priv->hwts_tx_en)) {
2887 		/* declare that device is doing timestamping */
2888 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2889 		priv->hw->desc->enable_tx_timestamp(first);
2890 	}
2891 
2892 	/* Complete the first descriptor before granting the DMA */
2893 	priv->hw->desc->prepare_tso_tx_desc(first, 1,
2894 			proto_hdr_len,
2895 			pay_len,
2896 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2897 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2898 
2899 	/* If context desc is used to change MSS */
2900 	if (mss_desc)
2901 		priv->hw->desc->set_tx_owner(mss_desc);
2902 
	/* The own bit must be the latest setting done when preparing the
	 * descriptor and then a barrier is needed to make sure that
2905 	 * all is coherent before granting the DMA engine.
2906 	 */
2907 	dma_wmb();
2908 
2909 	if (netif_msg_pktdata(priv)) {
2910 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2911 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2912 			tx_q->cur_tx, first, nfrags);
2913 
2914 		priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2915 					     0);
2916 
2917 		pr_info(">>> frame to be transmitted: ");
2918 		print_pkt(skb->data, skb_headlen(skb));
2919 	}
2920 
2921 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2922 
2923 	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2924 				       queue);
2925 
2926 	return NETDEV_TX_OK;
2927 
2928 dma_map_err:
2929 	dev_err(priv->device, "Tx dma map failed\n");
2930 	dev_kfree_skb(skb);
2931 	priv->dev->stats.tx_dropped++;
2932 	return NETDEV_TX_OK;
2933 }
2934 
2935 /**
2936  *  stmmac_xmit - Tx entry point of the driver
2937  *  @skb : the socket buffer
2938  *  @dev : device pointer
2939  *  Description : this is the tx entry point of the driver.
2940  *  It programs the chain or the ring and supports oversized frames
2941  *  and SG feature.
2942  */
2943 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2944 {
2945 	struct stmmac_priv *priv = netdev_priv(dev);
2946 	unsigned int nopaged_len = skb_headlen(skb);
2947 	int i, csum_insertion = 0, is_jumbo = 0;
2948 	u32 queue = skb_get_queue_mapping(skb);
2949 	int nfrags = skb_shinfo(skb)->nr_frags;
2950 	int entry;
2951 	unsigned int first_entry;
2952 	struct dma_desc *desc, *first;
2953 	struct stmmac_tx_queue *tx_q;
2954 	unsigned int enh_desc;
2955 	unsigned int des;
2956 
2957 	tx_q = &priv->tx_queue[queue];
2958 
2959 	/* Manage oversized TCP frames for GMAC4 device */
2960 	if (skb_is_gso(skb) && priv->tso) {
2961 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2962 			return stmmac_tso_xmit(skb, dev);
2963 	}
2964 
2965 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2966 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2967 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2968 								queue));
2969 			/* This is a hard error, log it. */
2970 			netdev_err(priv->dev,
2971 				   "%s: Tx Ring full when queue awake\n",
2972 				   __func__);
2973 		}
2974 		return NETDEV_TX_BUSY;
2975 	}
2976 
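	/* Exit LPI mode before queuing a new frame for transmission */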
2977 	if (priv->tx_path_in_lpi_mode)
2978 		stmmac_disable_eee_mode(priv);
2979 
2980 	entry = tx_q->cur_tx;
2981 	first_entry = entry;
2982 
2983 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2984 
2985 	if (likely(priv->extend_desc))
2986 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2987 	else
2988 		desc = tx_q->dma_tx + entry;
2989 
2990 	first = desc;
2991 
2992 	tx_q->tx_skbuff[first_entry] = skb;
2993 
2994 	enh_desc = priv->plat->enh_desc;
2995 	/* To program the descriptors according to the size of the frame */
2996 	if (enh_desc)
2997 		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
2998 
2999 	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3000 					 DWMAC_CORE_4_00)) {
3001 		entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3002 		if (unlikely(entry < 0))
3003 			goto dma_map_err;
3004 	}
3005 
3006 	for (i = 0; i < nfrags; i++) {
3007 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3008 		int len = skb_frag_size(frag);
3009 		bool last_segment = (i == (nfrags - 1));
3010 
3011 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3012 
3013 		if (likely(priv->extend_desc))
3014 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3015 		else
3016 			desc = tx_q->dma_tx + entry;
3017 
3018 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3019 				       DMA_TO_DEVICE);
3020 		if (dma_mapping_error(priv->device, des))
3021 			goto dma_map_err; /* should reuse desc w/o issues */
3022 
3023 		tx_q->tx_skbuff[entry] = NULL;
3024 
3025 		tx_q->tx_skbuff_dma[entry].buf = des;
3026 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3027 			desc->des0 = cpu_to_le32(des);
3028 		else
3029 			desc->des2 = cpu_to_le32(des);
3030 
3031 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3032 		tx_q->tx_skbuff_dma[entry].len = len;
3033 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3034 
3035 		/* Prepare the descriptor and set the own bit too */
3036 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3037 						priv->mode, 1, last_segment,
3038 						skb->len);
3039 	}
3040 
3041 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3042 
3043 	tx_q->cur_tx = entry;
3044 
3045 	if (netif_msg_pktdata(priv)) {
3046 		void *tx_head;
3047 
3048 		netdev_dbg(priv->dev,
3049 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3050 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3051 			   entry, first, nfrags);
3052 
3053 		if (priv->extend_desc)
3054 			tx_head = (void *)tx_q->dma_etx;
3055 		else
3056 			tx_head = (void *)tx_q->dma_tx;
3057 
3058 		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3059 
3060 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3061 		print_pkt(skb->data, skb->len);
3062 	}
3063 
3064 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmit queue\n",
			  __func__);
3067 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3068 	}
3069 
3070 	dev->stats.tx_bytes += skb->len;
3071 
3072 	/* According to the coalesce parameter the IC bit for the latest
3073 	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
3075 	 * element in case of no SG.
3076 	 */
3077 	priv->tx_count_frames += nfrags + 1;
3078 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3079 		mod_timer(&priv->txtimer,
3080 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3081 	} else {
3082 		priv->tx_count_frames = 0;
3083 		priv->hw->desc->set_tx_ic(desc);
3084 		priv->xstats.tx_set_ic_bit++;
3085 	}
3086 
3087 	if (!priv->hwts_tx_en)
3088 		skb_tx_timestamp(skb);
3089 
3090 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3091 	 * problems because all the descriptors are actually ready to be
3092 	 * passed to the DMA engine.
3093 	 */
3094 	if (likely(!is_jumbo)) {
3095 		bool last_segment = (nfrags == 0);
3096 
3097 		des = dma_map_single(priv->device, skb->data,
3098 				     nopaged_len, DMA_TO_DEVICE);
3099 		if (dma_mapping_error(priv->device, des))
3100 			goto dma_map_err;
3101 
3102 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3103 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3104 			first->des0 = cpu_to_le32(des);
3105 		else
3106 			first->des2 = cpu_to_le32(des);
3107 
3108 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3109 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3110 
3111 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3112 			     priv->hwts_tx_en)) {
3113 			/* declare that device is doing timestamping */
3114 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3115 			priv->hw->desc->enable_tx_timestamp(first);
3116 		}
3117 
3118 		/* Prepare the first descriptor setting the OWN bit too */
3119 		priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3120 						csum_insertion, priv->mode, 1,
3121 						last_segment, skb->len);
3122 
		/* The own bit must be the latest setting done when preparing the
		 * descriptor and then a barrier is needed to make sure that
3125 		 * all is coherent before granting the DMA engine.
3126 		 */
3127 		dma_wmb();
3128 	}
3129 
3130 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3131 
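	/* Kick the DMA: GMAC4 and newer use the tail pointer, older cores
	 * issue a transmit poll demand.
	 */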
3132 	if (priv->synopsys_id < DWMAC_CORE_4_00)
3133 		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3134 	else
3135 		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3136 					       queue);
3137 
3138 	return NETDEV_TX_OK;
3139 
3140 dma_map_err:
3141 	netdev_err(priv->dev, "Tx DMA map failed\n");
3142 	dev_kfree_skb(skb);
3143 	priv->dev->stats.tx_dropped++;
3144 	return NETDEV_TX_OK;
3145 }
3146 
3147 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3148 {
3149 	struct ethhdr *ehdr;
3150 	u16 vlanid;
3151 
3152 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3153 	    NETIF_F_HW_VLAN_CTAG_RX &&
3154 	    !__vlan_get_tag(skb, &vlanid)) {
3155 		/* pop the vlan tag */
3156 		ehdr = (struct ethhdr *)skb->data;
3157 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3158 		skb_pull(skb, VLAN_HLEN);
3159 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3160 	}
3161 }
3162 
3163 
3164 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3165 {
3166 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3167 		return 0;
3168 
3169 	return 1;
3170 }
3171 
3172 /**
3173  * stmmac_rx_refill - refill used skb preallocated buffers
3174  * @priv: driver private structure
3175  * @queue: RX queue index
 * Description : this reallocates the skbs for the zero-copy based
 * reception process.
3178  */
3179 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3180 {
3181 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3182 	int dirty = stmmac_rx_dirty(priv, queue);
3183 	unsigned int entry = rx_q->dirty_rx;
3184 
3185 	int bfsize = priv->dma_buf_sz;
3186 
3187 	while (dirty-- > 0) {
3188 		struct dma_desc *p;
3189 
3190 		if (priv->extend_desc)
3191 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3192 		else
3193 			p = rx_q->dma_rx + entry;
3194 
3195 		if (likely(!rx_q->rx_skbuff[entry])) {
3196 			struct sk_buff *skb;
3197 
3198 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3199 			if (unlikely(!skb)) {
3200 				/* so for a while no zero-copy! */
3201 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3202 				if (unlikely(net_ratelimit()))
3203 					dev_err(priv->device,
3204 						"fail to alloc skb entry %d\n",
3205 						entry);
3206 				break;
3207 			}
3208 
3209 			rx_q->rx_skbuff[entry] = skb;
3210 			rx_q->rx_skbuff_dma[entry] =
3211 			    dma_map_single(priv->device, skb->data, bfsize,
3212 					   DMA_FROM_DEVICE);
3213 			if (dma_mapping_error(priv->device,
3214 					      rx_q->rx_skbuff_dma[entry])) {
3215 				netdev_err(priv->dev, "Rx DMA map failed\n");
3216 				dev_kfree_skb(skb);
3217 				break;
3218 			}
3219 
3220 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3221 				p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3222 				p->des1 = 0;
3223 			} else {
3224 				p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3225 			}
3226 			if (priv->hw->mode->refill_desc3)
3227 				priv->hw->mode->refill_desc3(rx_q, p);
3228 
3229 			if (rx_q->rx_zeroc_thresh > 0)
3230 				rx_q->rx_zeroc_thresh--;
3231 
3232 			netif_dbg(priv, rx_status, priv->dev,
3233 				  "refill entry #%d\n", entry);
3234 		}
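		/* Make sure the buffer address is visible before giving
		 * the descriptor back to the DMA below.
		 */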
3235 		dma_wmb();
3236 
3237 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3238 			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3239 		else
3240 			priv->hw->desc->set_rx_owner(p);
3241 
3242 		dma_wmb();
3243 
3244 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3245 	}
3246 	rx_q->dirty_rx = entry;
3247 }
3248 
3249 /**
3250  * stmmac_rx - manage the receive process
3251  * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description : this is the function called by the napi poll method.
3255  * It gets all the frames inside the ring.
3256  */
3257 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3258 {
3259 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3260 	unsigned int entry = rx_q->cur_rx;
3261 	int coe = priv->hw->rx_csum;
3262 	unsigned int next_entry;
3263 	unsigned int count = 0;
3264 
3265 	if (netif_msg_rx_status(priv)) {
3266 		void *rx_head;
3267 
3268 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3269 		if (priv->extend_desc)
3270 			rx_head = (void *)rx_q->dma_erx;
3271 		else
3272 			rx_head = (void *)rx_q->dma_rx;
3273 
3274 		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3275 	}
3276 	while (count < limit) {
3277 		int status;
3278 		struct dma_desc *p;
3279 		struct dma_desc *np;
3280 
3281 		if (priv->extend_desc)
3282 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3283 		else
3284 			p = rx_q->dma_rx + entry;
3285 
3286 		/* read the status of the incoming frame */
3287 		status = priv->hw->desc->rx_status(&priv->dev->stats,
3288 						   &priv->xstats, p);
3289 		/* check if managed by the DMA otherwise go ahead */
3290 		if (unlikely(status & dma_own))
3291 			break;
3292 
3293 		count++;
3294 
3295 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3296 		next_entry = rx_q->cur_rx;
3297 
3298 		if (priv->extend_desc)
3299 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3300 		else
3301 			np = rx_q->dma_rx + next_entry;
3302 
3303 		prefetch(np);
3304 
3305 		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3306 			priv->hw->desc->rx_extended_status(&priv->dev->stats,
3307 							   &priv->xstats,
3308 							   rx_q->dma_erx +
3309 							   entry);
3310 		if (unlikely(status == discard_frame)) {
3311 			priv->dev->stats.rx_errors++;
3312 			if (priv->hwts_rx_en && !priv->extend_desc) {
3313 				/* DESC2 & DESC3 will be overwritten by device
3314 				 * with timestamp value, hence reinitialize
3315 				 * them in stmmac_rx_refill() function so that
3316 				 * device can reuse it.
3317 				 */
3318 				rx_q->rx_skbuff[entry] = NULL;
3319 				dma_unmap_single(priv->device,
3320 						 rx_q->rx_skbuff_dma[entry],
3321 						 priv->dma_buf_sz,
3322 						 DMA_FROM_DEVICE);
3323 			}
3324 		} else {
3325 			struct sk_buff *skb;
3326 			int frame_len;
3327 			unsigned int des;
3328 
3329 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3330 				des = le32_to_cpu(p->des0);
3331 			else
3332 				des = le32_to_cpu(p->des2);
3333 
3334 			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3335 
3336 			/*  If frame length is greater than skb buffer size
3337 			 *  (preallocated during init) then the packet is
3338 			 *  ignored
3339 			 */
3340 			if (frame_len > priv->dma_buf_sz) {
3341 				netdev_err(priv->dev,
3342 					   "len %d larger than size (%d)\n",
3343 					   frame_len, priv->dma_buf_sz);
3344 				priv->dev->stats.rx_length_errors++;
3345 				break;
3346 			}
3347 
3348 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3349 			 * Type frames (LLC/LLC-SNAP)
3350 			 */
3351 			if (unlikely(status != llc_snap))
3352 				frame_len -= ETH_FCS_LEN;
3353 
3354 			if (netif_msg_rx_status(priv)) {
3355 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3356 					   p, entry, des);
3357 				if (frame_len > ETH_FRAME_LEN)
3358 					netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3359 						   frame_len, status);
3360 			}
3361 
			/* Zero-copy is always used for all sizes in case
			 * of GMAC4 because the used descriptors always
			 * need to be refilled.
3365 			 */
3366 			if (unlikely(!priv->plat->has_gmac4 &&
3367 				     ((frame_len < priv->rx_copybreak) ||
3368 				     stmmac_rx_threshold_count(rx_q)))) {
3369 				skb = netdev_alloc_skb_ip_align(priv->dev,
3370 								frame_len);
3371 				if (unlikely(!skb)) {
3372 					if (net_ratelimit())
3373 						dev_warn(priv->device,
3374 							 "packet dropped\n");
3375 					priv->dev->stats.rx_dropped++;
3376 					break;
3377 				}
3378 
3379 				dma_sync_single_for_cpu(priv->device,
3380 							rx_q->rx_skbuff_dma
3381 							[entry], frame_len,
3382 							DMA_FROM_DEVICE);
3383 				skb_copy_to_linear_data(skb,
3384 							rx_q->
3385 							rx_skbuff[entry]->data,
3386 							frame_len);
3387 
3388 				skb_put(skb, frame_len);
3389 				dma_sync_single_for_device(priv->device,
3390 							   rx_q->rx_skbuff_dma
3391 							   [entry], frame_len,
3392 							   DMA_FROM_DEVICE);
3393 			} else {
3394 				skb = rx_q->rx_skbuff[entry];
3395 				if (unlikely(!skb)) {
3396 					netdev_err(priv->dev,
3397 						   "%s: Inconsistent Rx chain\n",
3398 						   priv->dev->name);
3399 					priv->dev->stats.rx_dropped++;
3400 					break;
3401 				}
3402 				prefetch(skb->data - NET_IP_ALIGN);
3403 				rx_q->rx_skbuff[entry] = NULL;
3404 				rx_q->rx_zeroc_thresh++;
3405 
3406 				skb_put(skb, frame_len);
3407 				dma_unmap_single(priv->device,
3408 						 rx_q->rx_skbuff_dma[entry],
3409 						 priv->dma_buf_sz,
3410 						 DMA_FROM_DEVICE);
3411 			}
3412 
3413 			if (netif_msg_pktdata(priv)) {
3414 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3415 					   frame_len);
3416 				print_pkt(skb->data, frame_len);
3417 			}
3418 
3419 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3420 
3421 			stmmac_rx_vlan(priv->dev, skb);
3422 
3423 			skb->protocol = eth_type_trans(skb, priv->dev);
3424 
3425 			if (unlikely(!coe))
3426 				skb_checksum_none_assert(skb);
3427 			else
3428 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3429 
3430 			napi_gro_receive(&rx_q->napi, skb);
3431 
3432 			priv->dev->stats.rx_packets++;
3433 			priv->dev->stats.rx_bytes += frame_len;
3434 		}
3435 		entry = next_entry;
3436 	}
3437 
3438 	stmmac_rx_refill(priv, queue);
3439 
3440 	priv->xstats.rx_pkt_n += count;
3441 
3442 	return count;
3443 }
3444 
3445 /**
3446  *  stmmac_poll - stmmac poll method (NAPI)
3447  *  @napi : pointer to the napi structure.
3448  *  @budget : maximum number of packets that the current CPU can receive from
3449  *	      all interfaces.
3450  *  Description :
3451  *  To look at the incoming frames and clear the tx resources.
3452  */
3453 static int stmmac_poll(struct napi_struct *napi, int budget)
3454 {
3455 	struct stmmac_rx_queue *rx_q =
3456 		container_of(napi, struct stmmac_rx_queue, napi);
3457 	struct stmmac_priv *priv = rx_q->priv_data;
3458 	u32 tx_count = priv->plat->tx_queues_to_use;
3459 	u32 chan = rx_q->queue_index;
3460 	int work_done = 0;
3461 	u32 queue;
3462 
3463 	priv->xstats.napi_poll++;
3464 
3465 	/* check all the queues */
3466 	for (queue = 0; queue < tx_count; queue++)
3467 		stmmac_tx_clean(priv, queue);
3468 
3469 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3470 	if (work_done < budget) {
3471 		napi_complete_done(napi, work_done);
3472 		stmmac_enable_dma_irq(priv, chan);
3473 	}
3474 	return work_done;
3475 }
3476 
3477 /**
3478  *  stmmac_tx_timeout
3479  *  @dev : Pointer to net device structure
3480  *  Description: this function is called when a packet transmission fails to
3481  *   complete within a reasonable time. The driver will mark the error in the
3482  *   netdev structure and arrange for the device to be reset to a sane state
3483  *   in order to transmit a new packet.
3484  */
3485 static void stmmac_tx_timeout(struct net_device *dev)
3486 {
3487 	struct stmmac_priv *priv = netdev_priv(dev);
3488 	u32 tx_count = priv->plat->tx_queues_to_use;
3489 	u32 chan;
3490 
3491 	/* Clear Tx resources and restart transmitting again */
3492 	for (chan = 0; chan < tx_count; chan++)
3493 		stmmac_tx_err(priv, chan);
3494 }
3495 
3496 /**
3497  *  stmmac_set_rx_mode - entry point for multicast addressing
3498  *  @dev : pointer to the device structure
3499  *  Description:
3500  *  This function is a driver entry point which gets called by the kernel
3501  *  whenever multicast addresses must be enabled/disabled.
3502  *  Return value:
3503  *  void.
3504  */
3505 static void stmmac_set_rx_mode(struct net_device *dev)
3506 {
3507 	struct stmmac_priv *priv = netdev_priv(dev);
3508 
3509 	priv->hw->mac->set_filter(priv->hw, dev);
3510 }
3511 
3512 /**
3513  *  stmmac_change_mtu - entry point to change MTU size for the device.
3514  *  @dev : device pointer.
3515  *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transmission Unit (MTU) is used by the network
 *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
3518  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
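 *  For example, "ip link set eth0 mtu 1500" has the same effect; note that
 *  the interface must be stopped first, otherwise this handler returns
 *  -EBUSY.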
3519  *  Return value:
 *  0 on success and an appropriate negative errno value on failure.
3522  */
3523 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3524 {
3525 	struct stmmac_priv *priv = netdev_priv(dev);
3526 
3527 	if (netif_running(dev)) {
3528 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3529 		return -EBUSY;
3530 	}
3531 
3532 	dev->mtu = new_mtu;
3533 
3534 	netdev_update_features(dev);
3535 
3536 	return 0;
3537 }
3538 
3539 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3540 					     netdev_features_t features)
3541 {
3542 	struct stmmac_priv *priv = netdev_priv(dev);
3543 
3544 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3545 		features &= ~NETIF_F_RXCSUM;
3546 
3547 	if (!priv->plat->tx_coe)
3548 		features &= ~NETIF_F_CSUM_MASK;
3549 
	/* Some GMAC devices have buggy Jumbo frame support that
	 * requires the Tx COE to be disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the Tx csum insertion in the TDES and don't use SF mode.
	 */
3555 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3556 		features &= ~NETIF_F_CSUM_MASK;
3557 
	/* Enable or disable TSO as requested via ethtool */
3559 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3560 		if (features & NETIF_F_TSO)
3561 			priv->tso = true;
3562 		else
3563 			priv->tso = false;
3564 	}
3565 
3566 	return features;
3567 }
3568 
3569 static int stmmac_set_features(struct net_device *netdev,
3570 			       netdev_features_t features)
3571 {
3572 	struct stmmac_priv *priv = netdev_priv(netdev);
3573 
	/* Keep the COE Type if checksum offload is supported */
3575 	if (features & NETIF_F_RXCSUM)
3576 		priv->hw->rx_csum = priv->plat->rx_coe;
3577 	else
3578 		priv->hw->rx_csum = 0;
	/* No further check is needed because rx_coe has been set before and
	 * it will be fixed in case of issue.
	 */
3582 	priv->hw->mac->rx_ipc(priv->hw);
3583 
3584 	return 0;
3585 }
3586 
3587 /**
3588  *  stmmac_interrupt - main ISR
3589  *  @irq: interrupt number.
3590  *  @dev_id: to pass the net device pointer.
3591  *  Description: this is the main driver interrupt service routine.
3592  *  It can call:
3593  *  o DMA service routine (to manage incoming frame reception and transmission
3594  *    status)
3595  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3596  *    interrupts.
3597  */
3598 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3599 {
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv;
	u32 rx_cnt, tx_cnt;
	u32 queues_count;
	u32 queue;

	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	priv = netdev_priv(dev);
	rx_cnt = priv->plat->rx_queues_to_use;
	tx_cnt = priv->plat->tx_queues_to_use;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);
3616 
	/* Handle the GMAC core's own interrupts */
3618 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3619 		int status = priv->hw->mac->host_irq_status(priv->hw,
3620 							    &priv->xstats);
3621 
3622 		if (unlikely(status)) {
3623 			/* For LPI we need to save the tx status */
3624 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3625 				priv->tx_path_in_lpi_mode = true;
3626 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3627 				priv->tx_path_in_lpi_mode = false;
3628 		}
3629 
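		/* The MTL block reports per-queue interrupt status, so walk
		 * the larger of the RX/TX queue counts.
		 */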
3630 		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3631 			for (queue = 0; queue < queues_count; queue++) {
3632 				struct stmmac_rx_queue *rx_q =
3633 				&priv->rx_queue[queue];
3634 
3635 				status |=
3636 				priv->hw->mac->host_mtl_irq_status(priv->hw,
3637 								   queue);
3638 
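				/* On an MTL RX FIFO overflow, re-program the
				 * RX tail pointer so the DMA resumes fetching
				 * descriptors for this queue.
				 */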
3639 				if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3640 				    priv->hw->dma->set_rx_tail_ptr)
3641 					priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3642 								rx_q->rx_tail_addr,
3643 								queue);
3644 			}
3645 		}
3646 
3647 		/* PCS link status */
3648 		if (priv->hw->pcs) {
3649 			if (priv->xstats.pcs_link)
3650 				netif_carrier_on(dev);
3651 			else
3652 				netif_carrier_off(dev);
3653 		}
3654 	}
3655 
3656 	/* To handle DMA interrupts */
3657 	stmmac_dma_interrupt(priv);
3658 
3659 	return IRQ_HANDLED;
3660 }
3661 
3662 #ifdef CONFIG_NET_POLL_CONTROLLER
3663 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3664  * to allow network I/O with interrupts disabled.
3665  */
3666 static void stmmac_poll_controller(struct net_device *dev)
3667 {
3668 	disable_irq(dev->irq);
3669 	stmmac_interrupt(dev->irq, dev);
3670 	enable_irq(dev->irq);
3671 }
3672 #endif
3673 
3674 /**
3675  *  stmmac_ioctl - Entry point for the Ioctl
3676  *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
3678  *  a proprietary structure used to pass information to the driver.
3679  *  @cmd: IOCTL command
3680  *  Description:
3681  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3682  */
3683 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3684 {
3685 	int ret = -EOPNOTSUPP;
3686 
3687 	if (!netif_running(dev))
3688 		return -EINVAL;
3689 
3690 	switch (cmd) {
3691 	case SIOCGMIIPHY:
3692 	case SIOCGMIIREG:
3693 	case SIOCSMIIREG:
3694 		if (!dev->phydev)
3695 			return -EINVAL;
3696 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3697 		break;
3698 	case SIOCSHWTSTAMP:
3699 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3700 		break;
3701 	default:
3702 		break;
3703 	}
3704 
3705 	return ret;
3706 }
3707 
3708 #ifdef CONFIG_DEBUG_FS
3709 static struct dentry *stmmac_fs_dir;
3710 
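/* Dump a descriptor ring to the seq_file: one line per descriptor in the
 * form "<index> [<phys addr>]: des0 des1 des2 des3".
 */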
3711 static void sysfs_display_ring(void *head, int size, int extend_desc,
3712 			       struct seq_file *seq)
3713 {
3714 	int i;
3715 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3716 	struct dma_desc *p = (struct dma_desc *)head;
3717 
3718 	for (i = 0; i < size; i++) {
3719 		if (extend_desc) {
3720 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3721 				   i, (unsigned int)virt_to_phys(ep),
3722 				   le32_to_cpu(ep->basic.des0),
3723 				   le32_to_cpu(ep->basic.des1),
3724 				   le32_to_cpu(ep->basic.des2),
3725 				   le32_to_cpu(ep->basic.des3));
3726 			ep++;
3727 		} else {
3728 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3729 				   i, (unsigned int)virt_to_phys(p),
3730 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3731 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3732 			p++;
3733 		}
		seq_putc(seq, '\n');
3735 	}
3736 }
3737 
3738 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3739 {
3740 	struct net_device *dev = seq->private;
3741 	struct stmmac_priv *priv = netdev_priv(dev);
3742 	u32 rx_count = priv->plat->rx_queues_to_use;
3743 	u32 tx_count = priv->plat->tx_queues_to_use;
3744 	u32 queue;
3745 
3746 	for (queue = 0; queue < rx_count; queue++) {
3747 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3748 
3749 		seq_printf(seq, "RX Queue %d:\n", queue);
3750 
3751 		if (priv->extend_desc) {
3752 			seq_printf(seq, "Extended descriptor ring:\n");
3753 			sysfs_display_ring((void *)rx_q->dma_erx,
3754 					   DMA_RX_SIZE, 1, seq);
3755 		} else {
3756 			seq_printf(seq, "Descriptor ring:\n");
3757 			sysfs_display_ring((void *)rx_q->dma_rx,
3758 					   DMA_RX_SIZE, 0, seq);
3759 		}
3760 	}
3761 
3762 	for (queue = 0; queue < tx_count; queue++) {
3763 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3764 
3765 		seq_printf(seq, "TX Queue %d:\n", queue);
3766 
3767 		if (priv->extend_desc) {
3768 			seq_printf(seq, "Extended descriptor ring:\n");
3769 			sysfs_display_ring((void *)tx_q->dma_etx,
3770 					   DMA_TX_SIZE, 1, seq);
3771 		} else {
3772 			seq_printf(seq, "Descriptor ring:\n");
3773 			sysfs_display_ring((void *)tx_q->dma_tx,
3774 					   DMA_TX_SIZE, 0, seq);
3775 		}
3776 	}
3777 
3778 	return 0;
3779 }
3780 
3781 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3782 {
3783 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3784 }
3785 
3786 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3787 
3788 static const struct file_operations stmmac_rings_status_fops = {
3789 	.owner = THIS_MODULE,
3790 	.open = stmmac_sysfs_ring_open,
3791 	.read = seq_read,
3792 	.llseek = seq_lseek,
3793 	.release = single_release,
3794 };
3795 
3796 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3797 {
3798 	struct net_device *dev = seq->private;
3799 	struct stmmac_priv *priv = netdev_priv(dev);
3800 
3801 	if (!priv->hw_cap_support) {
3802 		seq_printf(seq, "DMA HW features not supported\n");
3803 		return 0;
3804 	}
3805 
3806 	seq_printf(seq, "==============================\n");
3807 	seq_printf(seq, "\tDMA HW features\n");
3808 	seq_printf(seq, "==============================\n");
3809 
3810 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3811 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3812 	seq_printf(seq, "\t1000 Mbps: %s\n",
3813 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3814 	seq_printf(seq, "\tHalf duplex: %s\n",
3815 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3816 	seq_printf(seq, "\tHash Filter: %s\n",
3817 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3818 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3819 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3820 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3821 		   (priv->dma_cap.pcs) ? "Y" : "N");
3822 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3823 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3824 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3825 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3826 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3827 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3828 	seq_printf(seq, "\tRMON module: %s\n",
3829 		   (priv->dma_cap.rmon) ? "Y" : "N");
3830 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3831 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3832 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3833 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3834 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3835 		   (priv->dma_cap.eee) ? "Y" : "N");
3836 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3837 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3838 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3839 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3840 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3841 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3842 	} else {
3843 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3844 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3845 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3846 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3847 	}
3848 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3849 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
		   priv->dma_cap.number_tx_channel);
3854 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3855 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3856 
3857 	return 0;
3858 }
3859 
3860 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3861 {
3862 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3863 }
3864 
3865 static const struct file_operations stmmac_dma_cap_fops = {
3866 	.owner = THIS_MODULE,
3867 	.open = stmmac_sysfs_dma_cap_open,
3868 	.read = seq_read,
3869 	.llseek = seq_lseek,
3870 	.release = single_release,
3871 };
3872 
3873 static int stmmac_init_fs(struct net_device *dev)
3874 {
3875 	struct stmmac_priv *priv = netdev_priv(dev);
3876 
3877 	/* Create per netdev entries */
3878 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3879 
3880 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3881 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3882 
3883 		return -ENOMEM;
3884 	}
3885 
3886 	/* Entry to report DMA RX/TX rings */
3887 	priv->dbgfs_rings_status =
3888 		debugfs_create_file("descriptors_status", S_IRUGO,
3889 				    priv->dbgfs_dir, dev,
3890 				    &stmmac_rings_status_fops);
3891 
3892 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3893 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3894 		debugfs_remove_recursive(priv->dbgfs_dir);
3895 
3896 		return -ENOMEM;
3897 	}
3898 
3899 	/* Entry to report the DMA HW features */
3900 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3901 					    priv->dbgfs_dir,
3902 					    dev, &stmmac_dma_cap_fops);
3903 
3904 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3905 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3906 		debugfs_remove_recursive(priv->dbgfs_dir);
3907 
3908 		return -ENOMEM;
3909 	}
3910 
3911 	return 0;
3912 }
3913 
3914 static void stmmac_exit_fs(struct net_device *dev)
3915 {
3916 	struct stmmac_priv *priv = netdev_priv(dev);
3917 
3918 	debugfs_remove_recursive(priv->dbgfs_dir);
3919 }
3920 #endif /* CONFIG_DEBUG_FS */
3921 
3922 static const struct net_device_ops stmmac_netdev_ops = {
3923 	.ndo_open = stmmac_open,
3924 	.ndo_start_xmit = stmmac_xmit,
3925 	.ndo_stop = stmmac_release,
3926 	.ndo_change_mtu = stmmac_change_mtu,
3927 	.ndo_fix_features = stmmac_fix_features,
3928 	.ndo_set_features = stmmac_set_features,
3929 	.ndo_set_rx_mode = stmmac_set_rx_mode,
3930 	.ndo_tx_timeout = stmmac_tx_timeout,
3931 	.ndo_do_ioctl = stmmac_ioctl,
3932 #ifdef CONFIG_NET_POLL_CONTROLLER
3933 	.ndo_poll_controller = stmmac_poll_controller,
3934 #endif
3935 	.ndo_set_mac_address = eth_mac_addr,
3936 };
3937 
3938 /**
3939  *  stmmac_hw_init - Init the MAC device
3940  *  @priv: driver private structure
3941  *  Description: this function is to configure the MAC device according to
3942  *  some platform parameters or the HW capability register. It prepares the
3943  *  driver to use either ring or chain modes and to setup either enhanced or
3944  *  normal descriptors.
3945  */
3946 static int stmmac_hw_init(struct stmmac_priv *priv)
3947 {
3948 	struct mac_device_info *mac;
3949 
3950 	/* Identify the MAC HW device */
3951 	if (priv->plat->has_gmac) {
3952 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
3953 		mac = dwmac1000_setup(priv->ioaddr,
3954 				      priv->plat->multicast_filter_bins,
3955 				      priv->plat->unicast_filter_entries,
3956 				      &priv->synopsys_id);
3957 	} else if (priv->plat->has_gmac4) {
3958 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
3959 		mac = dwmac4_setup(priv->ioaddr,
3960 				   priv->plat->multicast_filter_bins,
3961 				   priv->plat->unicast_filter_entries,
3962 				   &priv->synopsys_id);
3963 	} else {
3964 		mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3965 	}
3966 	if (!mac)
3967 		return -ENOMEM;
3968 
3969 	priv->hw = mac;
3970 
3971 	/* To use the chained or ring mode */
3972 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3973 		priv->hw->mode = &dwmac4_ring_mode_ops;
3974 	} else {
3975 		if (chain_mode) {
3976 			priv->hw->mode = &chain_mode_ops;
3977 			dev_info(priv->device, "Chain mode enabled\n");
3978 			priv->mode = STMMAC_CHAIN_MODE;
3979 		} else {
3980 			priv->hw->mode = &ring_mode_ops;
3981 			dev_info(priv->device, "Ring mode enabled\n");
3982 			priv->mode = STMMAC_RING_MODE;
3983 		}
3984 	}
3985 
	/* Get the HW capability register (GMAC cores newer than 3.50a) */
3987 	priv->hw_cap_support = stmmac_get_hw_features(priv);
3988 	if (priv->hw_cap_support) {
3989 		dev_info(priv->device, "DMA HW capability register supported\n");
3990 
		/* Some gmac/dma configuration fields that are passed
		 * through the platform data (e.g. enh_desc, tx_coe) can be
		 * overridden with the values from the HW capability
		 * register (if supported).
		 */
3996 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
3997 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3998 		priv->hw->pmt = priv->plat->pmt;
3999 
4000 		/* TXCOE doesn't work in thresh DMA mode */
4001 		if (priv->plat->force_thresh_dma_mode)
4002 			priv->plat->tx_coe = 0;
4003 		else
4004 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4005 
4006 		/* In case of GMAC4 rx_coe is from HW cap register. */
4007 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4008 
4009 		if (priv->dma_cap.rx_coe_type2)
4010 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4011 		else if (priv->dma_cap.rx_coe_type1)
4012 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4013 
4014 	} else {
4015 		dev_info(priv->device, "No HW DMA feature register supported\n");
4016 	}
4017 
4018 	/* To use alternate (extended), normal or GMAC4 descriptor structures */
4019 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
4020 		priv->hw->desc = &dwmac4_desc_ops;
4021 	else
4022 		stmmac_selec_desc_mode(priv);
4023 
4024 	if (priv->plat->rx_coe) {
4025 		priv->hw->rx_csum = priv->plat->rx_coe;
4026 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4027 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4028 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4029 	}
4030 	if (priv->plat->tx_coe)
4031 		dev_info(priv->device, "TX Checksum insertion supported\n");
4032 
4033 	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-on-LAN supported\n");
4035 		device_set_wakeup_capable(priv->device, 1);
4036 	}
4037 
4038 	if (priv->dma_cap.tsoen)
4039 		dev_info(priv->device, "TSO supported\n");
4040 
4041 	return 0;
4042 }
4043 
4044 /**
4045  * stmmac_dvr_probe
4046  * @device: device pointer
4047  * @plat_dat: platform data pointer
4048  * @res: stmmac resource pointer
 * Description: this is the main probe function; it allocates the net_device
 * via alloc_etherdev_mqs() and sets up the private structure.
 * Return:
 * 0 on success, otherwise a negative errno.
4053  */
4054 int stmmac_dvr_probe(struct device *device,
4055 		     struct plat_stmmacenet_data *plat_dat,
4056 		     struct stmmac_resources *res)
4057 {
4058 	struct net_device *ndev = NULL;
4059 	struct stmmac_priv *priv;
4060 	int ret = 0;
4061 	u32 queue;
4062 
4063 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4064 				  MTL_MAX_TX_QUEUES,
4065 				  MTL_MAX_RX_QUEUES);
4066 	if (!ndev)
4067 		return -ENOMEM;
4068 
4069 	SET_NETDEV_DEV(ndev, device);
4070 
4071 	priv = netdev_priv(ndev);
4072 	priv->device = device;
4073 	priv->dev = ndev;
4074 
4075 	stmmac_set_ethtool_ops(ndev);
4076 	priv->pause = pause;
4077 	priv->plat = plat_dat;
4078 	priv->ioaddr = res->addr;
4079 	priv->dev->base_addr = (unsigned long)res->addr;
4080 
4081 	priv->dev->irq = res->irq;
4082 	priv->wol_irq = res->wol_irq;
4083 	priv->lpi_irq = res->lpi_irq;
4084 
4085 	if (res->mac)
4086 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4087 
4088 	dev_set_drvdata(device, priv->dev);
4089 
4090 	/* Verify driver arguments */
4091 	stmmac_verify_args();
4092 
4093 	/* Override with kernel parameters if supplied XXX CRS XXX
4094 	 * this needs to have multiple instances
4095 	 */
4096 	if ((phyaddr >= 0) && (phyaddr <= 31))
4097 		priv->plat->phy_addr = phyaddr;
4098 
4099 	if (priv->plat->stmmac_rst)
4100 		reset_control_deassert(priv->plat->stmmac_rst);
4101 
4102 	/* Init MAC and get the capabilities */
4103 	ret = stmmac_hw_init(priv);
4104 	if (ret)
4105 		goto error_hw_init;
4106 
4107 	/* Configure real RX and TX queues */
4108 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4109 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4110 
4111 	ndev->netdev_ops = &stmmac_netdev_ops;
4112 
4113 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4114 			    NETIF_F_RXCSUM;
4115 
4116 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4117 		ndev->hw_features |= NETIF_F_TSO;
4118 		priv->tso = true;
4119 		dev_info(priv->device, "TSO feature enabled\n");
4120 	}
4121 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4122 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4123 #ifdef STMMAC_VLAN_TAG_USED
4124 	/* Both mac100 and gmac support receive VLAN tag detection */
4125 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4126 #endif
4127 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4128 
4129 	/* MTU range: 46 - hw-specific max */
4130 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
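	/* Enhanced descriptors and GMAC4 cores can handle jumbo frames up to
	 * JUMBO_LEN; otherwise the maximum is bounded by what fits in a
	 * linear skb allocation.
	 */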
4131 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4132 		ndev->max_mtu = JUMBO_LEN;
4133 	else
4134 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu if plat->maxmtu is greater than
	 * ndev->max_mtu or less than ndev->min_mtu, which is an invalid range.
	 */
4138 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4139 	    (priv->plat->maxmtu >= ndev->min_mtu))
4140 		ndev->max_mtu = priv->plat->maxmtu;
4141 	else if (priv->plat->maxmtu < ndev->min_mtu)
4142 		dev_warn(priv->device,
			 "%s: warning: maxmtu has an invalid value (%d)\n",
4144 			 __func__, priv->plat->maxmtu);
4145 
4146 	if (flow_ctrl)
4147 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4148 
	/* The Rx Watchdog is available on cores newer than 3.40.
	 * In some cases, for example on buggy HW, this feature
	 * has to be disabled; this can be done by setting the
	 * riwt_off field in the platform data.
	 */
4154 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4155 		priv->use_riwt = 1;
4156 		dev_info(priv->device,
4157 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4158 	}
4159 
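	/* One NAPI instance per RX queue; the weight scales with the number
	 * of RX queues in use (8 per queue).
	 */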
4160 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4161 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4162 
4163 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4164 			       (8 * priv->plat->rx_queues_to_use));
4165 	}
4166 
4167 	spin_lock_init(&priv->lock);
4168 
	/* If a specific clk_csr value is passed from the platform,
	 * the CSR Clock Range selection is fixed and cannot be
	 * changed at run-time. Otherwise the driver will try to
	 * set the MDC clock dynamically according to the actual
	 * CSR clock input.
	 */
4175 	if (!priv->plat->clk_csr)
4176 		stmmac_clk_csr_set(priv);
4177 	else
4178 		priv->clk_csr = priv->plat->clk_csr;
4179 
4180 	stmmac_check_pcs_mode(priv);
4181 
4182 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4183 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4184 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4185 		/* MDIO bus Registration */
4186 		ret = stmmac_mdio_register(ndev);
4187 		if (ret < 0) {
4188 			dev_err(priv->device,
4189 				"%s: MDIO bus (id: %d) registration failed",
4190 				__func__, priv->plat->bus_id);
4191 			goto error_mdio_register;
4192 		}
4193 	}
4194 
4195 	ret = register_netdev(ndev);
4196 	if (ret) {
4197 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4198 			__func__, ret);
4199 		goto error_netdev_register;
4200 	}
4201 
4202 	return ret;
4203 
4204 error_netdev_register:
4205 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4206 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4207 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4208 		stmmac_mdio_unregister(ndev);
4209 error_mdio_register:
4210 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4211 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4212 
4213 		netif_napi_del(&rx_q->napi);
4214 	}
4215 error_hw_init:
4216 	free_netdev(ndev);
4217 
4218 	return ret;
4219 }
4220 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4221 
4222 /**
4223  * stmmac_dvr_remove
4224  * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
4227  */
4228 int stmmac_dvr_remove(struct device *dev)
4229 {
4230 	struct net_device *ndev = dev_get_drvdata(dev);
4231 	struct stmmac_priv *priv = netdev_priv(ndev);
4232 
	netdev_info(priv->dev, "%s: removing driver\n", __func__);
4234 
4235 	stmmac_stop_all_dma(priv);
4236 
4237 	priv->hw->mac->set_mac(priv->ioaddr, false);
4238 	netif_carrier_off(ndev);
4239 	unregister_netdev(ndev);
4240 	if (priv->plat->stmmac_rst)
4241 		reset_control_assert(priv->plat->stmmac_rst);
4242 	clk_disable_unprepare(priv->plat->pclk);
4243 	clk_disable_unprepare(priv->plat->stmmac_clk);
4244 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4245 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4246 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4247 		stmmac_mdio_unregister(ndev);
4248 	free_netdev(ndev);
4249 
4250 	return 0;
4251 }
4252 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4253 
4254 /**
4255  * stmmac_suspend - suspend callback
4256  * @dev: device pointer
4257  * Description: this is the function to suspend the device and it is called
4258  * by the platform driver to stop the network queue, release the resources,
4259  * program the PMT register (for WoL), clean and release driver resources.
4260  */
4261 int stmmac_suspend(struct device *dev)
4262 {
4263 	struct net_device *ndev = dev_get_drvdata(dev);
4264 	struct stmmac_priv *priv = netdev_priv(ndev);
4265 	unsigned long flags;
4266 
4267 	if (!ndev || !netif_running(ndev))
4268 		return 0;
4269 
4270 	if (ndev->phydev)
4271 		phy_stop(ndev->phydev);
4272 
4273 	spin_lock_irqsave(&priv->lock, flags);
4274 
4275 	netif_device_detach(ndev);
4276 	stmmac_stop_all_queues(priv);
4277 
4278 	stmmac_disable_all_queues(priv);
4279 
4280 	/* Stop TX/RX DMA */
4281 	stmmac_stop_all_dma(priv);
4282 
4283 	/* Enable Power down mode by programming the PMT regs */
4284 	if (device_may_wakeup(priv->device)) {
4285 		priv->hw->mac->pmt(priv->hw, priv->wolopts);
4286 		priv->irq_wake = 1;
4287 	} else {
4288 		priv->hw->mac->set_mac(priv->ioaddr, false);
4289 		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable clocks since PMT-based wakeup is not used */
4291 		clk_disable(priv->plat->pclk);
4292 		clk_disable(priv->plat->stmmac_clk);
4293 	}
4294 	spin_unlock_irqrestore(&priv->lock, flags);
4295 
4296 	priv->oldlink = 0;
4297 	priv->speed = SPEED_UNKNOWN;
4298 	priv->oldduplex = DUPLEX_UNKNOWN;
4299 	return 0;
4300 }
4301 EXPORT_SYMBOL_GPL(stmmac_suspend);
4302 
4303 /**
4304  * stmmac_reset_queues_param - reset queue parameters
4305  * @dev: device pointer
4306  */
4307 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4308 {
4309 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4310 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4311 	u32 queue;
4312 
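	/* Restart both the RX and TX rings from index 0; the descriptors
	 * themselves are re-initialized by stmmac_clear_descriptors() in the
	 * resume path.
	 */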
4313 	for (queue = 0; queue < rx_cnt; queue++) {
4314 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4315 
4316 		rx_q->cur_rx = 0;
4317 		rx_q->dirty_rx = 0;
4318 	}
4319 
4320 	for (queue = 0; queue < tx_cnt; queue++) {
4321 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4322 
4323 		tx_q->cur_tx = 0;
4324 		tx_q->dirty_tx = 0;
4325 	}
4326 }
4327 
4328 /**
4329  * stmmac_resume - resume callback
4330  * @dev: device pointer
4331  * Description: when resume this function is invoked to setup the DMA and CORE
4332  * in a usable state.
4333  */
4334 int stmmac_resume(struct device *dev)
4335 {
4336 	struct net_device *ndev = dev_get_drvdata(dev);
4337 	struct stmmac_priv *priv = netdev_priv(ndev);
4338 	unsigned long flags;
4339 
4340 	if (!netif_running(ndev))
4341 		return 0;
4342 
	/* The Power Down bit in the PMT register is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Nevertheless, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from other devices (e.g. serial console).
	 */
4349 	if (device_may_wakeup(priv->device)) {
4350 		spin_lock_irqsave(&priv->lock, flags);
4351 		priv->hw->mac->pmt(priv->hw, 0);
4352 		spin_unlock_irqrestore(&priv->lock, flags);
4353 		priv->irq_wake = 0;
4354 	} else {
4355 		pinctrl_pm_select_default_state(priv->device);
		/* Enable the clocks previously disabled */
4357 		clk_enable(priv->plat->stmmac_clk);
4358 		clk_enable(priv->plat->pclk);
4359 		/* reset the phy so that it's ready */
4360 		if (priv->mii)
4361 			stmmac_mdio_reset(priv->mii);
4362 	}
4363 
4364 	netif_device_attach(ndev);
4365 
4366 	spin_lock_irqsave(&priv->lock, flags);
4367 
4368 	stmmac_reset_queues_param(priv);
4369 
	/* Reset the private MSS value to force the MSS context descriptor
	 * to be set up again on the next TSO xmit (only used by GMAC4).
	 */
4373 	priv->mss = 0;
4374 
4375 	stmmac_clear_descriptors(priv);
4376 
4377 	stmmac_hw_setup(ndev, false);
4378 	stmmac_init_tx_coalesce(priv);
4379 	stmmac_set_rx_mode(ndev);
4380 
4381 	stmmac_enable_all_queues(priv);
4382 
4383 	stmmac_start_all_queues(priv);
4384 
4385 	spin_unlock_irqrestore(&priv->lock, flags);
4386 
4387 	if (ndev->phydev)
4388 		phy_start(ndev->phydev);
4389 
4390 	return 0;
4391 }
4392 EXPORT_SYMBOL_GPL(stmmac_resume);
4393 
4394 #ifndef MODULE
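/* Parse the "stmmaceth=" kernel command line option. A hypothetical example
 * (values are illustrative only):
 *   stmmaceth=debug:16,phyaddr:1,buf_sz:1536,tc:64,watchdog:5000,eee_timer:1000
 */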
4395 static int __init stmmac_cmdline_opt(char *str)
4396 {
4397 	char *opt;
4398 
4399 	if (!str || !*str)
4400 		return -EINVAL;
4401 	while ((opt = strsep(&str, ",")) != NULL) {
4402 		if (!strncmp(opt, "debug:", 6)) {
4403 			if (kstrtoint(opt + 6, 0, &debug))
4404 				goto err;
4405 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4406 			if (kstrtoint(opt + 8, 0, &phyaddr))
4407 				goto err;
4408 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4409 			if (kstrtoint(opt + 7, 0, &buf_sz))
4410 				goto err;
4411 		} else if (!strncmp(opt, "tc:", 3)) {
4412 			if (kstrtoint(opt + 3, 0, &tc))
4413 				goto err;
4414 		} else if (!strncmp(opt, "watchdog:", 9)) {
4415 			if (kstrtoint(opt + 9, 0, &watchdog))
4416 				goto err;
4417 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4418 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4419 				goto err;
4420 		} else if (!strncmp(opt, "pause:", 6)) {
4421 			if (kstrtoint(opt + 6, 0, &pause))
4422 				goto err;
4423 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4424 			if (kstrtoint(opt + 10, 0, &eee_timer))
4425 				goto err;
4426 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4427 			if (kstrtoint(opt + 11, 0, &chain_mode))
4428 				goto err;
4429 		}
4430 	}
4431 	return 0;
4432 
4433 err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4435 	return -EINVAL;
4436 }
4437 
4438 __setup("stmmaceth=", stmmac_cmdline_opt);
4439 #endif /* MODULE */
4440 
4441 static int __init stmmac_init(void)
4442 {
4443 #ifdef CONFIG_DEBUG_FS
4444 	/* Create debugfs main directory if it doesn't exist yet */
4445 	if (!stmmac_fs_dir) {
4446 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4447 
4448 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4449 			pr_err("ERROR %s, debugfs create directory failed\n",
4450 			       STMMAC_RESOURCE_NAME);
4451 
4452 			return -ENOMEM;
4453 		}
4454 	}
4455 #endif
4456 
4457 	return 0;
4458 }
4459 
4460 static void __exit stmmac_exit(void)
4461 {
4462 #ifdef CONFIG_DEBUG_FS
4463 	debugfs_remove_recursive(stmmac_fs_dir);
4464 #endif
4465 }
4466 
4467 module_init(stmmac_init)
4468 module_exit(stmmac_exit)
4469 
4470 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4471 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4472 MODULE_LICENSE("GPL");
4473