1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 
54 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
55 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
56 
57 /* Module parameters */
58 #define TX_TIMEO	5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62 
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66 
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70 
71 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
73 
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77 
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81 
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86 
87 #define	DEFAULT_BUFSIZE	1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91 
92 #define	STMMAC_RX_COPYBREAK	256
93 
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
96 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97 
98 #define STMMAC_DEFAULT_LPI_TIMER	1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103 
/* By default the driver uses ring mode to manage the TX and RX descriptors,
 * but the user can force chain mode instead of ring mode.
 */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "Use chain mode instead of ring mode");
110 
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112 
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117 
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119 
120 /**
121  * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127 	if (unlikely(watchdog < 0))
128 		watchdog = TX_TIMEO;
129 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130 		buf_sz = DEFAULT_BUFSIZE;
131 	if (unlikely(flow_ctrl > 1))
132 		flow_ctrl = FLOW_AUTO;
133 	else if (likely(flow_ctrl < 0))
134 		flow_ctrl = FLOW_OFF;
135 	if (unlikely((pause < 0) || (pause > 0xffff)))
136 		pause = PAUSE_TIME;
137 	if (eee_timer < 0)
138 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140 
141 /**
142  * stmmac_disable_all_queues - Disable all queues
143  * @priv: driver private structure
144  */
145 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
146 {
147 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
148 	u32 queue;
149 
150 	for (queue = 0; queue < rx_queues_cnt; queue++) {
151 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
152 
153 		napi_disable(&rx_q->napi);
154 	}
155 }
156 
157 /**
158  * stmmac_enable_all_queues - Enable all queues
159  * @priv: driver private structure
160  */
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162 {
163 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164 	u32 queue;
165 
166 	for (queue = 0; queue < rx_queues_cnt; queue++) {
167 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
168 
169 		napi_enable(&rx_q->napi);
170 	}
171 }
172 
173 /**
174  * stmmac_stop_all_queues - Stop all queues
175  * @priv: driver private structure
176  */
177 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
178 {
179 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
180 	u32 queue;
181 
182 	for (queue = 0; queue < tx_queues_cnt; queue++)
183 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
184 }
185 
186 /**
187  * stmmac_start_all_queues - Start all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_start_all_queues(struct stmmac_priv *priv)
191 {
192 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193 	u32 queue;
194 
195 	for (queue = 0; queue < tx_queues_cnt; queue++)
196 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198 
199 /**
200  * stmmac_clk_csr_set - dynamically set the MDC clock
201  * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the CSR
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform,
 *	the CSR Clock Range selection cannot be changed at run-time
 *	and it is fixed (as reported in the driver documentation).
 *	Otherwise the driver will try to set the MDC clock dynamically
 *	according to the actual clock input.
210  */
211 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
212 {
213 	u32 clk_rate;
214 
215 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
216 
	/* The platform-provided default clk_csr is assumed valid
	 * in all cases except the ones handled below.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider because the frequency
	 * of clk_csr_i is unknown, so the default divider is left
	 * unchanged.
	 */
224 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
225 		if (clk_rate < CSR_F_35M)
226 			priv->clk_csr = STMMAC_CSR_20_35M;
227 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
228 			priv->clk_csr = STMMAC_CSR_35_60M;
229 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
230 			priv->clk_csr = STMMAC_CSR_60_100M;
231 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
232 			priv->clk_csr = STMMAC_CSR_100_150M;
233 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
234 			priv->clk_csr = STMMAC_CSR_150_250M;
235 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
236 			priv->clk_csr = STMMAC_CSR_250_300M;
237 	}
238 
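	/* The sun8i glue layer uses its own CSR divider encoding, so it
	 * overrides whatever was selected above.
	 */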
239 	if (priv->plat->has_sun8i) {
240 		if (clk_rate > 160000000)
241 			priv->clk_csr = 0x03;
242 		else if (clk_rate > 80000000)
243 			priv->clk_csr = 0x02;
244 		else if (clk_rate > 40000000)
245 			priv->clk_csr = 0x01;
246 		else
247 			priv->clk_csr = 0;
248 	}
249 }
250 
251 static void print_pkt(unsigned char *buf, int len)
252 {
253 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
254 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
255 }
256 
257 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
258 {
259 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
260 	u32 avail;
261 
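	/* Free slots left in the circular ring; one descriptor is always kept
	 * unused so that cur_tx == dirty_tx unambiguously means "empty"
	 * rather than "full".
	 */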
262 	if (tx_q->dirty_tx > tx_q->cur_tx)
263 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
264 	else
265 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
266 
267 	return avail;
268 }
269 
270 /**
271  * stmmac_rx_dirty - Get RX queue dirty
272  * @priv: driver private structure
273  * @queue: RX queue index
274  */
275 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
276 {
277 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
278 	u32 dirty;
279 
280 	if (rx_q->dirty_rx <= rx_q->cur_rx)
281 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
282 	else
283 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
284 
285 	return dirty;
286 }
287 
288 /**
289  * stmmac_hw_fix_mac_speed - callback for speed selection
290  * @priv: driver private structure
291  * Description: on some platforms (e.g. ST), some HW system configuration
292  * registers have to be set according to the link speed negotiated.
293  */
294 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
295 {
296 	struct net_device *ndev = priv->dev;
297 	struct phy_device *phydev = ndev->phydev;
298 
299 	if (likely(priv->plat->fix_mac_speed))
300 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
301 }
302 
303 /**
304  * stmmac_enable_eee_mode - check and enter in LPI mode
305  * @priv: driver private structure
 * Description: this function checks whether all TX queues are idle and, if
 * so, enters the LPI mode when EEE is enabled.
308  */
309 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
310 {
311 	u32 tx_cnt = priv->plat->tx_queues_to_use;
312 	u32 queue;
313 
314 	/* check if all TX queues have the work finished */
315 	for (queue = 0; queue < tx_cnt; queue++) {
316 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
317 
318 		if (tx_q->dirty_tx != tx_q->cur_tx)
319 			return; /* still unfinished work */
320 	}
321 
322 	/* Check and enter in LPI mode */
323 	if (!priv->tx_path_in_lpi_mode)
324 		priv->hw->mac->set_eee_mode(priv->hw,
325 					    priv->plat->en_tx_lpi_clockgating);
326 }
327 
328 /**
329  * stmmac_disable_eee_mode - disable and exit from LPI mode
330  * @priv: driver private structure
 * Description: this function exits and disables EEE when the LPI state is
 * active. It is called from the xmit path.
333  */
334 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
335 {
336 	priv->hw->mac->reset_eee_mode(priv->hw);
337 	del_timer_sync(&priv->eee_ctrl_timer);
338 	priv->tx_path_in_lpi_mode = false;
339 }
340 
341 /**
342  * stmmac_eee_ctrl_timer - EEE TX SW timer.
343  * @arg : data hook
344  * Description:
 *  if there is no data transfer and we are not already in the LPI state,
 *  then the MAC transmitter can be moved to the LPI state.
347  */
348 static void stmmac_eee_ctrl_timer(unsigned long arg)
349 {
350 	struct stmmac_priv *priv = (struct stmmac_priv *)arg;
351 
352 	stmmac_enable_eee_mode(priv);
353 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
354 }
355 
356 /**
357  * stmmac_eee_init - init EEE
358  * @priv: driver private structure
359  * Description:
 *  if the GMAC supports EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
363  */
364 bool stmmac_eee_init(struct stmmac_priv *priv)
365 {
366 	struct net_device *ndev = priv->dev;
367 	unsigned long flags;
368 	bool ret = false;
369 
	/* When using the PCS we cannot access the phy registers at this stage,
	 * so extra features like EEE are not supported.
372 	 */
373 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
374 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
375 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
376 		goto out;
377 
378 	/* MAC core supports the EEE feature. */
379 	if (priv->dma_cap.eee) {
380 		int tx_lpi_timer = priv->tx_lpi_timer;
381 
382 		/* Check if the PHY supports EEE */
383 		if (phy_init_eee(ndev->phydev, 1)) {
			/* Handle the case where EEE can no longer be supported
			 * at run-time (for example because the link partner
			 * capabilities have changed).
			 * In that case the driver disables its own timers.
388 			 */
389 			spin_lock_irqsave(&priv->lock, flags);
390 			if (priv->eee_active) {
391 				netdev_dbg(priv->dev, "disable EEE\n");
392 				del_timer_sync(&priv->eee_ctrl_timer);
393 				priv->hw->mac->set_eee_timer(priv->hw, 0,
394 							     tx_lpi_timer);
395 			}
396 			priv->eee_active = 0;
397 			spin_unlock_irqrestore(&priv->lock, flags);
398 			goto out;
399 		}
400 		/* Activate the EEE and start timers */
401 		spin_lock_irqsave(&priv->lock, flags);
402 		if (!priv->eee_active) {
403 			priv->eee_active = 1;
404 			setup_timer(&priv->eee_ctrl_timer,
405 				    stmmac_eee_ctrl_timer,
406 				    (unsigned long)priv);
407 			mod_timer(&priv->eee_ctrl_timer,
408 				  STMMAC_LPI_T(eee_timer));
409 
410 			priv->hw->mac->set_eee_timer(priv->hw,
411 						     STMMAC_DEFAULT_LIT_LS,
412 						     tx_lpi_timer);
413 		}
414 		/* Set HW EEE according to the speed */
415 		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
416 
417 		ret = true;
418 		spin_unlock_irqrestore(&priv->lock, flags);
419 
420 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
421 	}
422 out:
423 	return ret;
424 }
425 
426 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
427  * @priv: driver private structure
428  * @p : descriptor pointer
429  * @skb : the socket buffer
 * Description :
 * This function will read the timestamp from the descriptor, perform some
 * sanity checks and then pass it to the stack.
433  */
434 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
435 				   struct dma_desc *p, struct sk_buff *skb)
436 {
437 	struct skb_shared_hwtstamps shhwtstamp;
438 	u64 ns;
439 
440 	if (!priv->hwts_tx_en)
441 		return;
442 
443 	/* exit if skb doesn't support hw tstamp */
444 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
445 		return;
446 
447 	/* check tx tstamp status */
448 	if (priv->hw->desc->get_tx_timestamp_status(p)) {
449 		/* get the valid tstamp */
450 		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
451 
452 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
453 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
454 
455 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
456 		/* pass tstamp to stack */
457 		skb_tstamp_tx(skb, &shhwtstamp);
458 	}
459 
460 	return;
461 }
462 
463 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
464  * @priv: driver private structure
465  * @p : descriptor pointer
466  * @np : next descriptor pointer
467  * @skb : the socket buffer
468  * Description :
 * This function will read the received packet's timestamp from the descriptor
 * and pass it to the stack. It also performs some sanity checks.
471  */
472 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
473 				   struct dma_desc *np, struct sk_buff *skb)
474 {
475 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
476 	u64 ns;
477 
478 	if (!priv->hwts_rx_en)
479 		return;
480 
481 	/* Check if timestamp is available */
482 	if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
483 		/* For GMAC4, the valid timestamp is from CTX next desc. */
484 		if (priv->plat->has_gmac4)
485 			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
486 		else
487 			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
488 
489 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
490 		shhwtstamp = skb_hwtstamps(skb);
491 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
492 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
493 	} else  {
494 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
495 	}
496 }
497 
498 /**
499  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
500  *  @dev: device pointer.
 *  @ifr: An IOCTL-specific structure that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet timestamping based on user input.
506  *  Return Value:
507  *  0 on success and an appropriate -ve integer on failure.
508  */
509 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
510 {
511 	struct stmmac_priv *priv = netdev_priv(dev);
512 	struct hwtstamp_config config;
513 	struct timespec64 now;
514 	u64 temp = 0;
515 	u32 ptp_v2 = 0;
516 	u32 tstamp_all = 0;
517 	u32 ptp_over_ipv4_udp = 0;
518 	u32 ptp_over_ipv6_udp = 0;
519 	u32 ptp_over_ethernet = 0;
520 	u32 snap_type_sel = 0;
521 	u32 ts_master_en = 0;
522 	u32 ts_event_en = 0;
523 	u32 value = 0;
524 	u32 sec_inc;
525 
526 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
527 		netdev_alert(priv->dev, "No support for HW time stamping\n");
528 		priv->hwts_tx_en = 0;
529 		priv->hwts_rx_en = 0;
530 
531 		return -EOPNOTSUPP;
532 	}
533 
534 	if (copy_from_user(&config, ifr->ifr_data,
535 			   sizeof(struct hwtstamp_config)))
536 		return -EFAULT;
537 
538 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
539 		   __func__, config.flags, config.tx_type, config.rx_filter);
540 
541 	/* reserved for future extensions */
542 	if (config.flags)
543 		return -EINVAL;
544 
545 	if (config.tx_type != HWTSTAMP_TX_OFF &&
546 	    config.tx_type != HWTSTAMP_TX_ON)
547 		return -ERANGE;
548 
549 	if (priv->adv_ts) {
550 		switch (config.rx_filter) {
551 		case HWTSTAMP_FILTER_NONE:
			/* do not time stamp any incoming packet */
553 			config.rx_filter = HWTSTAMP_FILTER_NONE;
554 			break;
555 
556 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
557 			/* PTP v1, UDP, any kind of event packet */
558 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
559 			/* take time stamp for all event messages */
560 			if (priv->plat->has_gmac4)
561 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
562 			else
563 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
564 
565 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
566 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
567 			break;
568 
569 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
570 			/* PTP v1, UDP, Sync packet */
571 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
572 			/* take time stamp for SYNC messages only */
573 			ts_event_en = PTP_TCR_TSEVNTENA;
574 
575 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
576 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
577 			break;
578 
579 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
580 			/* PTP v1, UDP, Delay_req packet */
581 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
582 			/* take time stamp for Delay_Req messages only */
583 			ts_master_en = PTP_TCR_TSMSTRENA;
584 			ts_event_en = PTP_TCR_TSEVNTENA;
585 
586 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
587 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
588 			break;
589 
590 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
591 			/* PTP v2, UDP, any kind of event packet */
592 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
593 			ptp_v2 = PTP_TCR_TSVER2ENA;
594 			/* take time stamp for all event messages */
595 			if (priv->plat->has_gmac4)
596 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
597 			else
598 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
599 
600 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
601 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
602 			break;
603 
604 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
605 			/* PTP v2, UDP, Sync packet */
606 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
607 			ptp_v2 = PTP_TCR_TSVER2ENA;
608 			/* take time stamp for SYNC messages only */
609 			ts_event_en = PTP_TCR_TSEVNTENA;
610 
611 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
612 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
613 			break;
614 
615 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
616 			/* PTP v2, UDP, Delay_req packet */
617 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
618 			ptp_v2 = PTP_TCR_TSVER2ENA;
619 			/* take time stamp for Delay_Req messages only */
620 			ts_master_en = PTP_TCR_TSMSTRENA;
621 			ts_event_en = PTP_TCR_TSEVNTENA;
622 
623 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
624 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
625 			break;
626 
627 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.1AS, any layer, any kind of event packet */
629 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
630 			ptp_v2 = PTP_TCR_TSVER2ENA;
631 			/* take time stamp for all event messages */
632 			if (priv->plat->has_gmac4)
633 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
634 			else
635 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
636 
637 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
638 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
639 			ptp_over_ethernet = PTP_TCR_TSIPENA;
640 			break;
641 
642 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.1AS, any layer, Sync packet */
644 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
645 			ptp_v2 = PTP_TCR_TSVER2ENA;
646 			/* take time stamp for SYNC messages only */
647 			ts_event_en = PTP_TCR_TSEVNTENA;
648 
649 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
650 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
651 			ptp_over_ethernet = PTP_TCR_TSIPENA;
652 			break;
653 
654 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.1AS, any layer, Delay_req packet */
656 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
657 			ptp_v2 = PTP_TCR_TSVER2ENA;
658 			/* take time stamp for Delay_Req messages only */
659 			ts_master_en = PTP_TCR_TSMSTRENA;
660 			ts_event_en = PTP_TCR_TSEVNTENA;
661 
662 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
663 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
664 			ptp_over_ethernet = PTP_TCR_TSIPENA;
665 			break;
666 
667 		case HWTSTAMP_FILTER_NTP_ALL:
668 		case HWTSTAMP_FILTER_ALL:
669 			/* time stamp any incoming packet */
670 			config.rx_filter = HWTSTAMP_FILTER_ALL;
671 			tstamp_all = PTP_TCR_TSENALL;
672 			break;
673 
674 		default:
675 			return -ERANGE;
676 		}
677 	} else {
678 		switch (config.rx_filter) {
679 		case HWTSTAMP_FILTER_NONE:
680 			config.rx_filter = HWTSTAMP_FILTER_NONE;
681 			break;
682 		default:
683 			/* PTP v1, UDP, any kind of event packet */
684 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
685 			break;
686 		}
687 	}
688 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
689 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
690 
691 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
692 		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
693 	else {
694 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
695 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
696 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
697 			 ts_master_en | snap_type_sel);
698 		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
699 
700 		/* program Sub Second Increment reg */
701 		sec_inc = priv->hw->ptp->config_sub_second_increment(
702 			priv->ptpaddr, priv->plat->clk_ptp_rate,
703 			priv->plat->has_gmac4);
704 		temp = div_u64(1000000000ULL, sec_inc);
705 
		/* calculate the default addend value:
		 * the formula is:
		 * addend = (2^32)/freq_div_ratio;
		 * where freq_div_ratio = 1e9ns/sec_inc
		 */
711 		temp = (u64)(temp << 32);
712 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
713 		priv->hw->ptp->config_addend(priv->ptpaddr,
714 					     priv->default_addend);
715 
716 		/* initialize system time */
717 		ktime_get_real_ts64(&now);
718 
719 		/* lower 32 bits of tv_sec are safe until y2106 */
720 		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
721 					    now.tv_nsec);
722 	}
723 
724 	return copy_to_user(ifr->ifr_data, &config,
725 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
726 }
727 
728 /**
729  * stmmac_init_ptp - init PTP
730  * @priv: driver private structure
 * Description: this verifies whether the HW supports PTPv1 or PTPv2
 * by looking at the HW capability register.
 * This function also registers the PTP driver.
734  */
735 static int stmmac_init_ptp(struct stmmac_priv *priv)
736 {
737 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
738 		return -EOPNOTSUPP;
739 
740 	priv->adv_ts = 0;
741 	/* Check if adv_ts can be enabled for dwmac 4.x core */
742 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
743 		priv->adv_ts = 1;
744 	/* Dwmac 3.x core with extend_desc can support adv_ts */
745 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
746 		priv->adv_ts = 1;
747 
748 	if (priv->dma_cap.time_stamp)
749 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
750 
751 	if (priv->adv_ts)
752 		netdev_info(priv->dev,
753 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
754 
755 	priv->hw->ptp = &stmmac_ptp;
756 	priv->hwts_tx_en = 0;
757 	priv->hwts_rx_en = 0;
758 
759 	stmmac_ptp_register(priv);
760 
761 	return 0;
762 }
763 
764 static void stmmac_release_ptp(struct stmmac_priv *priv)
765 {
766 	if (priv->plat->clk_ptp_ref)
767 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
768 	stmmac_ptp_unregister(priv);
769 }
770 
771 /**
772  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode negotiated on the link
 *  Description: It is used for configuring the flow control in all queues
775  */
776 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
777 {
778 	u32 tx_cnt = priv->plat->tx_queues_to_use;
779 
780 	priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
781 				 priv->pause, tx_cnt);
782 }
783 
784 /**
785  * stmmac_adjust_link - adjusts the link parameters
786  * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because the link may come up on a
 * different network (that is EEE capable).
792  */
793 static void stmmac_adjust_link(struct net_device *dev)
794 {
795 	struct stmmac_priv *priv = netdev_priv(dev);
796 	struct phy_device *phydev = dev->phydev;
797 	unsigned long flags;
798 	bool new_state = false;
799 
800 	if (!phydev)
801 		return;
802 
803 	spin_lock_irqsave(&priv->lock, flags);
804 
805 	if (phydev->link) {
806 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
807 
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
810 		if (phydev->duplex != priv->oldduplex) {
811 			new_state = true;
812 			if (!phydev->duplex)
813 				ctrl &= ~priv->hw->link.duplex;
814 			else
815 				ctrl |= priv->hw->link.duplex;
816 			priv->oldduplex = phydev->duplex;
817 		}
818 		/* Flow Control operation */
819 		if (phydev->pause)
820 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
821 
822 		if (phydev->speed != priv->speed) {
823 			new_state = true;
824 			ctrl &= ~priv->hw->link.speed_mask;
825 			switch (phydev->speed) {
826 			case SPEED_1000:
827 				ctrl |= priv->hw->link.speed1000;
828 				break;
829 			case SPEED_100:
830 				ctrl |= priv->hw->link.speed100;
831 				break;
832 			case SPEED_10:
833 				ctrl |= priv->hw->link.speed10;
834 				break;
835 			default:
836 				netif_warn(priv, link, priv->dev,
837 					   "broken speed: %d\n", phydev->speed);
838 				phydev->speed = SPEED_UNKNOWN;
839 				break;
840 			}
841 			if (phydev->speed != SPEED_UNKNOWN)
842 				stmmac_hw_fix_mac_speed(priv);
843 			priv->speed = phydev->speed;
844 		}
845 
846 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
847 
848 		if (!priv->oldlink) {
849 			new_state = true;
850 			priv->oldlink = true;
851 		}
852 	} else if (priv->oldlink) {
853 		new_state = true;
854 		priv->oldlink = false;
855 		priv->speed = SPEED_UNKNOWN;
856 		priv->oldduplex = DUPLEX_UNKNOWN;
857 	}
858 
859 	if (new_state && netif_msg_link(priv))
860 		phy_print_status(phydev);
861 
862 	spin_unlock_irqrestore(&priv->lock, flags);
863 
864 	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the adjust_link hook when a
		 * switch is attached to the stmmac driver.
867 		 */
868 		phydev->irq = PHY_IGNORE_INTERRUPT;
869 	else
870 		/* At this stage, init the EEE if supported.
871 		 * Never called in case of fixed_link.
872 		 */
873 		priv->eee_enabled = stmmac_eee_init(priv);
874 }
875 
876 /**
877  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
878  * @priv: driver private structure
 * Description: this verifies whether the HW supports the Physical Coding
 * Sublayer (PCS), an interface that can be used when the MAC is configured
 * for the TBI, RTBI, or SGMII PHY interface.
882  */
883 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
884 {
885 	int interface = priv->plat->interface;
886 
887 	if (priv->dma_cap.pcs) {
888 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
889 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
890 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
891 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
892 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
893 			priv->hw->pcs = STMMAC_PCS_RGMII;
894 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
895 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
896 			priv->hw->pcs = STMMAC_PCS_SGMII;
897 		}
898 	}
899 }
900 
901 /**
902  * stmmac_init_phy - PHY initialization
903  * @dev: net device structure
 * Description: it initializes the driver's PHY state and attaches the PHY
 * to the MAC driver.
906  *  Return value:
907  *  0 on success
908  */
909 static int stmmac_init_phy(struct net_device *dev)
910 {
911 	struct stmmac_priv *priv = netdev_priv(dev);
912 	struct phy_device *phydev;
913 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
914 	char bus_id[MII_BUS_ID_SIZE];
915 	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

	priv->oldlink = false;
918 	priv->speed = SPEED_UNKNOWN;
919 	priv->oldduplex = DUPLEX_UNKNOWN;
920 
921 	if (priv->plat->phy_node) {
922 		phydev = of_phy_connect(dev, priv->plat->phy_node,
923 					&stmmac_adjust_link, 0, interface);
924 	} else {
925 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
926 			 priv->plat->bus_id);
927 
928 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
929 			 priv->plat->phy_addr);
930 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
931 			   phy_id_fmt);
932 
933 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
934 				     interface);
935 	}
936 
937 	if (IS_ERR_OR_NULL(phydev)) {
938 		netdev_err(priv->dev, "Could not attach to PHY\n");
939 		if (!phydev)
940 			return -ENODEV;
941 
942 		return PTR_ERR(phydev);
943 	}
944 
	/* Stop advertising 1000BASE-T capability if the interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
	    (max_speed < 1000 && max_speed > 0))
949 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
950 					 SUPPORTED_1000baseT_Full);
951 
952 	/*
953 	 * Broken HW is sometimes missing the pull-up resistor on the
954 	 * MDIO line, which results in reads to non-existent devices returning
955 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
956 	 * device as well.
957 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
958 	 */
959 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
960 		phy_disconnect(phydev);
961 		return -ENODEV;
962 	}
963 
	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling; make sure we force a link transition if
	 * we have an UP/DOWN/UP transition
967 	 */
968 	if (phydev->is_pseudo_fixed_link)
969 		phydev->irq = PHY_POLL;
970 
971 	phy_attached_info(phydev);
972 	return 0;
973 }
974 
975 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
976 {
977 	u32 rx_cnt = priv->plat->rx_queues_to_use;
978 	void *head_rx;
979 	u32 queue;
980 
981 	/* Display RX rings */
982 	for (queue = 0; queue < rx_cnt; queue++) {
983 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
984 
985 		pr_info("\tRX Queue %u rings\n", queue);
986 
987 		if (priv->extend_desc)
988 			head_rx = (void *)rx_q->dma_erx;
989 		else
990 			head_rx = (void *)rx_q->dma_rx;
991 
992 		/* Display RX ring */
993 		priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
994 	}
995 }
996 
997 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
998 {
999 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1000 	void *head_tx;
1001 	u32 queue;
1002 
1003 	/* Display TX rings */
1004 	for (queue = 0; queue < tx_cnt; queue++) {
1005 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1006 
		pr_info("\tTX Queue %u rings\n", queue);
1008 
1009 		if (priv->extend_desc)
1010 			head_tx = (void *)tx_q->dma_etx;
1011 		else
1012 			head_tx = (void *)tx_q->dma_tx;
1013 
1014 		priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1015 	}
1016 }
1017 
1018 static void stmmac_display_rings(struct stmmac_priv *priv)
1019 {
1020 	/* Display RX ring */
1021 	stmmac_display_rx_rings(priv);
1022 
1023 	/* Display TX ring */
1024 	stmmac_display_tx_rings(priv);
1025 }
1026 
1027 static int stmmac_set_bfsize(int mtu, int bufsize)
1028 {
1029 	int ret = bufsize;
1030 
1031 	if (mtu >= BUF_SIZE_4KiB)
1032 		ret = BUF_SIZE_8KiB;
1033 	else if (mtu >= BUF_SIZE_2KiB)
1034 		ret = BUF_SIZE_4KiB;
1035 	else if (mtu > DEFAULT_BUFSIZE)
1036 		ret = BUF_SIZE_2KiB;
1037 	else
1038 		ret = DEFAULT_BUFSIZE;
1039 
1040 	return ret;
1041 }
1042 
1043 /**
1044  * stmmac_clear_rx_descriptors - clear RX descriptors
1045  * @priv: driver private structure
1046  * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors;
 * it handles both basic and extended descriptors.
1049  */
1050 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1051 {
1052 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1053 	int i;
1054 
1055 	/* Clear the RX descriptors */
1056 	for (i = 0; i < DMA_RX_SIZE; i++)
1057 		if (priv->extend_desc)
1058 			priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1059 						     priv->use_riwt, priv->mode,
1060 						     (i == DMA_RX_SIZE - 1));
1061 		else
1062 			priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1063 						     priv->use_riwt, priv->mode,
1064 						     (i == DMA_RX_SIZE - 1));
1065 }
1066 
1067 /**
1068  * stmmac_clear_tx_descriptors - clear tx descriptors
1069  * @priv: driver private structure
1070  * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors;
 * it handles both basic and extended descriptors.
1073  */
1074 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1075 {
1076 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1077 	int i;
1078 
1079 	/* Clear the TX descriptors */
1080 	for (i = 0; i < DMA_TX_SIZE; i++)
1081 		if (priv->extend_desc)
1082 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1083 						     priv->mode,
1084 						     (i == DMA_TX_SIZE - 1));
1085 		else
1086 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1087 						     priv->mode,
1088 						     (i == DMA_TX_SIZE - 1));
1089 }
1090 
1091 /**
1092  * stmmac_clear_descriptors - clear descriptors
1093  * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors;
 * it handles both basic and extended descriptors.
1096  */
1097 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1098 {
1099 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1100 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1101 	u32 queue;
1102 
1103 	/* Clear the RX descriptors */
1104 	for (queue = 0; queue < rx_queue_cnt; queue++)
1105 		stmmac_clear_rx_descriptors(priv, queue);
1106 
1107 	/* Clear the TX descriptors */
1108 	for (queue = 0; queue < tx_queue_cnt; queue++)
1109 		stmmac_clear_tx_descriptors(priv, queue);
1110 }
1111 
1112 /**
1113  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1114  * @priv: driver private structure
1115  * @p: descriptor pointer
1116  * @i: descriptor index
1117  * @flags: gfp flag
1118  * @queue: RX queue index
1119  * Description: this function is called to allocate a receive buffer, perform
1120  * the DMA mapping and init the descriptor.
1121  */
1122 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1123 				  int i, gfp_t flags, u32 queue)
1124 {
1125 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1126 	struct sk_buff *skb;
1127 
1128 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1129 	if (!skb) {
1130 		netdev_err(priv->dev,
1131 			   "%s: Rx init fails; skb is NULL\n", __func__);
1132 		return -ENOMEM;
1133 	}
1134 	rx_q->rx_skbuff[i] = skb;
1135 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1136 						priv->dma_buf_sz,
1137 						DMA_FROM_DEVICE);
1138 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1139 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1140 		dev_kfree_skb_any(skb);
1141 		return -EINVAL;
1142 	}
1143 
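	/* DWMAC core 4.00 and newer take the buffer address in des0;
	 * older cores use des2.
	 */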
1144 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
1145 		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1146 	else
1147 		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1148 
1149 	if ((priv->hw->mode->init_desc3) &&
1150 	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
1151 		priv->hw->mode->init_desc3(p);
1152 
1153 	return 0;
1154 }
1155 
1156 /**
 * stmmac_free_rx_buffer - free an RX DMA buffer
1158  * @priv: private structure
1159  * @queue: RX queue index
1160  * @i: buffer index.
1161  */
1162 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1163 {
1164 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1165 
1166 	if (rx_q->rx_skbuff[i]) {
1167 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1168 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1169 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1170 	}
1171 	rx_q->rx_skbuff[i] = NULL;
1172 }
1173 
1174 /**
 * stmmac_free_tx_buffer - free a TX DMA buffer
 * @priv: private structure
 * @queue: TX queue index
1178  * @i: buffer index.
1179  */
1180 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1181 {
1182 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1183 
1184 	if (tx_q->tx_skbuff_dma[i].buf) {
1185 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1186 			dma_unmap_page(priv->device,
1187 				       tx_q->tx_skbuff_dma[i].buf,
1188 				       tx_q->tx_skbuff_dma[i].len,
1189 				       DMA_TO_DEVICE);
1190 		else
1191 			dma_unmap_single(priv->device,
1192 					 tx_q->tx_skbuff_dma[i].buf,
1193 					 tx_q->tx_skbuff_dma[i].len,
1194 					 DMA_TO_DEVICE);
1195 	}
1196 
1197 	if (tx_q->tx_skbuff[i]) {
1198 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1199 		tx_q->tx_skbuff[i] = NULL;
1200 		tx_q->tx_skbuff_dma[i].buf = 0;
1201 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1202 	}
1203 }
1204 
1205 /**
1206  * init_dma_rx_desc_rings - init the RX descriptor rings
1207  * @dev: net device structure
1208  * @flags: gfp flag.
1209  * Description: this function initializes the DMA RX descriptors
1210  * and allocates the socket buffers. It supports the chained and ring
1211  * modes.
1212  */
1213 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1214 {
1215 	struct stmmac_priv *priv = netdev_priv(dev);
1216 	u32 rx_count = priv->plat->rx_queues_to_use;
1217 	unsigned int bfsize = 0;
1218 	int ret = -ENOMEM;
1219 	int queue;
1220 	int i;
1221 
1222 	if (priv->hw->mode->set_16kib_bfsize)
1223 		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1224 
1225 	if (bfsize < BUF_SIZE_16KiB)
1226 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1227 
1228 	priv->dma_buf_sz = bfsize;
1229 
1230 	/* RX INITIALIZATION */
1231 	netif_dbg(priv, probe, priv->dev,
1232 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1233 
1234 	for (queue = 0; queue < rx_count; queue++) {
1235 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1236 
1237 		netif_dbg(priv, probe, priv->dev,
1238 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1239 			  (u32)rx_q->dma_rx_phy);
1240 
1241 		for (i = 0; i < DMA_RX_SIZE; i++) {
1242 			struct dma_desc *p;
1243 
1244 			if (priv->extend_desc)
1245 				p = &((rx_q->dma_erx + i)->basic);
1246 			else
1247 				p = rx_q->dma_rx + i;
1248 
1249 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1250 						     queue);
1251 			if (ret)
1252 				goto err_init_rx_buffers;
1253 
1254 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1255 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1256 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1257 		}
1258 
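		/* The loop above ran i up to DMA_RX_SIZE, so dirty_rx starts
		 * at zero here.
		 */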
1259 		rx_q->cur_rx = 0;
1260 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1261 
1262 		stmmac_clear_rx_descriptors(priv, queue);
1263 
1264 		/* Setup the chained descriptor addresses */
1265 		if (priv->mode == STMMAC_CHAIN_MODE) {
1266 			if (priv->extend_desc)
1267 				priv->hw->mode->init(rx_q->dma_erx,
1268 						     rx_q->dma_rx_phy,
1269 						     DMA_RX_SIZE, 1);
1270 			else
1271 				priv->hw->mode->init(rx_q->dma_rx,
1272 						     rx_q->dma_rx_phy,
1273 						     DMA_RX_SIZE, 0);
1274 		}
1275 	}
1276 
1277 	buf_sz = bfsize;
1278 
1279 	return 0;
1280 
1281 err_init_rx_buffers:
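	/* Unwind in reverse order: free the buffers allocated so far in the
	 * queue that failed, then every fully initialised queue before it.
	 */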
1282 	while (queue >= 0) {
1283 		while (--i >= 0)
1284 			stmmac_free_rx_buffer(priv, queue, i);
1285 
1286 		if (queue == 0)
1287 			break;
1288 
1289 		i = DMA_RX_SIZE;
1290 		queue--;
1291 	}
1292 
1293 	return ret;
1294 }
1295 
1296 /**
1297  * init_dma_tx_desc_rings - init the TX descriptor rings
1298  * @dev: net device structure.
1299  * Description: this function initializes the DMA TX descriptors
1300  * and allocates the socket buffers. It supports the chained and ring
1301  * modes.
1302  */
1303 static int init_dma_tx_desc_rings(struct net_device *dev)
1304 {
1305 	struct stmmac_priv *priv = netdev_priv(dev);
1306 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1307 	u32 queue;
1308 	int i;
1309 
1310 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1311 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1312 
1313 		netif_dbg(priv, probe, priv->dev,
1314 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1315 			 (u32)tx_q->dma_tx_phy);
1316 
1317 		/* Setup the chained descriptor addresses */
1318 		if (priv->mode == STMMAC_CHAIN_MODE) {
1319 			if (priv->extend_desc)
1320 				priv->hw->mode->init(tx_q->dma_etx,
1321 						     tx_q->dma_tx_phy,
1322 						     DMA_TX_SIZE, 1);
1323 			else
1324 				priv->hw->mode->init(tx_q->dma_tx,
1325 						     tx_q->dma_tx_phy,
1326 						     DMA_TX_SIZE, 0);
1327 		}
1328 
1329 		for (i = 0; i < DMA_TX_SIZE; i++) {
1330 			struct dma_desc *p;
1331 			if (priv->extend_desc)
1332 				p = &((tx_q->dma_etx + i)->basic);
1333 			else
1334 				p = tx_q->dma_tx + i;
1335 
1336 			if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1337 				p->des0 = 0;
1338 				p->des1 = 0;
1339 				p->des2 = 0;
1340 				p->des3 = 0;
1341 			} else {
1342 				p->des2 = 0;
1343 			}
1344 
1345 			tx_q->tx_skbuff_dma[i].buf = 0;
1346 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1347 			tx_q->tx_skbuff_dma[i].len = 0;
1348 			tx_q->tx_skbuff_dma[i].last_segment = false;
1349 			tx_q->tx_skbuff[i] = NULL;
1350 		}
1351 
1352 		tx_q->dirty_tx = 0;
1353 		tx_q->cur_tx = 0;
1354 
1355 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1356 	}
1357 
1358 	return 0;
1359 }
1360 
1361 /**
1362  * init_dma_desc_rings - init the RX/TX descriptor rings
1363  * @dev: net device structure
1364  * @flags: gfp flag.
1365  * Description: this function initializes the DMA RX/TX descriptors
1366  * and allocates the socket buffers. It supports the chained and ring
1367  * modes.
1368  */
1369 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1370 {
1371 	struct stmmac_priv *priv = netdev_priv(dev);
1372 	int ret;
1373 
1374 	ret = init_dma_rx_desc_rings(dev, flags);
1375 	if (ret)
1376 		return ret;
1377 
1378 	ret = init_dma_tx_desc_rings(dev);
1379 
1380 	stmmac_clear_descriptors(priv);
1381 
1382 	if (netif_msg_hw(priv))
1383 		stmmac_display_rings(priv);
1384 
1385 	return ret;
1386 }
1387 
1388 /**
1389  * dma_free_rx_skbufs - free RX dma buffers
1390  * @priv: private structure
1391  * @queue: RX queue index
1392  */
1393 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1394 {
1395 	int i;
1396 
1397 	for (i = 0; i < DMA_RX_SIZE; i++)
1398 		stmmac_free_rx_buffer(priv, queue, i);
1399 }
1400 
1401 /**
1402  * dma_free_tx_skbufs - free TX dma buffers
1403  * @priv: private structure
1404  * @queue: TX queue index
1405  */
1406 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1407 {
1408 	int i;
1409 
1410 	for (i = 0; i < DMA_TX_SIZE; i++)
1411 		stmmac_free_tx_buffer(priv, queue, i);
1412 }
1413 
1414 /**
1415  * free_dma_rx_desc_resources - free RX dma desc resources
1416  * @priv: private structure
1417  */
1418 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1419 {
1420 	u32 rx_count = priv->plat->rx_queues_to_use;
1421 	u32 queue;
1422 
1423 	/* Free RX queue resources */
1424 	for (queue = 0; queue < rx_count; queue++) {
1425 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1426 
1427 		/* Release the DMA RX socket buffers */
1428 		dma_free_rx_skbufs(priv, queue);
1429 
1430 		/* Free DMA regions of consistent memory previously allocated */
1431 		if (!priv->extend_desc)
1432 			dma_free_coherent(priv->device,
1433 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1434 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1435 		else
1436 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1437 					  sizeof(struct dma_extended_desc),
1438 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1439 
1440 		kfree(rx_q->rx_skbuff_dma);
1441 		kfree(rx_q->rx_skbuff);
1442 	}
1443 }
1444 
1445 /**
1446  * free_dma_tx_desc_resources - free TX dma desc resources
1447  * @priv: private structure
1448  */
1449 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1450 {
1451 	u32 tx_count = priv->plat->tx_queues_to_use;
1452 	u32 queue;
1453 
1454 	/* Free TX queue resources */
1455 	for (queue = 0; queue < tx_count; queue++) {
1456 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1457 
1458 		/* Release the DMA TX socket buffers */
1459 		dma_free_tx_skbufs(priv, queue);
1460 
1461 		/* Free DMA regions of consistent memory previously allocated */
1462 		if (!priv->extend_desc)
1463 			dma_free_coherent(priv->device,
1464 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1465 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1466 		else
1467 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1468 					  sizeof(struct dma_extended_desc),
1469 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1470 
1471 		kfree(tx_q->tx_skbuff_dma);
1472 		kfree(tx_q->tx_skbuff);
1473 	}
1474 }
1475 
1476 /**
1477  * alloc_dma_rx_desc_resources - alloc RX resources.
1478  * @priv: private structure
 * Description: according to which descriptor type is used (extended or basic),
 * this function allocates the resources for the RX path. It also
 * pre-allocates the RX socket buffers in order to allow the zero-copy
 * mechanism.
1483  */
1484 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1485 {
1486 	u32 rx_count = priv->plat->rx_queues_to_use;
1487 	int ret = -ENOMEM;
1488 	u32 queue;
1489 
1490 	/* RX queues buffers and DMA */
1491 	for (queue = 0; queue < rx_count; queue++) {
1492 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1493 
1494 		rx_q->queue_index = queue;
1495 		rx_q->priv_data = priv;
1496 
1497 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1498 						    sizeof(dma_addr_t),
1499 						    GFP_KERNEL);
1500 		if (!rx_q->rx_skbuff_dma)
1501 			goto err_dma;
1502 
1503 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1504 						sizeof(struct sk_buff *),
1505 						GFP_KERNEL);
1506 		if (!rx_q->rx_skbuff)
1507 			goto err_dma;
1508 
1509 		if (priv->extend_desc) {
1510 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1511 							    DMA_RX_SIZE *
1512 							    sizeof(struct
1513 							    dma_extended_desc),
1514 							    &rx_q->dma_rx_phy,
1515 							    GFP_KERNEL);
1516 			if (!rx_q->dma_erx)
1517 				goto err_dma;
1518 
1519 		} else {
1520 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1521 							   DMA_RX_SIZE *
1522 							   sizeof(struct
1523 							   dma_desc),
1524 							   &rx_q->dma_rx_phy,
1525 							   GFP_KERNEL);
1526 			if (!rx_q->dma_rx)
1527 				goto err_dma;
1528 		}
1529 	}
1530 
1531 	return 0;
1532 
1533 err_dma:
1534 	free_dma_rx_desc_resources(priv);
1535 
1536 	return ret;
1537 }
1538 
1539 /**
1540  * alloc_dma_tx_desc_resources - alloc TX resources.
1541  * @priv: private structure
 * Description: according to which descriptor type is used (extended or basic),
 * this function allocates the resources for the TX path.
1546  */
1547 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1548 {
1549 	u32 tx_count = priv->plat->tx_queues_to_use;
1550 	int ret = -ENOMEM;
1551 	u32 queue;
1552 
1553 	/* TX queues buffers and DMA */
1554 	for (queue = 0; queue < tx_count; queue++) {
1555 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1556 
1557 		tx_q->queue_index = queue;
1558 		tx_q->priv_data = priv;
1559 
1560 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1561 						    sizeof(*tx_q->tx_skbuff_dma),
1562 						    GFP_KERNEL);
1563 		if (!tx_q->tx_skbuff_dma)
1564 			goto err_dma;
1565 
1566 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1567 						sizeof(struct sk_buff *),
1568 						GFP_KERNEL);
1569 		if (!tx_q->tx_skbuff)
1570 			goto err_dma;
1571 
1572 		if (priv->extend_desc) {
1573 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1574 							    DMA_TX_SIZE *
1575 							    sizeof(struct
1576 							    dma_extended_desc),
1577 							    &tx_q->dma_tx_phy,
1578 							    GFP_KERNEL);
1579 			if (!tx_q->dma_etx)
1580 				goto err_dma;
1581 		} else {
1582 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1583 							   DMA_TX_SIZE *
1584 							   sizeof(struct
1585 								  dma_desc),
1586 							   &tx_q->dma_tx_phy,
1587 							   GFP_KERNEL);
1588 			if (!tx_q->dma_tx)
1589 				goto err_dma;
1590 		}
1591 	}
1592 
1593 	return 0;
1594 
1595 err_dma:
1596 	free_dma_tx_desc_resources(priv);
1597 
1598 	return ret;
1599 }
1600 
1601 /**
1602  * alloc_dma_desc_resources - alloc TX/RX resources.
1603  * @priv: private structure
 * Description: according to which descriptor type is used (extended or basic),
 * this function allocates the resources for the TX and RX paths. In the case
 * of reception, for example, it pre-allocates the RX socket buffers in order
 * to allow the zero-copy mechanism.
1608  */
1609 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1610 {
1611 	/* RX Allocation */
1612 	int ret = alloc_dma_rx_desc_resources(priv);
1613 
1614 	if (ret)
1615 		return ret;
1616 
1617 	ret = alloc_dma_tx_desc_resources(priv);
1618 
1619 	return ret;
1620 }
1621 
1622 /**
1623  * free_dma_desc_resources - free dma desc resources
1624  * @priv: private structure
1625  */
1626 static void free_dma_desc_resources(struct stmmac_priv *priv)
1627 {
1628 	/* Release the DMA RX socket buffers */
1629 	free_dma_rx_desc_resources(priv);
1630 
1631 	/* Release the DMA TX socket buffers */
1632 	free_dma_tx_desc_resources(priv);
1633 }
1634 
1635 /**
1636  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1637  *  @priv: driver private structure
1638  *  Description: It is used for enabling the rx queues in the MAC
1639  */
1640 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1641 {
1642 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1643 	int queue;
1644 	u8 mode;
1645 
1646 	for (queue = 0; queue < rx_queues_count; queue++) {
1647 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1648 		priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1649 	}
1650 }
1651 
1652 /**
1653  * stmmac_start_rx_dma - start RX DMA channel
1654  * @priv: driver private structure
1655  * @chan: RX channel index
1656  * Description:
1657  * This starts a RX DMA channel
1658  */
1659 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1660 {
1661 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1662 	priv->hw->dma->start_rx(priv->ioaddr, chan);
1663 }
1664 
1665 /**
1666  * stmmac_start_tx_dma - start TX DMA channel
1667  * @priv: driver private structure
1668  * @chan: TX channel index
1669  * Description:
1670  * This starts a TX DMA channel
1671  */
1672 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1673 {
1674 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1675 	priv->hw->dma->start_tx(priv->ioaddr, chan);
1676 }
1677 
1678 /**
1679  * stmmac_stop_rx_dma - stop RX DMA channel
1680  * @priv: driver private structure
1681  * @chan: RX channel index
1682  * Description:
1683  * This stops a RX DMA channel
1684  */
1685 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1686 {
1687 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1688 	priv->hw->dma->stop_rx(priv->ioaddr, chan);
1689 }
1690 
1691 /**
1692  * stmmac_stop_tx_dma - stop TX DMA channel
1693  * @priv: driver private structure
1694  * @chan: TX channel index
1695  * Description:
1696  * This stops a TX DMA channel
1697  */
1698 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1699 {
1700 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1701 	priv->hw->dma->stop_tx(priv->ioaddr, chan);
1702 }
1703 
1704 /**
1705  * stmmac_start_all_dma - start all RX and TX DMA channels
1706  * @priv: driver private structure
1707  * Description:
1708  * This starts all the RX and TX DMA channels
1709  */
1710 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1711 {
1712 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1713 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1714 	u32 chan = 0;
1715 
1716 	for (chan = 0; chan < rx_channels_count; chan++)
1717 		stmmac_start_rx_dma(priv, chan);
1718 
1719 	for (chan = 0; chan < tx_channels_count; chan++)
1720 		stmmac_start_tx_dma(priv, chan);
1721 }
1722 
1723 /**
1724  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1725  * @priv: driver private structure
1726  * Description:
1727  * This stops the RX and TX DMA channels
1728  */
1729 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1730 {
1731 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1732 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1733 	u32 chan = 0;
1734 
1735 	for (chan = 0; chan < rx_channels_count; chan++)
1736 		stmmac_stop_rx_dma(priv, chan);
1737 
1738 	for (chan = 0; chan < tx_channels_count; chan++)
1739 		stmmac_stop_tx_dma(priv, chan);
1740 }
1741 
1742 /**
1743  *  stmmac_dma_operation_mode - HW DMA operation mode
1744  *  @priv: driver private structure
1745  *  Description: it is used for configuring the DMA operation mode register in
1746  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1747  */
1748 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1749 {
1750 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1751 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1752 	int rxfifosz = priv->plat->rx_fifo_size;
1753 	u32 txmode = 0;
1754 	u32 rxmode = 0;
1755 	u32 chan = 0;
1756 
1757 	if (rxfifosz == 0)
1758 		rxfifosz = priv->dma_cap.rx_fifo_size;
1759 
1760 	if (priv->plat->force_thresh_dma_mode) {
1761 		txmode = tc;
1762 		rxmode = tc;
1763 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1764 		/*
1765 		 * In case of GMAC, SF mode can be enabled
1766 		 * to perform the TX COE in HW. This depends on:
1767 		 * 1) TX COE being actually supported;
1768 		 * 2) there being no bugged Jumbo frame support
1769 		 *    that requires the csum not to be inserted in the TDES.
1770 		 */
1771 		txmode = SF_DMA_MODE;
1772 		rxmode = SF_DMA_MODE;
1773 		priv->xstats.threshold = SF_DMA_MODE;
1774 	} else {
1775 		txmode = tc;
1776 		rxmode = SF_DMA_MODE;
1777 	}
1778 
1779 	/* configure all channels */
1780 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1781 		for (chan = 0; chan < rx_channels_count; chan++)
1782 			priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1783 						   rxfifosz);
1784 
1785 		for (chan = 0; chan < tx_channels_count; chan++)
1786 			priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1787 	} else {
1788 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1789 					rxfifosz);
1790 	}
1791 }
1792 
1793 /**
1794  * stmmac_tx_clean - to manage the transmission completion
1795  * @priv: driver private structure
1796  * @queue: TX queue index
1797  * Description: it reclaims the transmit resources after transmission completes.
1798  */
1799 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1800 {
1801 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1802 	unsigned int bytes_compl = 0, pkts_compl = 0;
1803 	unsigned int entry = tx_q->dirty_tx;
1804 
1805 	netif_tx_lock(priv->dev);
1806 
1807 	priv->xstats.tx_clean++;
1808 
1809 	while (entry != tx_q->cur_tx) {
1810 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1811 		struct dma_desc *p;
1812 		int status;
1813 
1814 		if (priv->extend_desc)
1815 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1816 		else
1817 			p = tx_q->dma_tx + entry;
1818 
1819 		status = priv->hw->desc->tx_status(&priv->dev->stats,
1820 						      &priv->xstats, p,
1821 						      priv->ioaddr);
1822 		/* Check if the descriptor is owned by the DMA */
1823 		if (unlikely(status & tx_dma_own))
1824 			break;
1825 
1826 		/* Just consider the last segment and ...*/
1827 		if (likely(!(status & tx_not_ls))) {
1828 			/* ... verify the status error condition */
1829 			if (unlikely(status & tx_err)) {
1830 				priv->dev->stats.tx_errors++;
1831 			} else {
1832 				priv->dev->stats.tx_packets++;
1833 				priv->xstats.tx_pkt_n++;
1834 			}
1835 			stmmac_get_tx_hwtstamp(priv, p, skb);
1836 		}
1837 
1838 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1839 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1840 				dma_unmap_page(priv->device,
1841 					       tx_q->tx_skbuff_dma[entry].buf,
1842 					       tx_q->tx_skbuff_dma[entry].len,
1843 					       DMA_TO_DEVICE);
1844 			else
1845 				dma_unmap_single(priv->device,
1846 						 tx_q->tx_skbuff_dma[entry].buf,
1847 						 tx_q->tx_skbuff_dma[entry].len,
1848 						 DMA_TO_DEVICE);
1849 			tx_q->tx_skbuff_dma[entry].buf = 0;
1850 			tx_q->tx_skbuff_dma[entry].len = 0;
1851 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1852 		}
1853 
1854 		if (priv->hw->mode->clean_desc3)
1855 			priv->hw->mode->clean_desc3(tx_q, p);
1856 
1857 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1858 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1859 
1860 		if (likely(skb != NULL)) {
1861 			pkts_compl++;
1862 			bytes_compl += skb->len;
1863 			dev_consume_skb_any(skb);
1864 			tx_q->tx_skbuff[entry] = NULL;
1865 		}
1866 
1867 		priv->hw->desc->release_tx_desc(p, priv->mode);
1868 
1869 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1870 	}
1871 	tx_q->dirty_tx = entry;
1872 
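	/* Report the completed packets and bytes to the BQL layer. */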
1873 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1874 				  pkts_compl, bytes_compl);
1875 
1876 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1877 								queue))) &&
1878 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1879 
1880 		netif_dbg(priv, tx_done, priv->dev,
1881 			  "%s: restart transmit\n", __func__);
1882 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1883 	}
1884 
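	/* If EEE is enabled and the TX path is not already in LPI, ask the
	 * MAC to enter the LPI state and re-arm the LPI timer.
	 */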
1885 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1886 		stmmac_enable_eee_mode(priv);
1887 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1888 	}
1889 	netif_tx_unlock(priv->dev);
1890 }
1891 
1892 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1893 {
1894 	priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1895 }
1896 
1897 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1898 {
1899 	priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1900 }
1901 
1902 /**
1903  * stmmac_tx_err - to manage the tx error
1904  * @priv: driver private structure
1905  * @chan: channel index
1906  * Description: it cleans the descriptors and restarts the transmission
1907  * in case of transmission errors.
1908  */
1909 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1910 {
1911 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1912 	int i;
1913 
1914 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1915 
1916 	stmmac_stop_tx_dma(priv, chan);
1917 	dma_free_tx_skbufs(priv, chan);
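	/* Re-initialize every descriptor in the ring before restarting the
	 * DMA on this channel.
	 */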
1918 	for (i = 0; i < DMA_TX_SIZE; i++)
1919 		if (priv->extend_desc)
1920 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1921 						     priv->mode,
1922 						     (i == DMA_TX_SIZE - 1));
1923 		else
1924 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1925 						     priv->mode,
1926 						     (i == DMA_TX_SIZE - 1));
1927 	tx_q->dirty_tx = 0;
1928 	tx_q->cur_tx = 0;
1929 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1930 	stmmac_start_tx_dma(priv, chan);
1931 
1932 	priv->dev->stats.tx_errors++;
1933 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1934 }
1935 
1936 /**
1937  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1938  *  @priv: driver private structure
1939  *  @txmode: TX operating mode
1940  *  @rxmode: RX operating mode
1941  *  @chan: channel index
1942  *  Description: it is used for configuring the DMA operation mode at
1943  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1944  *  mode.
1945  */
1946 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1947 					  u32 rxmode, u32 chan)
1948 {
1949 	int rxfifosz = priv->plat->rx_fifo_size;
1950 
1951 	if (rxfifosz == 0)
1952 		rxfifosz = priv->dma_cap.rx_fifo_size;
1953 
1954 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1955 		priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1956 					   rxfifosz);
1957 		priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1958 	} else {
1959 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1960 					rxfifosz);
1961 	}
1962 }
1963 
1964 /**
1965  * stmmac_dma_interrupt - DMA ISR
1966  * @priv: driver private structure
1967  * Description: this is the DMA ISR. It is called by the main ISR.
1968  * It calls the dwmac dma routine and schedules the NAPI poll method when
1969  * there is work to be done.
1970  */
1971 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1972 {
1973 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
1974 	int status;
1975 	u32 chan;
1976 
1977 	for (chan = 0; chan < tx_channel_count; chan++) {
1978 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
1979 
1980 		status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1981 						      &priv->xstats, chan);
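		/* Both RX and TX events for this channel are handled by the
		 * NAPI instance attached to the RX queue: mask the DMA irq
		 * and schedule it.
		 */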
1982 		if (likely((status & handle_rx)) || (status & handle_tx)) {
1983 			if (likely(napi_schedule_prep(&rx_q->napi))) {
1984 				stmmac_disable_dma_irq(priv, chan);
1985 				__napi_schedule(&rx_q->napi);
1986 			}
1987 		}
1988 
1989 		if (unlikely(status & tx_hard_error_bump_tc)) {
1990 			/* Try to bump up the dma threshold on this failure */
1991 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1992 			    (tc <= 256)) {
1993 				tc += 64;
1994 				if (priv->plat->force_thresh_dma_mode)
1995 					stmmac_set_dma_operation_mode(priv,
1996 								      tc,
1997 								      tc,
1998 								      chan);
1999 				else
2000 					stmmac_set_dma_operation_mode(priv,
2001 								    tc,
2002 								    SF_DMA_MODE,
2003 								    chan);
2004 				priv->xstats.threshold = tc;
2005 			}
2006 		} else if (unlikely(status == tx_hard_error)) {
2007 			stmmac_tx_err(priv, chan);
2008 		}
2009 	}
2010 }
2011 
2012 /**
2013  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2014  * @priv: driver private structure
2015  * Description: this masks the MMC irq; the counters are managed in SW.
2016  */
2017 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2018 {
2019 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2020 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2021 
2022 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2023 		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2024 		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2025 	} else {
2026 		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2027 		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2028 	}
2029 
2030 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2031 
2032 	if (priv->dma_cap.rmon) {
2033 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2034 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2035 	} else
2036 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2037 }
2038 
2039 /**
2040  * stmmac_selec_desc_mode - select among normal/alternate/extended descriptors
2041  * @priv: driver private structure
2042  * Description: select the Enhanced/Alternate or Normal descriptors.
2043  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2044  * supported by the HW capability register.
2045  */
2046 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2047 {
2048 	if (priv->plat->enh_desc) {
2049 		dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2050 
2051 		/* GMAC older than 3.50 has no extended descriptors */
2052 		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2053 			dev_info(priv->device, "Enabled extended descriptors\n");
2054 			priv->extend_desc = 1;
2055 		} else
2056 			dev_warn(priv->device, "Extended descriptors not supported\n");
2057 
2058 		priv->hw->desc = &enh_desc_ops;
2059 	} else {
2060 		dev_info(priv->device, "Normal descriptors\n");
2061 		priv->hw->desc = &ndesc_ops;
2062 	}
2063 }
2064 
2065 /**
2066  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2067  * @priv: driver private structure
2068  * Description:
2069  *  newer GMAC chip generations have a register that indicates the
2070  *  presence of the optional features/functions.
2071  *  It can also be used to override the values passed through the
2072  *  platform, which are only necessary for old MAC10/100 and GMAC chips.
2073  */
2074 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2075 {
2076 	u32 ret = 0;
2077 
2078 	if (priv->hw->dma->get_hw_feature) {
2079 		priv->hw->dma->get_hw_feature(priv->ioaddr,
2080 					      &priv->dma_cap);
2081 		ret = 1;
2082 	}
2083 
2084 	return ret;
2085 }
2086 
2087 /**
2088  * stmmac_check_ether_addr - check if the MAC addr is valid
2089  * @priv: driver private structure
2090  * Description:
2091  * it verifies whether the MAC address is valid; if it is not, a random
2092  * MAC address is generated
2093  */
2094 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2095 {
2096 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2097 		priv->hw->mac->get_umac_addr(priv->hw,
2098 					     priv->dev->dev_addr, 0);
2099 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2100 			eth_hw_addr_random(priv->dev);
2101 		netdev_info(priv->dev, "device MAC address %pM\n",
2102 			    priv->dev->dev_addr);
2103 	}
2104 }
2105 
2106 /**
2107  * stmmac_init_dma_engine - DMA init.
2108  * @priv: driver private structure
2109  * Description:
2110  * It initializes the DMA by invoking the specific MAC/GMAC callback.
2111  * Some DMA parameters can be passed from the platform;
2112  * if these are not passed, a default is used for the MAC or GMAC.
2113  */
2114 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2115 {
2116 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2117 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2118 	struct stmmac_rx_queue *rx_q;
2119 	struct stmmac_tx_queue *tx_q;
2120 	u32 dummy_dma_rx_phy = 0;
2121 	u32 dummy_dma_tx_phy = 0;
2122 	u32 chan = 0;
2123 	int atds = 0;
2124 	int ret = 0;
2125 
2126 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2127 		dev_err(priv->device, "Invalid DMA configuration\n");
2128 		return -EINVAL;
2129 	}
2130 
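	/* Extended descriptors in ring mode require the alternate (32-byte)
	 * descriptor size, signalled to the DMA through the atds flag.
	 */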
2131 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2132 		atds = 1;
2133 
2134 	ret = priv->hw->dma->reset(priv->ioaddr);
2135 	if (ret) {
2136 		dev_err(priv->device, "Failed to reset the dma\n");
2137 		return ret;
2138 	}
2139 
2140 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2141 		/* DMA Configuration */
2142 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2143 				    dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2144 
2145 		/* DMA RX Channel Configuration */
2146 		for (chan = 0; chan < rx_channels_count; chan++) {
2147 			rx_q = &priv->rx_queue[chan];
2148 
2149 			priv->hw->dma->init_rx_chan(priv->ioaddr,
2150 						    priv->plat->dma_cfg,
2151 						    rx_q->dma_rx_phy, chan);
2152 
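			/* Program the RX tail pointer past the last
			 * descriptor so the whole pre-allocated ring is
			 * usable by the DMA.
			 */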
2153 			rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2154 				    (DMA_RX_SIZE * sizeof(struct dma_desc));
2155 			priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2156 						       rx_q->rx_tail_addr,
2157 						       chan);
2158 		}
2159 
2160 		/* DMA TX Channel Configuration */
2161 		for (chan = 0; chan < tx_channels_count; chan++) {
2162 			tx_q = &priv->tx_queue[chan];
2163 
2164 			priv->hw->dma->init_chan(priv->ioaddr,
2165 						 priv->plat->dma_cfg,
2166 						 chan);
2167 
2168 			priv->hw->dma->init_tx_chan(priv->ioaddr,
2169 						    priv->plat->dma_cfg,
2170 						    tx_q->dma_tx_phy, chan);
2171 
2172 			tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2173 				    (DMA_TX_SIZE * sizeof(struct dma_desc));
2174 			priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2175 						       tx_q->tx_tail_addr,
2176 						       chan);
2177 		}
2178 	} else {
2179 		rx_q = &priv->rx_queue[chan];
2180 		tx_q = &priv->tx_queue[chan];
2181 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2182 				    tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2183 	}
2184 
2185 	if (priv->plat->axi && priv->hw->dma->axi)
2186 		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2187 
2188 	return ret;
2189 }
2190 
2191 /**
2192  * stmmac_tx_timer - mitigation sw timer for tx.
2193  * @data: data pointer
2194  * Description:
2195  * This is the timer handler used to directly invoke stmmac_tx_clean.
2196  */
2197 static void stmmac_tx_timer(unsigned long data)
2198 {
2199 	struct stmmac_priv *priv = (struct stmmac_priv *)data;
2200 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2201 	u32 queue;
2202 
2203 	/* let's scan all the tx queues */
2204 	for (queue = 0; queue < tx_queues_count; queue++)
2205 		stmmac_tx_clean(priv, queue);
2206 }
2207 
2208 /**
2209  * stmmac_init_tx_coalesce - init tx mitigation options.
2210  * @priv: driver private structure
2211  * Description:
2212  * This inits the transmit coalesce parameters: i.e. timer rate,
2213  * timer handler and default threshold used for enabling the
2214  * interrupt on completion bit.
2215  */
2216 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2217 {
2218 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2219 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2220 	init_timer(&priv->txtimer);
2221 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2222 	priv->txtimer.data = (unsigned long)priv;
2223 	priv->txtimer.function = stmmac_tx_timer;
2224 	add_timer(&priv->txtimer);
2225 }
2226 
2227 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2228 {
2229 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2230 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2231 	u32 chan;
2232 
2233 	/* set TX ring length */
2234 	if (priv->hw->dma->set_tx_ring_len) {
2235 		for (chan = 0; chan < tx_channels_count; chan++)
2236 			priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2237 						       (DMA_TX_SIZE - 1), chan);
2238 	}
2239 
2240 	/* set RX ring length */
2241 	if (priv->hw->dma->set_rx_ring_len) {
2242 		for (chan = 0; chan < rx_channels_count; chan++)
2243 			priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2244 						       (DMA_RX_SIZE - 1), chan);
2245 	}
2246 }
2247 
2248 /**
2249  *  stmmac_set_tx_queue_weight - Set TX queue weight
2250  *  @priv: driver private structure
2251  *  Description: It is used for setting the TX queue weights
2252  */
2253 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2254 {
2255 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2256 	u32 weight;
2257 	u32 queue;
2258 
2259 	for (queue = 0; queue < tx_queues_count; queue++) {
2260 		weight = priv->plat->tx_queues_cfg[queue].weight;
2261 		priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2262 	}
2263 }
2264 
2265 /**
2266  *  stmmac_configure_cbs - Configure CBS in TX queue
2267  *  @priv: driver private structure
2268  *  Description: It is used for configuring CBS in AVB TX queues
2269  */
2270 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2271 {
2272 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2273 	u32 mode_to_use;
2274 	u32 queue;
2275 
2276 	/* queue 0 is reserved for legacy traffic */
2277 	for (queue = 1; queue < tx_queues_count; queue++) {
2278 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2279 		if (mode_to_use == MTL_QUEUE_DCB)
2280 			continue;
2281 
2282 		priv->hw->mac->config_cbs(priv->hw,
2283 				priv->plat->tx_queues_cfg[queue].send_slope,
2284 				priv->plat->tx_queues_cfg[queue].idle_slope,
2285 				priv->plat->tx_queues_cfg[queue].high_credit,
2286 				priv->plat->tx_queues_cfg[queue].low_credit,
2287 				queue);
2288 	}
2289 }
2290 
2291 /**
2292  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2293  *  @priv: driver private structure
2294  *  Description: It is used for mapping RX queues to RX dma channels
2295  */
2296 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2297 {
2298 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2299 	u32 queue;
2300 	u32 chan;
2301 
2302 	for (queue = 0; queue < rx_queues_count; queue++) {
2303 		chan = priv->plat->rx_queues_cfg[queue].chan;
2304 		priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2305 	}
2306 }
2307 
2308 /**
2309  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2310  *  @priv: driver private structure
2311  *  Description: It is used for configuring the RX Queue Priority
2312  */
2313 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2314 {
2315 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2316 	u32 queue;
2317 	u32 prio;
2318 
2319 	for (queue = 0; queue < rx_queues_count; queue++) {
2320 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2321 			continue;
2322 
2323 		prio = priv->plat->rx_queues_cfg[queue].prio;
2324 		priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2325 	}
2326 }
2327 
2328 /**
2329  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2330  *  @priv: driver private structure
2331  *  Description: It is used for configuring the TX Queue Priority
2332  */
2333 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2334 {
2335 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2336 	u32 queue;
2337 	u32 prio;
2338 
2339 	for (queue = 0; queue < tx_queues_count; queue++) {
2340 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2341 			continue;
2342 
2343 		prio = priv->plat->tx_queues_cfg[queue].prio;
2344 		priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2345 	}
2346 }
2347 
2348 /**
2349  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2350  *  @priv: driver private structure
2351  *  Description: It is used for configuring the RX queue routing
2352  */
2353 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2354 {
2355 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2356 	u32 queue;
2357 	u8 packet;
2358 
2359 	for (queue = 0; queue < rx_queues_count; queue++) {
2360 		/* no specific packet type routing specified for the queue */
2361 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2362 			continue;
2363 
2364 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2365 		priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2366 	}
2367 }
2368 
2369 /**
2370  *  stmmac_mtl_configuration - Configure MTL
2371  *  @priv: driver private structure
2372  *  Description: It is used for configuring the MTL
2373  */
2374 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2375 {
2376 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2377 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2378 
2379 	if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2380 		stmmac_set_tx_queue_weight(priv);
2381 
2382 	/* Configure MTL RX algorithms */
2383 	if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2384 		priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2385 						priv->plat->rx_sched_algorithm);
2386 
2387 	/* Configure MTL TX algorithms */
2388 	if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2389 		priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2390 						priv->plat->tx_sched_algorithm);
2391 
2392 	/* Configure CBS in AVB TX queues */
2393 	if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2394 		stmmac_configure_cbs(priv);
2395 
2396 	/* Map RX MTL to DMA channels */
2397 	if (priv->hw->mac->map_mtl_to_dma)
2398 		stmmac_rx_queue_dma_chan_map(priv);
2399 
2400 	/* Enable MAC RX Queues */
2401 	if (priv->hw->mac->rx_queue_enable)
2402 		stmmac_mac_enable_rx_queues(priv);
2403 
2404 	/* Set RX priorities */
2405 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2406 		stmmac_mac_config_rx_queues_prio(priv);
2407 
2408 	/* Set TX priorities */
2409 	if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2410 		stmmac_mac_config_tx_queues_prio(priv);
2411 
2412 	/* Set RX routing */
2413 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2414 		stmmac_mac_config_rx_queues_routing(priv);
2415 }
2416 
2417 /**
2418  * stmmac_hw_setup - setup mac in a usable state.
2419  *  @dev : pointer to the device structure.
2420  *  @init_ptp: initialize PTP if set
2421  *  Description:
2422  *  this is the main function to setup the HW in a usable state: the DMA
2423  *  engine is reset, the core registers are configured (e.g. AXI, checksum
2424  *  features, timers) and the DMA is ready to start receiving and transmitting.
2425  *  Return value:
2426  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2427  *  file on failure.
2428  */
2429 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2430 {
2431 	struct stmmac_priv *priv = netdev_priv(dev);
2432 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2433 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2434 	u32 chan;
2435 	int ret;
2436 
2437 	/* DMA initialization and SW reset */
2438 	ret = stmmac_init_dma_engine(priv);
2439 	if (ret < 0) {
2440 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2441 			   __func__);
2442 		return ret;
2443 	}
2444 
2445 	/* Copy the MAC addr into the HW  */
2446 	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2447 
2448 	/* PS and related bits will be programmed according to the speed */
2449 	if (priv->hw->pcs) {
2450 		int speed = priv->plat->mac_port_sel_speed;
2451 
2452 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2453 		    (speed == SPEED_1000)) {
2454 			priv->hw->ps = speed;
2455 		} else {
2456 			dev_warn(priv->device, "invalid port speed\n");
2457 			priv->hw->ps = 0;
2458 		}
2459 	}
2460 
2461 	/* Initialize the MAC Core */
2462 	priv->hw->mac->core_init(priv->hw, dev->mtu);
2463 
2464 	/* Initialize MTL*/
2465 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
2466 		stmmac_mtl_configuration(priv);
2467 
2468 	ret = priv->hw->mac->rx_ipc(priv->hw);
2469 	if (!ret) {
2470 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2471 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2472 		priv->hw->rx_csum = 0;
2473 	}
2474 
2475 	/* Enable the MAC Rx/Tx */
2476 	priv->hw->mac->set_mac(priv->ioaddr, true);
2477 
2478 	/* Set the HW DMA mode and the COE */
2479 	stmmac_dma_operation_mode(priv);
2480 
2481 	stmmac_mmc_setup(priv);
2482 
2483 	if (init_ptp) {
2484 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2485 		if (ret < 0)
2486 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2487 
2488 		ret = stmmac_init_ptp(priv);
2489 		if (ret == -EOPNOTSUPP)
2490 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2491 		else if (ret)
2492 			netdev_warn(priv->dev, "PTP init failed\n");
2493 	}
2494 
2495 #ifdef CONFIG_DEBUG_FS
2496 	ret = stmmac_init_fs(dev);
2497 	if (ret < 0)
2498 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2499 			    __func__);
2500 #endif
2501 	/* Start the ball rolling... */
2502 	stmmac_start_all_dma(priv);
2503 
2504 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2505 
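	/* When RX interrupt mitigation is in use, program the DMA RX watchdog
	 * with the maximum timeout.
	 */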
2506 	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2507 		priv->rx_riwt = MAX_DMA_RIWT;
2508 		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2509 	}
2510 
2511 	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2512 		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2513 
2514 	/* set TX and RX rings length */
2515 	stmmac_set_rings_length(priv);
2516 
2517 	/* Enable TSO */
2518 	if (priv->tso) {
2519 		for (chan = 0; chan < tx_cnt; chan++)
2520 			priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2521 	}
2522 
2523 	return 0;
2524 }
2525 
2526 static void stmmac_hw_teardown(struct net_device *dev)
2527 {
2528 	struct stmmac_priv *priv = netdev_priv(dev);
2529 
2530 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2531 }
2532 
2533 /**
2534  *  stmmac_open - open entry point of the driver
2535  *  @dev : pointer to the device structure.
2536  *  Description:
2537  *  This function is the open entry point of the driver.
2538  *  Return value:
2539  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2540  *  file on failure.
2541  */
2542 static int stmmac_open(struct net_device *dev)
2543 {
2544 	struct stmmac_priv *priv = netdev_priv(dev);
2545 	int ret;
2546 
2547 	stmmac_check_ether_addr(priv);
2548 
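	/* Attach to the PHY only when the link is not managed through an
	 * internal PCS (RGMII/TBI/RTBI).
	 */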
2549 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2550 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2551 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2552 		ret = stmmac_init_phy(dev);
2553 		if (ret) {
2554 			netdev_err(priv->dev,
2555 				   "%s: Cannot attach to PHY (error: %d)\n",
2556 				   __func__, ret);
2557 			return ret;
2558 		}
2559 	}
2560 
2561 	/* Extra statistics */
2562 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2563 	priv->xstats.threshold = tc;
2564 
2565 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2566 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2567 
2568 	ret = alloc_dma_desc_resources(priv);
2569 	if (ret < 0) {
2570 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2571 			   __func__);
2572 		goto dma_desc_error;
2573 	}
2574 
2575 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2576 	if (ret < 0) {
2577 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2578 			   __func__);
2579 		goto init_error;
2580 	}
2581 
2582 	ret = stmmac_hw_setup(dev, true);
2583 	if (ret < 0) {
2584 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2585 		goto init_error;
2586 	}
2587 
2588 	stmmac_init_tx_coalesce(priv);
2589 
2590 	if (dev->phydev)
2591 		phy_start(dev->phydev);
2592 
2593 	/* Request the IRQ lines */
2594 	ret = request_irq(dev->irq, stmmac_interrupt,
2595 			  IRQF_SHARED, dev->name, dev);
2596 	if (unlikely(ret < 0)) {
2597 		netdev_err(priv->dev,
2598 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2599 			   __func__, dev->irq, ret);
2600 		goto irq_error;
2601 	}
2602 
2603 	/* Request the Wake IRQ in case another line is used for WoL */
2604 	if (priv->wol_irq != dev->irq) {
2605 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2606 				  IRQF_SHARED, dev->name, dev);
2607 		if (unlikely(ret < 0)) {
2608 			netdev_err(priv->dev,
2609 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2610 				   __func__, priv->wol_irq, ret);
2611 			goto wolirq_error;
2612 		}
2613 	}
2614 
2615 	/* Request the LPI IRQ in case another line is used */
2616 	if (priv->lpi_irq > 0) {
2617 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2618 				  dev->name, dev);
2619 		if (unlikely(ret < 0)) {
2620 			netdev_err(priv->dev,
2621 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2622 				   __func__, priv->lpi_irq, ret);
2623 			goto lpiirq_error;
2624 		}
2625 	}
2626 
2627 	stmmac_enable_all_queues(priv);
2628 	stmmac_start_all_queues(priv);
2629 
2630 	return 0;
2631 
2632 lpiirq_error:
2633 	if (priv->wol_irq != dev->irq)
2634 		free_irq(priv->wol_irq, dev);
2635 wolirq_error:
2636 	free_irq(dev->irq, dev);
2637 irq_error:
2638 	if (dev->phydev)
2639 		phy_stop(dev->phydev);
2640 
2641 	del_timer_sync(&priv->txtimer);
2642 	stmmac_hw_teardown(dev);
2643 init_error:
2644 	free_dma_desc_resources(priv);
2645 dma_desc_error:
2646 	if (dev->phydev)
2647 		phy_disconnect(dev->phydev);
2648 
2649 	return ret;
2650 }
2651 
2652 /**
2653  *  stmmac_release - close entry point of the driver
2654  *  @dev : device pointer.
2655  *  Description:
2656  *  This is the stop entry point of the driver.
2657  */
2658 static int stmmac_release(struct net_device *dev)
2659 {
2660 	struct stmmac_priv *priv = netdev_priv(dev);
2661 
2662 	if (priv->eee_enabled)
2663 		del_timer_sync(&priv->eee_ctrl_timer);
2664 
2665 	/* Stop and disconnect the PHY */
2666 	if (dev->phydev) {
2667 		phy_stop(dev->phydev);
2668 		phy_disconnect(dev->phydev);
2669 	}
2670 
2671 	stmmac_stop_all_queues(priv);
2672 
2673 	stmmac_disable_all_queues(priv);
2674 
2675 	del_timer_sync(&priv->txtimer);
2676 
2677 	/* Free the IRQ lines */
2678 	free_irq(dev->irq, dev);
2679 	if (priv->wol_irq != dev->irq)
2680 		free_irq(priv->wol_irq, dev);
2681 	if (priv->lpi_irq > 0)
2682 		free_irq(priv->lpi_irq, dev);
2683 
2684 	/* Stop TX/RX DMA and clear the descriptors */
2685 	stmmac_stop_all_dma(priv);
2686 
2687 	/* Release and free the Rx/Tx resources */
2688 	free_dma_desc_resources(priv);
2689 
2690 	/* Disable the MAC Rx/Tx */
2691 	priv->hw->mac->set_mac(priv->ioaddr, false);
2692 
2693 	netif_carrier_off(dev);
2694 
2695 #ifdef CONFIG_DEBUG_FS
2696 	stmmac_exit_fs(dev);
2697 #endif
2698 
2699 	stmmac_release_ptp(priv);
2700 
2701 	return 0;
2702 }
2703 
2704 /**
2705  *  stmmac_tso_allocator - fill TX descriptors with the TSO payload
2706  *  @priv: driver private structure
2707  *  @des: buffer start address
2708  *  @total_len: total length to fill in descriptors
2709  *  @last_segment: condition for the last descriptor
2710  *  @queue: TX queue index
2711  *  Description:
2712  *  This function fills descriptors, taking additional ones as needed
2713  *  according to the buffer length to fill
2714  */
2715 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2716 				 int total_len, bool last_segment, u32 queue)
2717 {
2718 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2719 	struct dma_desc *desc;
2720 	u32 buff_size;
2721 	int tmp_len;
2722 
2723 	tmp_len = total_len;
2724 
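	/* Split the payload in chunks of at most TSO_MAX_BUFF_SIZE bytes,
	 * using one descriptor per chunk.
	 */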
2725 	while (tmp_len > 0) {
2726 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2727 		desc = tx_q->dma_tx + tx_q->cur_tx;
2728 
2729 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2730 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2731 			    TSO_MAX_BUFF_SIZE : tmp_len;
2732 
2733 		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2734 			0, 1,
2735 			(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2736 			0, 0);
2737 
2738 		tmp_len -= TSO_MAX_BUFF_SIZE;
2739 	}
2740 }
2741 
2742 /**
2743  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2744  *  @skb : the socket buffer
2745  *  @dev : device pointer
2746  *  Description: this is the transmit function that is called on TSO frames
2747  *  (support available on GMAC4 and newer chips).
2748  *  The diagram below shows the ring programming in case of TSO frames:
2749  *
2750  *  First Descriptor
2751  *   --------
2752  *   | DES0 |---> buffer1 = L2/L3/L4 header
2753  *   | DES1 |---> TCP Payload (can continue on next descr...)
2754  *   | DES2 |---> buffer 1 and 2 len
2755  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2756  *   --------
2757  *	|
2758  *     ...
2759  *	|
2760  *   --------
2761  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2762  *   | DES1 | --|
2763  *   | DES2 | --> buffer 1 and 2 len
2764  *   | DES3 |
2765  *   --------
2766  *
2767  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field need not be programmed for every frame.
2768  */
2769 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2770 {
2771 	struct dma_desc *desc, *first, *mss_desc = NULL;
2772 	struct stmmac_priv *priv = netdev_priv(dev);
2773 	int nfrags = skb_shinfo(skb)->nr_frags;
2774 	u32 queue = skb_get_queue_mapping(skb);
2775 	unsigned int first_entry, des;
2776 	struct stmmac_tx_queue *tx_q;
2777 	int tmp_pay_len = 0;
2778 	u32 pay_len, mss;
2779 	u8 proto_hdr_len;
2780 	int i;
2781 
2782 	tx_q = &priv->tx_queue[queue];
2783 
2784 	/* Compute header lengths */
2785 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2786 
2787 	/* Desc availability based on threshold should be safe enough */
2788 	if (unlikely(stmmac_tx_avail(priv, queue) <
2789 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2790 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2791 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2792 								queue));
2793 			/* This is a hard error, log it. */
2794 			netdev_err(priv->dev,
2795 				   "%s: Tx Ring full when queue awake\n",
2796 				   __func__);
2797 		}
2798 		return NETDEV_TX_BUSY;
2799 	}
2800 
2801 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2802 
2803 	mss = skb_shinfo(skb)->gso_size;
2804 
2805 	/* set new MSS value if needed */
2806 	if (mss != priv->mss) {
2807 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2808 		priv->hw->desc->set_mss(mss_desc, mss);
2809 		priv->mss = mss;
2810 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2811 	}
2812 
2813 	if (netif_msg_tx_queued(priv)) {
2814 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2815 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2816 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2817 			skb->data_len);
2818 	}
2819 
2820 	first_entry = tx_q->cur_tx;
2821 
2822 	desc = tx_q->dma_tx + first_entry;
2823 	first = desc;
2824 
2825 	/* first descriptor: fill Headers on Buf1 */
2826 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2827 			     DMA_TO_DEVICE);
2828 	if (dma_mapping_error(priv->device, des))
2829 		goto dma_map_err;
2830 
2831 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2832 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2833 
2834 	first->des0 = cpu_to_le32(des);
2835 
2836 	/* Fill start of payload in buff2 of first descriptor */
2837 	if (pay_len)
2838 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2839 
2840 	/* If needed take extra descriptors to fill the remaining payload */
2841 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2842 
2843 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2844 
2845 	/* Prepare fragments */
2846 	for (i = 0; i < nfrags; i++) {
2847 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2848 
2849 		des = skb_frag_dma_map(priv->device, frag, 0,
2850 				       skb_frag_size(frag),
2851 				       DMA_TO_DEVICE);
2852 		if (dma_mapping_error(priv->device, des))
2853 			goto dma_map_err;
2854 
2855 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2856 				     (i == nfrags - 1), queue);
2857 
2858 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2859 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2860 		tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2861 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2862 	}
2863 
2864 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2865 
2866 	/* Only the last descriptor gets to point to the skb. */
2867 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2868 
2869 	/* We've used all descriptors we need for this skb, however,
2870 	 * advance cur_tx so that it references a fresh descriptor.
2871 	 * ndo_start_xmit will fill this descriptor the next time it's
2872 	 * called and stmmac_tx_clean may clean up to this descriptor.
2873 	 */
2874 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2875 
2876 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2877 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2878 			  __func__);
2879 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2880 	}
2881 
2882 	dev->stats.tx_bytes += skb->len;
2883 	priv->xstats.tx_tso_frames++;
2884 	priv->xstats.tx_tso_nfrags += nfrags;
2885 
2886 	/* Manage tx mitigation */
2887 	priv->tx_count_frames += nfrags + 1;
2888 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2889 		mod_timer(&priv->txtimer,
2890 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2891 	} else {
2892 		priv->tx_count_frames = 0;
2893 		priv->hw->desc->set_tx_ic(desc);
2894 		priv->xstats.tx_set_ic_bit++;
2895 	}
2896 
2897 	skb_tx_timestamp(skb);
2898 
2899 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2900 		     priv->hwts_tx_en)) {
2901 		/* declare that device is doing timestamping */
2902 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2903 		priv->hw->desc->enable_tx_timestamp(first);
2904 	}
2905 
2906 	/* Complete the first descriptor before granting the DMA */
2907 	priv->hw->desc->prepare_tso_tx_desc(first, 1,
2908 			proto_hdr_len,
2909 			pay_len,
2910 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2911 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2912 
2913 	/* If context desc is used to change MSS */
2914 	if (mss_desc)
2915 		priv->hw->desc->set_tx_owner(mss_desc);
2916 
2917 	/* The own bit must be the latest setting done when preparing the
2918 	 * descriptor, and then a barrier is needed to make sure that
2919 	 * all is coherent before granting the DMA engine.
2920 	 */
2921 	dma_wmb();
2922 
2923 	if (netif_msg_pktdata(priv)) {
2924 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2925 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2926 			tx_q->cur_tx, first, nfrags);
2927 
2928 		priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2929 					     0);
2930 
2931 		pr_info(">>> frame to be transmitted: ");
2932 		print_pkt(skb->data, skb_headlen(skb));
2933 	}
2934 
2935 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2936 
2937 	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2938 				       queue);
2939 
2940 	return NETDEV_TX_OK;
2941 
2942 dma_map_err:
2943 	dev_err(priv->device, "Tx dma map failed\n");
2944 	dev_kfree_skb(skb);
2945 	priv->dev->stats.tx_dropped++;
2946 	return NETDEV_TX_OK;
2947 }
2948 
2949 /**
2950  *  stmmac_xmit - Tx entry point of the driver
2951  *  @skb : the socket buffer
2952  *  @dev : device pointer
2953  *  Description : this is the tx entry point of the driver.
2954  *  It programs the chain or the ring and supports oversized frames
2955  *  and SG feature.
2956  */
2957 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2958 {
2959 	struct stmmac_priv *priv = netdev_priv(dev);
2960 	unsigned int nopaged_len = skb_headlen(skb);
2961 	int i, csum_insertion = 0, is_jumbo = 0;
2962 	u32 queue = skb_get_queue_mapping(skb);
2963 	int nfrags = skb_shinfo(skb)->nr_frags;
2964 	int entry;
2965 	unsigned int first_entry;
2966 	struct dma_desc *desc, *first;
2967 	struct stmmac_tx_queue *tx_q;
2968 	unsigned int enh_desc;
2969 	unsigned int des;
2970 
2971 	tx_q = &priv->tx_queue[queue];
2972 
2973 	/* Manage oversized TCP frames for GMAC4 device */
2974 	if (skb_is_gso(skb) && priv->tso) {
2975 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
2976 			return stmmac_tso_xmit(skb, dev);
2977 	}
2978 
2979 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2980 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2981 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2982 								queue));
2983 			/* This is a hard error, log it. */
2984 			netdev_err(priv->dev,
2985 				   "%s: Tx Ring full when queue awake\n",
2986 				   __func__);
2987 		}
2988 		return NETDEV_TX_BUSY;
2989 	}
2990 
2991 	if (priv->tx_path_in_lpi_mode)
2992 		stmmac_disable_eee_mode(priv);
2993 
2994 	entry = tx_q->cur_tx;
2995 	first_entry = entry;
2996 
2997 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2998 
2999 	if (likely(priv->extend_desc))
3000 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3001 	else
3002 		desc = tx_q->dma_tx + entry;
3003 
3004 	first = desc;
3005 
3006 	enh_desc = priv->plat->enh_desc;
3007 	/* To program the descriptors according to the size of the frame */
3008 	if (enh_desc)
3009 		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3010 
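	/* On cores older than 4.00, jumbo frames are handled by the
	 * chain/ring specific helper, which may use more than one descriptor.
	 */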
3011 	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3012 					 DWMAC_CORE_4_00)) {
3013 		entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3014 		if (unlikely(entry < 0))
3015 			goto dma_map_err;
3016 	}
3017 
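	/* Map and program one descriptor per fragment; the descriptor for the
	 * linear part of the skb (the first one) is filled last, see below.
	 */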
3018 	for (i = 0; i < nfrags; i++) {
3019 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3020 		int len = skb_frag_size(frag);
3021 		bool last_segment = (i == (nfrags - 1));
3022 
3023 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3024 
3025 		if (likely(priv->extend_desc))
3026 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3027 		else
3028 			desc = tx_q->dma_tx + entry;
3029 
3030 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3031 				       DMA_TO_DEVICE);
3032 		if (dma_mapping_error(priv->device, des))
3033 			goto dma_map_err; /* should reuse desc w/o issues */
3034 
3035 		tx_q->tx_skbuff[entry] = NULL;
3036 
3037 		tx_q->tx_skbuff_dma[entry].buf = des;
3038 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3039 			desc->des0 = cpu_to_le32(des);
3040 		else
3041 			desc->des2 = cpu_to_le32(des);
3042 
3043 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3044 		tx_q->tx_skbuff_dma[entry].len = len;
3045 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3046 
3047 		/* Prepare the descriptor and set the own bit too */
3048 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3049 						priv->mode, 1, last_segment,
3050 						skb->len);
3051 	}
3052 
3053 	/* Only the last descriptor gets to point to the skb. */
3054 	tx_q->tx_skbuff[entry] = skb;
3055 
3056 	/* We've used all descriptors we need for this skb, however,
3057 	 * advance cur_tx so that it references a fresh descriptor.
3058 	 * ndo_start_xmit will fill this descriptor the next time it's
3059 	 * called and stmmac_tx_clean may clean up to this descriptor.
3060 	 */
3061 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3062 	tx_q->cur_tx = entry;
3063 
3064 	if (netif_msg_pktdata(priv)) {
3065 		void *tx_head;
3066 
3067 		netdev_dbg(priv->dev,
3068 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3069 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3070 			   entry, first, nfrags);
3071 
3072 		if (priv->extend_desc)
3073 			tx_head = (void *)tx_q->dma_etx;
3074 		else
3075 			tx_head = (void *)tx_q->dma_tx;
3076 
3077 		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3078 
3079 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3080 		print_pkt(skb->data, skb->len);
3081 	}
3082 
3083 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3084 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3085 			  __func__);
3086 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3087 	}
3088 
3089 	dev->stats.tx_bytes += skb->len;
3090 
3091 	/* According to the coalesce parameter the IC bit for the latest
3092 	 * segment is reset and the timer re-started to clean the tx status.
3093 	 * This approach takes care of the fragments: desc is the first
3094 	 * element in case of no SG.
3095 	 */
3096 	priv->tx_count_frames += nfrags + 1;
3097 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3098 		mod_timer(&priv->txtimer,
3099 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3100 	} else {
3101 		priv->tx_count_frames = 0;
3102 		priv->hw->desc->set_tx_ic(desc);
3103 		priv->xstats.tx_set_ic_bit++;
3104 	}
3105 
3106 	skb_tx_timestamp(skb);
3107 
3108 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3109 	 * problems because all the descriptors are actually ready to be
3110 	 * passed to the DMA engine.
3111 	 */
3112 	if (likely(!is_jumbo)) {
3113 		bool last_segment = (nfrags == 0);
3114 
3115 		des = dma_map_single(priv->device, skb->data,
3116 				     nopaged_len, DMA_TO_DEVICE);
3117 		if (dma_mapping_error(priv->device, des))
3118 			goto dma_map_err;
3119 
3120 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3121 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3122 			first->des0 = cpu_to_le32(des);
3123 		else
3124 			first->des2 = cpu_to_le32(des);
3125 
3126 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3127 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3128 
3129 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3130 			     priv->hwts_tx_en)) {
3131 			/* declare that device is doing timestamping */
3132 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3133 			priv->hw->desc->enable_tx_timestamp(first);
3134 		}
3135 
3136 		/* Prepare the first descriptor setting the OWN bit too */
3137 		priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3138 						csum_insertion, priv->mode, 1,
3139 						last_segment, skb->len);
3140 
3141 		/* The own bit must be the latest setting done when preparing the
3142 		 * descriptor, and then a barrier is needed to make sure that
3143 		 * all is coherent before granting the DMA engine.
3144 		 */
3145 		dma_wmb();
3146 	}
3147 
3148 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3149 
3150 	if (priv->synopsys_id < DWMAC_CORE_4_00)
3151 		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3152 	else
3153 		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3154 					       queue);
3155 
3156 	return NETDEV_TX_OK;
3157 
3158 dma_map_err:
3159 	netdev_err(priv->dev, "Tx DMA map failed\n");
3160 	dev_kfree_skb(skb);
3161 	priv->dev->stats.tx_dropped++;
3162 	return NETDEV_TX_OK;
3163 }
3164 
3165 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3166 {
3167 	struct ethhdr *ehdr;
3168 	u16 vlanid;
3169 
3170 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3171 	    NETIF_F_HW_VLAN_CTAG_RX &&
3172 	    !__vlan_get_tag(skb, &vlanid)) {
3173 		/* pop the vlan tag */
3174 		ehdr = (struct ethhdr *)skb->data;
3175 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3176 		skb_pull(skb, VLAN_HLEN);
3177 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3178 	}
3179 }
3180 
3181 
3182 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3183 {
3184 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3185 		return 0;
3186 
3187 	return 1;
3188 }
3189 
3190 /**
3191  * stmmac_rx_refill - refill used skb preallocated buffers
3192  * @priv: driver private structure
3193  * @queue: RX queue index
3194  * Description : this reallocates the skbs used by the zero-copy based
3195  * reception process.
3196  */
3197 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3198 {
3199 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3200 	int dirty = stmmac_rx_dirty(priv, queue);
3201 	unsigned int entry = rx_q->dirty_rx;
3202 
3203 	int bfsize = priv->dma_buf_sz;
3204 
3205 	while (dirty-- > 0) {
3206 		struct dma_desc *p;
3207 
3208 		if (priv->extend_desc)
3209 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3210 		else
3211 			p = rx_q->dma_rx + entry;
3212 
3213 		if (likely(!rx_q->rx_skbuff[entry])) {
3214 			struct sk_buff *skb;
3215 
3216 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3217 			if (unlikely(!skb)) {
3218 				/* so for a while no zero-copy! */
3219 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3220 				if (unlikely(net_ratelimit()))
3221 					dev_err(priv->device,
3222 						"fail to alloc skb entry %d\n",
3223 						entry);
3224 				break;
3225 			}
3226 
3227 			rx_q->rx_skbuff[entry] = skb;
3228 			rx_q->rx_skbuff_dma[entry] =
3229 			    dma_map_single(priv->device, skb->data, bfsize,
3230 					   DMA_FROM_DEVICE);
3231 			if (dma_mapping_error(priv->device,
3232 					      rx_q->rx_skbuff_dma[entry])) {
3233 				netdev_err(priv->dev, "Rx DMA map failed\n");
3234 				dev_kfree_skb(skb);
3235 				break;
3236 			}
3237 
3238 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3239 				p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3240 				p->des1 = 0;
3241 			} else {
3242 				p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3243 			}
3244 			if (priv->hw->mode->refill_desc3)
3245 				priv->hw->mode->refill_desc3(rx_q, p);
3246 
3247 			if (rx_q->rx_zeroc_thresh > 0)
3248 				rx_q->rx_zeroc_thresh--;
3249 
3250 			netif_dbg(priv, rx_status, priv->dev,
3251 				  "refill entry #%d\n", entry);
3252 		}
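		/* Ensure the new buffer address is visible to the device
		 * before the descriptor is handed back to the DMA.
		 */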
3253 		dma_wmb();
3254 
3255 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3256 			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3257 		else
3258 			priv->hw->desc->set_rx_owner(p);
3259 
3260 		dma_wmb();
3261 
3262 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3263 	}
3264 	rx_q->dirty_rx = entry;
3265 }
3266 
3267 /**
3268  * stmmac_rx - manage the receive process
3269  * @priv: driver private structure
3270  * @limit: napi budget
3271  * @queue: RX queue index.
3272  * Description : this is the function called by the napi poll method.
3273  * It gets all the frames from inside the ring.
3274  */
3275 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3276 {
3277 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3278 	unsigned int entry = rx_q->cur_rx;
3279 	int coe = priv->hw->rx_csum;
3280 	unsigned int next_entry;
3281 	unsigned int count = 0;
3282 
3283 	if (netif_msg_rx_status(priv)) {
3284 		void *rx_head;
3285 
3286 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3287 		if (priv->extend_desc)
3288 			rx_head = (void *)rx_q->dma_erx;
3289 		else
3290 			rx_head = (void *)rx_q->dma_rx;
3291 
3292 		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3293 	}
3294 	while (count < limit) {
3295 		int status;
3296 		struct dma_desc *p;
3297 		struct dma_desc *np;
3298 
3299 		if (priv->extend_desc)
3300 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3301 		else
3302 			p = rx_q->dma_rx + entry;
3303 
3304 		/* read the status of the incoming frame */
3305 		status = priv->hw->desc->rx_status(&priv->dev->stats,
3306 						   &priv->xstats, p);
3307 		/* check if managed by the DMA otherwise go ahead */
3308 		if (unlikely(status & dma_own))
3309 			break;
3310 
3311 		count++;
3312 
3313 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3314 		next_entry = rx_q->cur_rx;
3315 
3316 		if (priv->extend_desc)
3317 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3318 		else
3319 			np = rx_q->dma_rx + next_entry;
3320 
3321 		prefetch(np);
3322 
3323 		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3324 			priv->hw->desc->rx_extended_status(&priv->dev->stats,
3325 							   &priv->xstats,
3326 							   rx_q->dma_erx +
3327 							   entry);
3328 		if (unlikely(status == discard_frame)) {
3329 			priv->dev->stats.rx_errors++;
3330 			if (priv->hwts_rx_en && !priv->extend_desc) {
3331 				/* DESC2 & DESC3 will be overwritten by device
3332 				 * with timestamp value, hence reinitialize
3333 				 * them in stmmac_rx_refill() function so that
3334 				 * device can reuse it.
3335 				 */
3336 				rx_q->rx_skbuff[entry] = NULL;
3337 				dma_unmap_single(priv->device,
3338 						 rx_q->rx_skbuff_dma[entry],
3339 						 priv->dma_buf_sz,
3340 						 DMA_FROM_DEVICE);
3341 			}
3342 		} else {
3343 			struct sk_buff *skb;
3344 			int frame_len;
3345 			unsigned int des;
3346 
3347 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3348 				des = le32_to_cpu(p->des0);
3349 			else
3350 				des = le32_to_cpu(p->des2);
3351 
3352 			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3353 
3354 			/*  If frame length is greater than skb buffer size
3355 			 *  (preallocated during init) then the packet is
3356 			 *  ignored
3357 			 */
3358 			if (frame_len > priv->dma_buf_sz) {
3359 				netdev_err(priv->dev,
3360 					   "len %d larger than size (%d)\n",
3361 					   frame_len, priv->dma_buf_sz);
3362 				priv->dev->stats.rx_length_errors++;
3363 				break;
3364 			}
3365 
3366 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3367 			 * Type frames (LLC/LLC-SNAP)
3368 			 */
3369 			if (unlikely(status != llc_snap))
3370 				frame_len -= ETH_FCS_LEN;
3371 
3372 			if (netif_msg_rx_status(priv)) {
3373 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3374 					   p, entry, des);
3375 				if (frame_len > ETH_FRAME_LEN)
3376 					netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3377 						   frame_len, status);
3378 			}
3379 
3380 			/* Zero-copy is always used for all sizes in case of
3381 			 * GMAC4 because the used descriptors always need to
3382 			 * be refilled.
3383 			 */
3384 			if (unlikely(!priv->plat->has_gmac4 &&
3385 				     ((frame_len < priv->rx_copybreak) ||
3386 				     stmmac_rx_threshold_count(rx_q)))) {
3387 				skb = netdev_alloc_skb_ip_align(priv->dev,
3388 								frame_len);
3389 				if (unlikely(!skb)) {
3390 					if (net_ratelimit())
3391 						dev_warn(priv->device,
3392 							 "packet dropped\n");
3393 					priv->dev->stats.rx_dropped++;
3394 					break;
3395 				}
3396 
3397 				dma_sync_single_for_cpu(priv->device,
3398 							rx_q->rx_skbuff_dma[entry],
3399 							frame_len,
3400 							DMA_FROM_DEVICE);
3401 				skb_copy_to_linear_data(skb,
3402 							rx_q->rx_skbuff[entry]->data,
3403 							frame_len);
3404 
3405 				skb_put(skb, frame_len);
3406 				dma_sync_single_for_device(priv->device,
3407 							   rx_q->rx_skbuff_dma[entry],
3408 							   frame_len,
3409 							   DMA_FROM_DEVICE);
3411 			} else {
3412 				skb = rx_q->rx_skbuff[entry];
3413 				if (unlikely(!skb)) {
3414 					netdev_err(priv->dev,
3415 						   "%s: Inconsistent Rx chain\n",
3416 						   priv->dev->name);
3417 					priv->dev->stats.rx_dropped++;
3418 					break;
3419 				}
3420 				prefetch(skb->data - NET_IP_ALIGN);
3421 				rx_q->rx_skbuff[entry] = NULL;
3422 				rx_q->rx_zeroc_thresh++;
3423 
3424 				skb_put(skb, frame_len);
3425 				dma_unmap_single(priv->device,
3426 						 rx_q->rx_skbuff_dma[entry],
3427 						 priv->dma_buf_sz,
3428 						 DMA_FROM_DEVICE);
3429 			}
3430 
3431 			if (netif_msg_pktdata(priv)) {
3432 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3433 					   frame_len);
3434 				print_pkt(skb->data, frame_len);
3435 			}
3436 
3437 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3438 
3439 			stmmac_rx_vlan(priv->dev, skb);
3440 
3441 			skb->protocol = eth_type_trans(skb, priv->dev);
3442 
3443 			if (unlikely(!coe))
3444 				skb_checksum_none_assert(skb);
3445 			else
3446 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3447 
3448 			napi_gro_receive(&rx_q->napi, skb);
3449 
3450 			priv->dev->stats.rx_packets++;
3451 			priv->dev->stats.rx_bytes += frame_len;
3452 		}
3453 		entry = next_entry;
3454 	}
3455 
3456 	stmmac_rx_refill(priv, queue);
3457 
3458 	priv->xstats.rx_pkt_n += count;
3459 
3460 	return count;
3461 }
3462 
3463 /**
3464  *  stmmac_poll - stmmac poll method (NAPI)
3465  *  @napi : pointer to the napi structure.
3466  *  @budget : maximum number of packets that the current CPU can receive from
3467  *	      all interfaces.
3468  *  Description :
3469  *  It looks at the incoming frames and cleans the tx resources.
3470  */
3471 static int stmmac_poll(struct napi_struct *napi, int budget)
3472 {
3473 	struct stmmac_rx_queue *rx_q =
3474 		container_of(napi, struct stmmac_rx_queue, napi);
3475 	struct stmmac_priv *priv = rx_q->priv_data;
3476 	u32 tx_count = priv->plat->tx_queues_to_use;
3477 	u32 chan = rx_q->queue_index;
3478 	int work_done = 0;
3479 	u32 queue;
3480 
3481 	priv->xstats.napi_poll++;
3482 
3483 	/* check all the queues */
3484 	for (queue = 0; queue < tx_count; queue++)
3485 		stmmac_tx_clean(priv, queue);
3486 
3487 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3488 	if (work_done < budget) {
3489 		napi_complete_done(napi, work_done);
3490 		stmmac_enable_dma_irq(priv, chan);
3491 	}
3492 	return work_done;
3493 }
3494 
3495 /**
3496  *  stmmac_tx_timeout
3497  *  @dev : Pointer to net device structure
3498  *  Description: this function is called when a packet transmission fails to
3499  *   complete within a reasonable time. The driver will mark the error in the
3500  *   netdev structure and arrange for the device to be reset to a sane state
3501  *   in order to transmit a new packet.
3502  */
3503 static void stmmac_tx_timeout(struct net_device *dev)
3504 {
3505 	struct stmmac_priv *priv = netdev_priv(dev);
3506 	u32 tx_count = priv->plat->tx_queues_to_use;
3507 	u32 chan;
3508 
3509 	/* Clear Tx resources and restart transmitting again */
3510 	for (chan = 0; chan < tx_count; chan++)
3511 		stmmac_tx_err(priv, chan);
3512 }
3513 
3514 /**
3515  *  stmmac_set_rx_mode - entry point for multicast addressing
3516  *  @dev : pointer to the device structure
3517  *  Description:
3518  *  This function is a driver entry point which gets called by the kernel
3519  *  whenever multicast addresses must be enabled/disabled.
3520  *  Return value:
3521  *  void.
3522  */
3523 static void stmmac_set_rx_mode(struct net_device *dev)
3524 {
3525 	struct stmmac_priv *priv = netdev_priv(dev);
3526 
3527 	priv->hw->mac->set_filter(priv->hw, dev);
3528 }
3529 
3530 /**
3531  *  stmmac_change_mtu - entry point to change MTU size for the device.
3532  *  @dev : device pointer.
3533  *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transmission Unit (MTU) is used by the network
 *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig or ip link.
 *  Return value:
 *  0 on success and a negative errno value on failure.
3540  */
3541 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3542 {
3543 	struct stmmac_priv *priv = netdev_priv(dev);
3544 
3545 	if (netif_running(dev)) {
3546 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3547 		return -EBUSY;
3548 	}
3549 
3550 	dev->mtu = new_mtu;
3551 
3552 	netdev_update_features(dev);
3553 
3554 	return 0;
3555 }
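
/* Illustrative only: since stmmac_change_mtu() above returns -EBUSY while the
 * interface is running, user space has to bring the link down first. A hedged
 * example sequence (the interface name "eth0" is an assumption):
 *
 *	ip link set dev eth0 down
 *	ip link set dev eth0 mtu 9000
 *	ip link set dev eth0 up
 */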
3556 
3557 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3558 					     netdev_features_t features)
3559 {
3560 	struct stmmac_priv *priv = netdev_priv(dev);
3561 
3562 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3563 		features &= ~NETIF_F_RXCSUM;
3564 
3565 	if (!priv->plat->tx_coe)
3566 		features &= ~NETIF_F_CSUM_MASK;
3567 
	/* Some GMAC devices have buggy Jumbo frame support that
	 * requires the Tx COE to be disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the Tx csum insertion in the TDES and do not use the
	 * Store-and-Forward (SF) mode.
	 */
3573 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3574 		features &= ~NETIF_F_CSUM_MASK;
3575 
	/* Enable or disable TSO as requested via ethtool */
3577 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3578 		if (features & NETIF_F_TSO)
3579 			priv->tso = true;
3580 		else
3581 			priv->tso = false;
3582 	}
3583 
3584 	return features;
3585 }
3586 
3587 static int stmmac_set_features(struct net_device *netdev,
3588 			       netdev_features_t features)
3589 {
3590 	struct stmmac_priv *priv = netdev_priv(netdev);
3591 
	/* Keep the COE Type if RX csum offload is supported */
3593 	if (features & NETIF_F_RXCSUM)
3594 		priv->hw->rx_csum = priv->plat->rx_coe;
3595 	else
3596 		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been validated earlier and will
	 * be fixed up if there is an issue.
	 */
3600 	priv->hw->mac->rx_ipc(priv->hw);
3601 
3602 	return 0;
3603 }
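
/* Illustrative only: stmmac_fix_features()/stmmac_set_features() are invoked
 * through the standard netdev feature interface, typically driven by ethtool.
 * A hedged example of toggling the RX checksum and TSO offloads handled above
 * (the interface name "eth0" is an assumption):
 *
 *	ethtool -K eth0 rx off tso off
 *	ethtool -K eth0 rx on tso on
 */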
3604 
3605 /**
3606  *  stmmac_interrupt - main ISR
3607  *  @irq: interrupt number.
3608  *  @dev_id: to pass the net device pointer.
3609  *  Description: this is the main driver interrupt service routine.
3610  *  It can call:
3611  *  o DMA service routine (to manage incoming frame reception and transmission
3612  *    status)
3613  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3614  *    interrupts.
3615  */
3616 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3617 {
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv;
	u32 rx_cnt, tx_cnt;
	u32 queues_count;
	u32 queue;

	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	priv = netdev_priv(dev);
	rx_cnt = priv->plat->rx_queues_to_use;
	tx_cnt = priv->plat->tx_queues_to_use;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);
3634 
	/* Handle the GMAC core's own interrupts */
3636 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3637 		int status = priv->hw->mac->host_irq_status(priv->hw,
3638 							    &priv->xstats);
3639 
3640 		if (unlikely(status)) {
3641 			/* For LPI we need to save the tx status */
3642 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3643 				priv->tx_path_in_lpi_mode = true;
3644 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3645 				priv->tx_path_in_lpi_mode = false;
3646 		}
3647 
3648 		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3649 			for (queue = 0; queue < queues_count; queue++) {
3650 				struct stmmac_rx_queue *rx_q =
3651 				&priv->rx_queue[queue];
3652 
3653 				status |=
3654 				priv->hw->mac->host_mtl_irq_status(priv->hw,
3655 								   queue);
3656 
3657 				if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3658 				    priv->hw->dma->set_rx_tail_ptr)
3659 					priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3660 								rx_q->rx_tail_addr,
3661 								queue);
3662 			}
3663 		}
3664 
3665 		/* PCS link status */
3666 		if (priv->hw->pcs) {
3667 			if (priv->xstats.pcs_link)
3668 				netif_carrier_on(dev);
3669 			else
3670 				netif_carrier_off(dev);
3671 		}
3672 	}
3673 
3674 	/* To handle DMA interrupts */
3675 	stmmac_dma_interrupt(priv);
3676 
3677 	return IRQ_HANDLED;
3678 }
3679 
3680 #ifdef CONFIG_NET_POLL_CONTROLLER
3681 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3682  * to allow network I/O with interrupts disabled.
3683  */
3684 static void stmmac_poll_controller(struct net_device *dev)
3685 {
3686 	disable_irq(dev->irq);
3687 	stmmac_interrupt(dev->irq, dev);
3688 	enable_irq(dev->irq);
3689 }
3690 #endif
3691 
3692 /**
 *  stmmac_ioctl - entry point for the ioctl
 *  @dev: device pointer.
 *  @rq: an ioctl-specific structure that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
3697  *  @cmd: IOCTL command
3698  *  Description:
3699  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3700  */
3701 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3702 {
3703 	int ret = -EOPNOTSUPP;
3704 
3705 	if (!netif_running(dev))
3706 		return -EINVAL;
3707 
3708 	switch (cmd) {
3709 	case SIOCGMIIPHY:
3710 	case SIOCGMIIREG:
3711 	case SIOCSMIIREG:
3712 		if (!dev->phydev)
3713 			return -EINVAL;
3714 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3715 		break;
3716 	case SIOCSHWTSTAMP:
3717 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3718 		break;
3719 	default:
3720 		break;
3721 	}
3722 
3723 	return ret;
3724 }
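
/* Illustrative only: a minimal user-space sketch of the SIOCSHWTSTAMP path
 * handled above. The hwtstamp_config definitions come from
 * <linux/net_tstamp.h>; the socket fd and the interface name "eth0" are
 * assumptions, not part of this driver:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */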
3725 
3726 #ifdef CONFIG_DEBUG_FS
3727 static struct dentry *stmmac_fs_dir;
3728 
3729 static void sysfs_display_ring(void *head, int size, int extend_desc,
3730 			       struct seq_file *seq)
3731 {
3732 	int i;
3733 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3734 	struct dma_desc *p = (struct dma_desc *)head;
3735 
3736 	for (i = 0; i < size; i++) {
3737 		if (extend_desc) {
3738 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3739 				   i, (unsigned int)virt_to_phys(ep),
3740 				   le32_to_cpu(ep->basic.des0),
3741 				   le32_to_cpu(ep->basic.des1),
3742 				   le32_to_cpu(ep->basic.des2),
3743 				   le32_to_cpu(ep->basic.des3));
3744 			ep++;
3745 		} else {
3746 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3747 				   i, (unsigned int)virt_to_phys(p),
3748 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3749 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3750 			p++;
3751 		}
3752 		seq_printf(seq, "\n");
3753 	}
3754 }
3755 
3756 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3757 {
3758 	struct net_device *dev = seq->private;
3759 	struct stmmac_priv *priv = netdev_priv(dev);
3760 	u32 rx_count = priv->plat->rx_queues_to_use;
3761 	u32 tx_count = priv->plat->tx_queues_to_use;
3762 	u32 queue;
3763 
3764 	for (queue = 0; queue < rx_count; queue++) {
3765 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3766 
3767 		seq_printf(seq, "RX Queue %d:\n", queue);
3768 
3769 		if (priv->extend_desc) {
3770 			seq_printf(seq, "Extended descriptor ring:\n");
3771 			sysfs_display_ring((void *)rx_q->dma_erx,
3772 					   DMA_RX_SIZE, 1, seq);
3773 		} else {
3774 			seq_printf(seq, "Descriptor ring:\n");
3775 			sysfs_display_ring((void *)rx_q->dma_rx,
3776 					   DMA_RX_SIZE, 0, seq);
3777 		}
3778 	}
3779 
3780 	for (queue = 0; queue < tx_count; queue++) {
3781 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3782 
3783 		seq_printf(seq, "TX Queue %d:\n", queue);
3784 
3785 		if (priv->extend_desc) {
3786 			seq_printf(seq, "Extended descriptor ring:\n");
3787 			sysfs_display_ring((void *)tx_q->dma_etx,
3788 					   DMA_TX_SIZE, 1, seq);
3789 		} else {
3790 			seq_printf(seq, "Descriptor ring:\n");
3791 			sysfs_display_ring((void *)tx_q->dma_tx,
3792 					   DMA_TX_SIZE, 0, seq);
3793 		}
3794 	}
3795 
3796 	return 0;
3797 }
3798 
3799 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3800 {
3801 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3802 }
3803 
/* Debugfs files; they should appear in /sys/kernel/debug/stmmaceth/eth0 */
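
/* Illustrative only: with debugfs mounted at its default location, the
 * entries created below can be dumped from user space, e.g.:
 *
 *	cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *	cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */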
3805 
3806 static const struct file_operations stmmac_rings_status_fops = {
3807 	.owner = THIS_MODULE,
3808 	.open = stmmac_sysfs_ring_open,
3809 	.read = seq_read,
3810 	.llseek = seq_lseek,
3811 	.release = single_release,
3812 };
3813 
3814 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3815 {
3816 	struct net_device *dev = seq->private;
3817 	struct stmmac_priv *priv = netdev_priv(dev);
3818 
3819 	if (!priv->hw_cap_support) {
3820 		seq_printf(seq, "DMA HW features not supported\n");
3821 		return 0;
3822 	}
3823 
3824 	seq_printf(seq, "==============================\n");
3825 	seq_printf(seq, "\tDMA HW features\n");
3826 	seq_printf(seq, "==============================\n");
3827 
3828 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3829 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3830 	seq_printf(seq, "\t1000 Mbps: %s\n",
3831 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3832 	seq_printf(seq, "\tHalf duplex: %s\n",
3833 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3834 	seq_printf(seq, "\tHash Filter: %s\n",
3835 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3836 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3837 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3838 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3839 		   (priv->dma_cap.pcs) ? "Y" : "N");
3840 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3841 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3842 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3843 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3844 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3845 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3846 	seq_printf(seq, "\tRMON module: %s\n",
3847 		   (priv->dma_cap.rmon) ? "Y" : "N");
3848 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3849 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3850 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3851 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3852 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3853 		   (priv->dma_cap.eee) ? "Y" : "N");
3854 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3855 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3856 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3857 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3858 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3859 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3860 	} else {
3861 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3862 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3863 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3864 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3865 	}
3866 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3867 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
		   priv->dma_cap.number_tx_channel);
3872 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3873 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3874 
3875 	return 0;
3876 }
3877 
3878 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3879 {
3880 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3881 }
3882 
3883 static const struct file_operations stmmac_dma_cap_fops = {
3884 	.owner = THIS_MODULE,
3885 	.open = stmmac_sysfs_dma_cap_open,
3886 	.read = seq_read,
3887 	.llseek = seq_lseek,
3888 	.release = single_release,
3889 };
3890 
3891 static int stmmac_init_fs(struct net_device *dev)
3892 {
3893 	struct stmmac_priv *priv = netdev_priv(dev);
3894 
3895 	/* Create per netdev entries */
3896 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3897 
3898 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3899 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3900 
3901 		return -ENOMEM;
3902 	}
3903 
3904 	/* Entry to report DMA RX/TX rings */
3905 	priv->dbgfs_rings_status =
3906 		debugfs_create_file("descriptors_status", S_IRUGO,
3907 				    priv->dbgfs_dir, dev,
3908 				    &stmmac_rings_status_fops);
3909 
3910 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3911 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3912 		debugfs_remove_recursive(priv->dbgfs_dir);
3913 
3914 		return -ENOMEM;
3915 	}
3916 
3917 	/* Entry to report the DMA HW features */
3918 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3919 					    priv->dbgfs_dir,
3920 					    dev, &stmmac_dma_cap_fops);
3921 
3922 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3923 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3924 		debugfs_remove_recursive(priv->dbgfs_dir);
3925 
3926 		return -ENOMEM;
3927 	}
3928 
3929 	return 0;
3930 }
3931 
3932 static void stmmac_exit_fs(struct net_device *dev)
3933 {
3934 	struct stmmac_priv *priv = netdev_priv(dev);
3935 
3936 	debugfs_remove_recursive(priv->dbgfs_dir);
3937 }
3938 #endif /* CONFIG_DEBUG_FS */
3939 
3940 static const struct net_device_ops stmmac_netdev_ops = {
3941 	.ndo_open = stmmac_open,
3942 	.ndo_start_xmit = stmmac_xmit,
3943 	.ndo_stop = stmmac_release,
3944 	.ndo_change_mtu = stmmac_change_mtu,
3945 	.ndo_fix_features = stmmac_fix_features,
3946 	.ndo_set_features = stmmac_set_features,
3947 	.ndo_set_rx_mode = stmmac_set_rx_mode,
3948 	.ndo_tx_timeout = stmmac_tx_timeout,
3949 	.ndo_do_ioctl = stmmac_ioctl,
3950 #ifdef CONFIG_NET_POLL_CONTROLLER
3951 	.ndo_poll_controller = stmmac_poll_controller,
3952 #endif
3953 	.ndo_set_mac_address = eth_mac_addr,
3954 };
3955 
3956 /**
3957  *  stmmac_hw_init - Init the MAC device
3958  *  @priv: driver private structure
 *  Description: this function configures the MAC device according to
 *  platform parameters or the HW capability register. It prepares the
 *  driver to use either ring or chain mode and to use either enhanced or
 *  normal descriptors.
3963  */
3964 static int stmmac_hw_init(struct stmmac_priv *priv)
3965 {
3966 	struct mac_device_info *mac;
3967 
3968 	/* Identify the MAC HW device */
3969 	if (priv->plat->setup) {
3970 		mac = priv->plat->setup(priv);
3971 	} else if (priv->plat->has_gmac) {
3972 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
3973 		mac = dwmac1000_setup(priv->ioaddr,
3974 				      priv->plat->multicast_filter_bins,
3975 				      priv->plat->unicast_filter_entries,
3976 				      &priv->synopsys_id);
3977 	} else if (priv->plat->has_gmac4) {
3978 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
3979 		mac = dwmac4_setup(priv->ioaddr,
3980 				   priv->plat->multicast_filter_bins,
3981 				   priv->plat->unicast_filter_entries,
3982 				   &priv->synopsys_id);
3983 	} else {
3984 		mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3985 	}
3986 	if (!mac)
3987 		return -ENOMEM;
3988 
3989 	priv->hw = mac;
3990 
	/* dwmac-sun8i only works in chain mode */
3992 	if (priv->plat->has_sun8i)
3993 		chain_mode = 1;
3994 
3995 	/* To use the chained or ring mode */
3996 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3997 		priv->hw->mode = &dwmac4_ring_mode_ops;
3998 	} else {
3999 		if (chain_mode) {
4000 			priv->hw->mode = &chain_mode_ops;
4001 			dev_info(priv->device, "Chain mode enabled\n");
4002 			priv->mode = STMMAC_CHAIN_MODE;
4003 		} else {
4004 			priv->hw->mode = &ring_mode_ops;
4005 			dev_info(priv->device, "Ring mode enabled\n");
4006 			priv->mode = STMMAC_RING_MODE;
4007 		}
4008 	}
4009 
	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
4011 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4012 	if (priv->hw_cap_support) {
4013 		dev_info(priv->device, "DMA HW capability register supported\n");
4014 
		/* We can override some gmac/dma configuration fields
		 * passed through the platform (e.g. enh_desc, tx_coe)
		 * with the values from the HW capability register
		 * (if supported).
		 */
4020 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4021 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4022 		priv->hw->pmt = priv->plat->pmt;
4023 
4024 		/* TXCOE doesn't work in thresh DMA mode */
4025 		if (priv->plat->force_thresh_dma_mode)
4026 			priv->plat->tx_coe = 0;
4027 		else
4028 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4029 
		/* In the case of GMAC4, rx_coe comes from the HW capability
		 * register.
		 */
4031 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4032 
4033 		if (priv->dma_cap.rx_coe_type2)
4034 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4035 		else if (priv->dma_cap.rx_coe_type1)
4036 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4037 
4038 	} else {
4039 		dev_info(priv->device, "No HW DMA feature register supported\n");
4040 	}
4041 
4042 	/* To use alternate (extended), normal or GMAC4 descriptor structures */
4043 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
4044 		priv->hw->desc = &dwmac4_desc_ops;
4045 	else
4046 		stmmac_selec_desc_mode(priv);
4047 
4048 	if (priv->plat->rx_coe) {
4049 		priv->hw->rx_csum = priv->plat->rx_coe;
4050 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4051 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4052 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4053 	}
4054 	if (priv->plat->tx_coe)
4055 		dev_info(priv->device, "TX Checksum insertion supported\n");
4056 
4057 	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-on-LAN supported\n");
4059 		device_set_wakeup_capable(priv->device, 1);
4060 	}
4061 
4062 	if (priv->dma_cap.tsoen)
4063 		dev_info(priv->device, "TSO supported\n");
4064 
4065 	return 0;
4066 }
4067 
4068 /**
4069  * stmmac_dvr_probe
4070  * @device: device pointer
4071  * @plat_dat: platform data pointer
4072  * @res: stmmac resource pointer
 * Description: this is the main probe function. It allocates the network
 * device via alloc_etherdev_mqs and sets up the private structure.
4075  * Return:
4076  * returns 0 on success, otherwise errno.
4077  */
4078 int stmmac_dvr_probe(struct device *device,
4079 		     struct plat_stmmacenet_data *plat_dat,
4080 		     struct stmmac_resources *res)
4081 {
4082 	struct net_device *ndev = NULL;
4083 	struct stmmac_priv *priv;
4084 	int ret = 0;
4085 	u32 queue;
4086 
4087 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4088 				  MTL_MAX_TX_QUEUES,
4089 				  MTL_MAX_RX_QUEUES);
4090 	if (!ndev)
4091 		return -ENOMEM;
4092 
4093 	SET_NETDEV_DEV(ndev, device);
4094 
4095 	priv = netdev_priv(ndev);
4096 	priv->device = device;
4097 	priv->dev = ndev;
4098 
4099 	stmmac_set_ethtool_ops(ndev);
4100 	priv->pause = pause;
4101 	priv->plat = plat_dat;
4102 	priv->ioaddr = res->addr;
4103 	priv->dev->base_addr = (unsigned long)res->addr;
4104 
4105 	priv->dev->irq = res->irq;
4106 	priv->wol_irq = res->wol_irq;
4107 	priv->lpi_irq = res->lpi_irq;
4108 
4109 	if (res->mac)
4110 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4111 
4112 	dev_set_drvdata(device, priv->dev);
4113 
4114 	/* Verify driver arguments */
4115 	stmmac_verify_args();
4116 
	/* Override with kernel parameters if supplied. XXX: this needs
	 * to support multiple instances.
	 */
4120 	if ((phyaddr >= 0) && (phyaddr <= 31))
4121 		priv->plat->phy_addr = phyaddr;
4122 
4123 	if (priv->plat->stmmac_rst) {
4124 		ret = reset_control_assert(priv->plat->stmmac_rst);
4125 		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers provide only a reset callback
		 * instead of an assert + deassert callback pair.
		 */
4129 		if (ret == -ENOTSUPP)
4130 			reset_control_reset(priv->plat->stmmac_rst);
4131 	}
4132 
4133 	/* Init MAC and get the capabilities */
4134 	ret = stmmac_hw_init(priv);
4135 	if (ret)
4136 		goto error_hw_init;
4137 
4138 	/* Configure real RX and TX queues */
4139 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4140 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4141 
4142 	ndev->netdev_ops = &stmmac_netdev_ops;
4143 
4144 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4145 			    NETIF_F_RXCSUM;
4146 
4147 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4148 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4149 		priv->tso = true;
4150 		dev_info(priv->device, "TSO feature enabled\n");
4151 	}
4152 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4153 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4154 #ifdef STMMAC_VLAN_TAG_USED
4155 	/* Both mac100 and gmac support receive VLAN tag detection */
4156 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4157 #endif
4158 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4159 
4160 	/* MTU range: 46 - hw-specific max */
4161 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4162 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4163 		ndev->max_mtu = JUMBO_LEN;
4164 	else
4165 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
	 * and do not use plat->maxmtu < ndev->min_mtu, which is an invalid
	 * range.
	 */
4169 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4170 	    (priv->plat->maxmtu >= ndev->min_mtu))
4171 		ndev->max_mtu = priv->plat->maxmtu;
4172 	else if (priv->plat->maxmtu < ndev->min_mtu)
4173 		dev_warn(priv->device,
			 "%s: warning: maxmtu has an invalid value (%d)\n",
4175 			 __func__, priv->plat->maxmtu);
4176 
4177 	if (flow_ctrl)
4178 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4179 
	/* The Rx watchdog is available on cores newer than 3.40.
	 * In some cases, for example on buggy HW, this feature
	 * has to be disabled; this can be done by setting the
	 * riwt_off field in the platform data.
	 */
4185 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4186 		priv->use_riwt = 1;
4187 		dev_info(priv->device,
4188 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4189 	}
4190 
4191 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4192 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4193 
4194 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4195 			       (8 * priv->plat->rx_queues_to_use));
4196 	}
4197 
4198 	spin_lock_init(&priv->lock);
4199 
	/* If a specific clk_csr value is passed from the platform,
	 * the CSR Clock Range selection cannot be changed at run-time
	 * and is fixed. Otherwise the driver tries to set the MDC clock
	 * dynamically according to the actual csr clock input.
	 */
4206 	if (!priv->plat->clk_csr)
4207 		stmmac_clk_csr_set(priv);
4208 	else
4209 		priv->clk_csr = priv->plat->clk_csr;
4210 
4211 	stmmac_check_pcs_mode(priv);
4212 
4213 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4214 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4215 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4216 		/* MDIO bus Registration */
4217 		ret = stmmac_mdio_register(ndev);
4218 		if (ret < 0) {
4219 			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed\n",
4221 				__func__, priv->plat->bus_id);
4222 			goto error_mdio_register;
4223 		}
4224 	}
4225 
4226 	ret = register_netdev(ndev);
4227 	if (ret) {
4228 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4229 			__func__, ret);
4230 		goto error_netdev_register;
4231 	}
4232 
4233 	return ret;
4234 
4235 error_netdev_register:
4236 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4237 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4238 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4239 		stmmac_mdio_unregister(ndev);
4240 error_mdio_register:
4241 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4242 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4243 
4244 		netif_napi_del(&rx_q->napi);
4245 	}
4246 error_hw_init:
4247 	free_netdev(ndev);
4248 
4249 	return ret;
4250 }
4251 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
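
/* Illustrative only: a hedged sketch of how a platform glue layer could hand
 * its resources to stmmac_dvr_probe(). The helper foo_get_plat_data() and the
 * "foo" naming are assumptions for illustration; only the stmmac_resources
 * fields consumed above are taken from this file.
 *
 *	static int foo_dwmac_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat_dat = foo_get_plat_data(pdev);
 *		struct stmmac_resources res = { 0 };
 *		struct resource *r;
 *
 *		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		res.addr = devm_ioremap_resource(&pdev->dev, r);
 *		if (IS_ERR(res.addr))
 *			return PTR_ERR(res.addr);
 *		res.irq = platform_get_irq(pdev, 0);
 *
 *		return stmmac_dvr_probe(&pdev->dev, plat_dat, &res);
 *	}
 */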
4252 
4253 /**
4254  * stmmac_dvr_remove
4255  * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
4258  */
4259 int stmmac_dvr_remove(struct device *dev)
4260 {
4261 	struct net_device *ndev = dev_get_drvdata(dev);
4262 	struct stmmac_priv *priv = netdev_priv(ndev);
4263 
	netdev_info(priv->dev, "%s: removing driver\n", __func__);
4265 
4266 	stmmac_stop_all_dma(priv);
4267 
4268 	priv->hw->mac->set_mac(priv->ioaddr, false);
4269 	netif_carrier_off(ndev);
4270 	unregister_netdev(ndev);
4271 	if (priv->plat->stmmac_rst)
4272 		reset_control_assert(priv->plat->stmmac_rst);
4273 	clk_disable_unprepare(priv->plat->pclk);
4274 	clk_disable_unprepare(priv->plat->stmmac_clk);
4275 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4276 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4277 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4278 		stmmac_mdio_unregister(ndev);
4279 	free_netdev(ndev);
4280 
4281 	return 0;
4282 }
4283 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4284 
4285 /**
4286  * stmmac_suspend - suspend callback
4287  * @dev: device pointer
 * Description: this function suspends the device. It is called by the
 * platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL) and clean up driver resources.
4291  */
4292 int stmmac_suspend(struct device *dev)
4293 {
4294 	struct net_device *ndev = dev_get_drvdata(dev);
4295 	struct stmmac_priv *priv = netdev_priv(ndev);
4296 	unsigned long flags;
4297 
4298 	if (!ndev || !netif_running(ndev))
4299 		return 0;
4300 
4301 	if (ndev->phydev)
4302 		phy_stop(ndev->phydev);
4303 
4304 	spin_lock_irqsave(&priv->lock, flags);
4305 
4306 	netif_device_detach(ndev);
4307 	stmmac_stop_all_queues(priv);
4308 
4309 	stmmac_disable_all_queues(priv);
4310 
4311 	/* Stop TX/RX DMA */
4312 	stmmac_stop_all_dma(priv);
4313 
4314 	/* Enable Power down mode by programming the PMT regs */
4315 	if (device_may_wakeup(priv->device)) {
4316 		priv->hw->mac->pmt(priv->hw, priv->wolopts);
4317 		priv->irq_wake = 1;
4318 	} else {
4319 		priv->hw->mac->set_mac(priv->ioaddr, false);
4320 		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable the clocks since wake-up (PMT) is not used */
4322 		clk_disable(priv->plat->pclk);
4323 		clk_disable(priv->plat->stmmac_clk);
4324 	}
4325 	spin_unlock_irqrestore(&priv->lock, flags);
4326 
4327 	priv->oldlink = false;
4328 	priv->speed = SPEED_UNKNOWN;
4329 	priv->oldduplex = DUPLEX_UNKNOWN;
4330 	return 0;
4331 }
4332 EXPORT_SYMBOL_GPL(stmmac_suspend);
4333 
4334 /**
4335  * stmmac_reset_queues_param - reset queue parameters
4336  * @dev: device pointer
4337  */
4338 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4339 {
4340 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4341 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4342 	u32 queue;
4343 
4344 	for (queue = 0; queue < rx_cnt; queue++) {
4345 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4346 
4347 		rx_q->cur_rx = 0;
4348 		rx_q->dirty_rx = 0;
4349 	}
4350 
4351 	for (queue = 0; queue < tx_cnt; queue++) {
4352 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4353 
4354 		tx_q->cur_tx = 0;
4355 		tx_q->dirty_tx = 0;
4356 	}
4357 }
4358 
4359 /**
4360  * stmmac_resume - resume callback
4361  * @dev: device pointer
4362  * Description: when resume this function is invoked to setup the DMA and CORE
4363  * in a usable state.
4364  */
4365 int stmmac_resume(struct device *dev)
4366 {
4367 	struct net_device *ndev = dev_get_drvdata(dev);
4368 	struct stmmac_priv *priv = netdev_priv(ndev);
4369 	unsigned long flags;
4370 
4371 	if (!netif_running(ndev))
4372 		return 0;
4373 
	/* The Power Down bit in the PMT register is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from other devices (e.g. serial console).
4379 	 */
4380 	if (device_may_wakeup(priv->device)) {
4381 		spin_lock_irqsave(&priv->lock, flags);
4382 		priv->hw->mac->pmt(priv->hw, 0);
4383 		spin_unlock_irqrestore(&priv->lock, flags);
4384 		priv->irq_wake = 0;
4385 	} else {
4386 		pinctrl_pm_select_default_state(priv->device);
4387 		/* enable the clk previously disabled */
4388 		clk_enable(priv->plat->stmmac_clk);
4389 		clk_enable(priv->plat->pclk);
4390 		/* reset the phy so that it's ready */
4391 		if (priv->mii)
4392 			stmmac_mdio_reset(priv->mii);
4393 	}
4394 
4395 	netif_device_attach(ndev);
4396 
4397 	spin_lock_irqsave(&priv->lock, flags);
4398 
4399 	stmmac_reset_queues_param(priv);
4400 
	/* Reset the private MSS value to force the MSS context to be
	 * programmed at the next TSO xmit (only used for GMAC4).
	 */
4404 	priv->mss = 0;
4405 
4406 	stmmac_clear_descriptors(priv);
4407 
4408 	stmmac_hw_setup(ndev, false);
4409 	stmmac_init_tx_coalesce(priv);
4410 	stmmac_set_rx_mode(ndev);
4411 
4412 	stmmac_enable_all_queues(priv);
4413 
4414 	stmmac_start_all_queues(priv);
4415 
4416 	spin_unlock_irqrestore(&priv->lock, flags);
4417 
4418 	if (ndev->phydev)
4419 		phy_start(ndev->phydev);
4420 
4421 	return 0;
4422 }
4423 EXPORT_SYMBOL_GPL(stmmac_resume);
4424 
4425 #ifndef MODULE
4426 static int __init stmmac_cmdline_opt(char *str)
4427 {
4428 	char *opt;
4429 
4430 	if (!str || !*str)
4431 		return -EINVAL;
4432 	while ((opt = strsep(&str, ",")) != NULL) {
4433 		if (!strncmp(opt, "debug:", 6)) {
4434 			if (kstrtoint(opt + 6, 0, &debug))
4435 				goto err;
4436 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4437 			if (kstrtoint(opt + 8, 0, &phyaddr))
4438 				goto err;
4439 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4440 			if (kstrtoint(opt + 7, 0, &buf_sz))
4441 				goto err;
4442 		} else if (!strncmp(opt, "tc:", 3)) {
4443 			if (kstrtoint(opt + 3, 0, &tc))
4444 				goto err;
4445 		} else if (!strncmp(opt, "watchdog:", 9)) {
4446 			if (kstrtoint(opt + 9, 0, &watchdog))
4447 				goto err;
4448 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4449 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4450 				goto err;
4451 		} else if (!strncmp(opt, "pause:", 6)) {
4452 			if (kstrtoint(opt + 6, 0, &pause))
4453 				goto err;
4454 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4455 			if (kstrtoint(opt + 10, 0, &eee_timer))
4456 				goto err;
4457 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4458 			if (kstrtoint(opt + 11, 0, &chain_mode))
4459 				goto err;
4460 		}
4461 	}
4462 	return 0;
4463 
4464 err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4466 	return -EINVAL;
4467 }
4468 
4469 __setup("stmmaceth=", stmmac_cmdline_opt);
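
/* Illustrative only: when the driver is built in, the parser above accepts a
 * comma-separated "stmmaceth=" option on the kernel command line, e.g.:
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:5000,eee_timer:1000
 *
 * (the values shown are arbitrary examples, not recommendations).
 */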
4470 #endif /* MODULE */
4471 
4472 static int __init stmmac_init(void)
4473 {
4474 #ifdef CONFIG_DEBUG_FS
4475 	/* Create debugfs main directory if it doesn't exist yet */
4476 	if (!stmmac_fs_dir) {
4477 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4478 
4479 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4480 			pr_err("ERROR %s, debugfs create directory failed\n",
4481 			       STMMAC_RESOURCE_NAME);
4482 
4483 			return -ENOMEM;
4484 		}
4485 	}
4486 #endif
4487 
4488 	return 0;
4489 }
4490 
4491 static void __exit stmmac_exit(void)
4492 {
4493 #ifdef CONFIG_DEBUG_FS
4494 	debugfs_remove_recursive(stmmac_fs_dir);
4495 #endif
4496 }
4497 
4498 module_init(stmmac_init)
4499 module_exit(stmmac_exit)
4500 
4501 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4502 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4503 MODULE_LICENSE("GPL");
4504