1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "hwif.h"
55 
56 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
57 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
58 
59 /* Module parameters */
60 #define TX_TIMEO	5000
61 static int watchdog = TX_TIMEO;
62 module_param(watchdog, int, 0644);
63 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
64 
65 static int debug = -1;
66 module_param(debug, int, 0644);
67 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
68 
69 static int phyaddr = -1;
70 module_param(phyaddr, int, 0444);
71 MODULE_PARM_DESC(phyaddr, "Physical device address");
72 
73 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
74 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
75 
76 static int flow_ctrl = FLOW_OFF;
77 module_param(flow_ctrl, int, 0644);
78 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
79 
80 static int pause = PAUSE_TIME;
81 module_param(pause, int, 0644);
82 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
83 
84 #define TC_DEFAULT 64
85 static int tc = TC_DEFAULT;
86 module_param(tc, int, 0644);
87 MODULE_PARM_DESC(tc, "DMA threshold control value");
88 
89 #define	DEFAULT_BUFSIZE	1536
90 static int buf_sz = DEFAULT_BUFSIZE;
91 module_param(buf_sz, int, 0644);
92 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
93 
94 #define	STMMAC_RX_COPYBREAK	256
95 
96 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
97 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
98 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
99 
100 #define STMMAC_DEFAULT_LPI_TIMER	1000
101 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
102 module_param(eee_timer, int, 0644);
103 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
104 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
105 
/* By default the driver uses ring mode to manage the tx and rx descriptors,
 * but the user can force chain mode instead of ring mode.
 */
static int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "Use chain mode instead of ring mode");
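
/* Illustrative examples (not part of the driver): the parameters above can
 * be overridden at module load time, assuming the driver is built as a
 * module named stmmac, e.g.:
 *
 *	modprobe stmmac buf_sz=4096 eee_timer=2000 chain_mode=1
 *
 * Parameters registered with 0644 permissions can also be changed at run
 * time through /sys/module/stmmac/parameters/.
 */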
112 
113 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
114 
115 #ifdef CONFIG_DEBUG_FS
116 static int stmmac_init_fs(struct net_device *dev);
117 static void stmmac_exit_fs(struct net_device *dev);
118 #endif
119 
120 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
121 
122 /**
123  * stmmac_verify_args - verify the driver parameters.
124  * Description: it checks the driver parameters and set a default in case of
125  * errors.
126  */
127 static void stmmac_verify_args(void)
128 {
129 	if (unlikely(watchdog < 0))
130 		watchdog = TX_TIMEO;
131 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
132 		buf_sz = DEFAULT_BUFSIZE;
133 	if (unlikely(flow_ctrl > 1))
134 		flow_ctrl = FLOW_AUTO;
	else if (unlikely(flow_ctrl < 0))
136 		flow_ctrl = FLOW_OFF;
137 	if (unlikely((pause < 0) || (pause > 0xffff)))
138 		pause = PAUSE_TIME;
139 	if (eee_timer < 0)
140 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
141 }
142 
143 /**
144  * stmmac_disable_all_queues - Disable all queues
145  * @priv: driver private structure
146  */
147 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
148 {
149 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
150 	u32 queue;
151 
152 	for (queue = 0; queue < rx_queues_cnt; queue++) {
153 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
154 
155 		napi_disable(&rx_q->napi);
156 	}
157 }
158 
159 /**
160  * stmmac_enable_all_queues - Enable all queues
161  * @priv: driver private structure
162  */
163 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
164 {
165 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
166 	u32 queue;
167 
168 	for (queue = 0; queue < rx_queues_cnt; queue++) {
169 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
170 
171 		napi_enable(&rx_q->napi);
172 	}
173 }
174 
175 /**
176  * stmmac_stop_all_queues - Stop all queues
177  * @priv: driver private structure
178  */
179 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
180 {
181 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
182 	u32 queue;
183 
184 	for (queue = 0; queue < tx_queues_cnt; queue++)
185 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
186 }
187 
188 /**
189  * stmmac_start_all_queues - Start all queues
190  * @priv: driver private structure
191  */
192 static void stmmac_start_all_queues(struct stmmac_priv *priv)
193 {
194 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
195 	u32 queue;
196 
197 	for (queue = 0; queue < tx_queues_cnt; queue++)
198 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
199 }
200 
201 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
202 {
203 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
204 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
205 		queue_work(priv->wq, &priv->service_task);
206 }
207 
208 static void stmmac_global_err(struct stmmac_priv *priv)
209 {
210 	netif_carrier_off(priv->dev);
211 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
212 	stmmac_service_event_schedule(priv);
213 }
214 
215 /**
216  * stmmac_clk_csr_set - dynamically set the MDC clock
217  * @priv: driver private structure
218  * Description: this is to dynamically set the MDC clock according to the csr
219  * clock input.
220  * Note:
221  *	If a specific clk_csr value is passed from the platform
222  *	this means that the CSR Clock Range selection cannot be
223  *	changed at run-time and it is fixed (as reported in the driver
224  *	documentation). Viceversa the driver will try to set the MDC
225  *	clock dynamically according to the actual clock input.
226  */
227 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
228 {
229 	u32 clk_rate;
230 
231 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
232 
	/* The platform-provided default clk_csr is assumed valid for all
	 * cases except the ones mentioned below. For values higher than
	 * the IEEE 802.3 specified frequency we cannot estimate the proper
	 * divider, as the frequency of clk_csr_i is not known, so the
	 * default divider is left unchanged.
	 */
240 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
241 		if (clk_rate < CSR_F_35M)
242 			priv->clk_csr = STMMAC_CSR_20_35M;
243 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
244 			priv->clk_csr = STMMAC_CSR_35_60M;
245 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
246 			priv->clk_csr = STMMAC_CSR_60_100M;
247 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
248 			priv->clk_csr = STMMAC_CSR_100_150M;
249 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
250 			priv->clk_csr = STMMAC_CSR_150_250M;
251 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
252 			priv->clk_csr = STMMAC_CSR_250_300M;
253 	}
254 
255 	if (priv->plat->has_sun8i) {
256 		if (clk_rate > 160000000)
257 			priv->clk_csr = 0x03;
258 		else if (clk_rate > 80000000)
259 			priv->clk_csr = 0x02;
260 		else if (clk_rate > 40000000)
261 			priv->clk_csr = 0x01;
262 		else
263 			priv->clk_csr = 0;
264 	}
265 }
266 
267 static void print_pkt(unsigned char *buf, int len)
268 {
269 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
270 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
271 }
272 
273 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
274 {
275 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
276 	u32 avail;
277 
278 	if (tx_q->dirty_tx > tx_q->cur_tx)
279 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
280 	else
281 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
282 
283 	return avail;
284 }
285 
286 /**
287  * stmmac_rx_dirty - Get RX queue dirty
288  * @priv: driver private structure
289  * @queue: RX queue index
290  */
291 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
292 {
293 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
294 	u32 dirty;
295 
296 	if (rx_q->dirty_rx <= rx_q->cur_rx)
297 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
298 	else
299 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
300 
301 	return dirty;
302 }
303 
304 /**
305  * stmmac_hw_fix_mac_speed - callback for speed selection
306  * @priv: driver private structure
307  * Description: on some platforms (e.g. ST), some HW system configuration
308  * registers have to be set according to the link speed negotiated.
309  */
310 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
311 {
312 	struct net_device *ndev = priv->dev;
313 	struct phy_device *phydev = ndev->phydev;
314 
315 	if (likely(priv->plat->fix_mac_speed))
316 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
317 }
318 
319 /**
320  * stmmac_enable_eee_mode - check and enter in LPI mode
321  * @priv: driver private structure
322  * Description: this function is to verify and enter in LPI mode in case of
323  * EEE.
324  */
325 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
326 {
327 	u32 tx_cnt = priv->plat->tx_queues_to_use;
328 	u32 queue;
329 
330 	/* check if all TX queues have the work finished */
331 	for (queue = 0; queue < tx_cnt; queue++) {
332 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
333 
334 		if (tx_q->dirty_tx != tx_q->cur_tx)
335 			return; /* still unfinished work */
336 	}
337 
338 	/* Check and enter in LPI mode */
339 	if (!priv->tx_path_in_lpi_mode)
340 		stmmac_set_eee_mode(priv, priv->hw,
341 				priv->plat->en_tx_lpi_clockgating);
342 }
343 
344 /**
345  * stmmac_disable_eee_mode - disable and exit from LPI mode
346  * @priv: driver private structure
347  * Description: this function is to exit and disable EEE in case of
348  * LPI state is true. This is called by the xmit.
349  */
350 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
351 {
352 	stmmac_reset_eee_mode(priv, priv->hw);
353 	del_timer_sync(&priv->eee_ctrl_timer);
354 	priv->tx_path_in_lpi_mode = false;
355 }
356 
357 /**
358  * stmmac_eee_ctrl_timer - EEE TX SW timer.
359  * @arg : data hook
360  * Description:
361  *  if there is no data transfer and if we are not in LPI state,
362  *  then MAC Transmitter can be moved to LPI state.
363  */
364 static void stmmac_eee_ctrl_timer(struct timer_list *t)
365 {
366 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
367 
368 	stmmac_enable_eee_mode(priv);
369 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
370 }
371 
372 /**
373  * stmmac_eee_init - init EEE
374  * @priv: driver private structure
375  * Description:
376  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
377  *  can also manage EEE, this function enable the LPI state and start related
378  *  timer.
379  */
380 bool stmmac_eee_init(struct stmmac_priv *priv)
381 {
382 	struct net_device *ndev = priv->dev;
383 	int interface = priv->plat->interface;
384 	bool ret = false;
385 
386 	if ((interface != PHY_INTERFACE_MODE_MII) &&
387 	    (interface != PHY_INTERFACE_MODE_GMII) &&
388 	    !phy_interface_mode_is_rgmii(interface))
389 		goto out;
390 
	/* Using PCS we cannot deal with the phy registers at this stage,
	 * so we do not support extra features like EEE.
	 */
394 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
395 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
396 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
397 		goto out;
398 
399 	/* MAC core supports the EEE feature. */
400 	if (priv->dma_cap.eee) {
401 		int tx_lpi_timer = priv->tx_lpi_timer;
402 
403 		/* Check if the PHY supports EEE */
404 		if (phy_init_eee(ndev->phydev, 1)) {
			/* Handle the case where EEE can no longer be
			 * supported at run-time (for example because the
			 * link partner caps have changed).
			 * In that case the driver disables its own timers.
			 */
410 			mutex_lock(&priv->lock);
411 			if (priv->eee_active) {
412 				netdev_dbg(priv->dev, "disable EEE\n");
413 				del_timer_sync(&priv->eee_ctrl_timer);
414 				stmmac_set_eee_timer(priv, priv->hw, 0,
415 						tx_lpi_timer);
416 			}
417 			priv->eee_active = 0;
418 			mutex_unlock(&priv->lock);
419 			goto out;
420 		}
421 		/* Activate the EEE and start timers */
422 		mutex_lock(&priv->lock);
423 		if (!priv->eee_active) {
424 			priv->eee_active = 1;
425 			timer_setup(&priv->eee_ctrl_timer,
426 				    stmmac_eee_ctrl_timer, 0);
427 			mod_timer(&priv->eee_ctrl_timer,
428 				  STMMAC_LPI_T(eee_timer));
429 
430 			stmmac_set_eee_timer(priv, priv->hw,
431 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
432 		}
433 		/* Set HW EEE according to the speed */
434 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
435 
436 		ret = true;
437 		mutex_unlock(&priv->lock);
438 
439 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
440 	}
441 out:
442 	return ret;
443 }
444 
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and passes it to the stack.
 */
453 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
454 				   struct dma_desc *p, struct sk_buff *skb)
455 {
456 	struct skb_shared_hwtstamps shhwtstamp;
457 	u64 ns;
458 
459 	if (!priv->hwts_tx_en)
460 		return;
461 
462 	/* exit if skb doesn't support hw tstamp */
463 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
464 		return;
465 
466 	/* check tx tstamp status */
467 	if (stmmac_get_tx_timestamp_status(priv, p)) {
468 		/* get the valid tstamp */
469 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
470 
471 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
472 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
473 
474 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
475 		/* pass tstamp to stack */
476 		skb_tstamp_tx(skb, &shhwtstamp);
477 	}
478 
479 	return;
480 }
481 
482 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
483  * @priv: driver private structure
484  * @p : descriptor pointer
485  * @np : next descriptor pointer
486  * @skb : the socket buffer
487  * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
490  */
491 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
492 				   struct dma_desc *np, struct sk_buff *skb)
493 {
494 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
495 	struct dma_desc *desc = p;
496 	u64 ns;
497 
498 	if (!priv->hwts_rx_en)
499 		return;
500 	/* For GMAC4, the valid timestamp is from CTX next desc. */
501 	if (priv->plat->has_gmac4)
502 		desc = np;
503 
504 	/* Check if timestamp is available */
505 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
506 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
507 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
508 		shhwtstamp = skb_hwtstamps(skb);
509 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
510 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
511 	} else  {
512 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
513 	}
514 }
515 
516 /**
517  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
518  *  @dev: device pointer.
519  *  @ifr: An IOCTL specific structure, that can contain a pointer to
520  *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet timestamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate negative error code on failure.
526  */
527 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
528 {
529 	struct stmmac_priv *priv = netdev_priv(dev);
530 	struct hwtstamp_config config;
531 	struct timespec64 now;
532 	u64 temp = 0;
533 	u32 ptp_v2 = 0;
534 	u32 tstamp_all = 0;
535 	u32 ptp_over_ipv4_udp = 0;
536 	u32 ptp_over_ipv6_udp = 0;
537 	u32 ptp_over_ethernet = 0;
538 	u32 snap_type_sel = 0;
539 	u32 ts_master_en = 0;
540 	u32 ts_event_en = 0;
541 	u32 value = 0;
542 	u32 sec_inc;
543 
544 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
545 		netdev_alert(priv->dev, "No support for HW time stamping\n");
546 		priv->hwts_tx_en = 0;
547 		priv->hwts_rx_en = 0;
548 
549 		return -EOPNOTSUPP;
550 	}
551 
552 	if (copy_from_user(&config, ifr->ifr_data,
553 			   sizeof(struct hwtstamp_config)))
554 		return -EFAULT;
555 
556 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
557 		   __func__, config.flags, config.tx_type, config.rx_filter);
558 
559 	/* reserved for future extensions */
560 	if (config.flags)
561 		return -EINVAL;
562 
563 	if (config.tx_type != HWTSTAMP_TX_OFF &&
564 	    config.tx_type != HWTSTAMP_TX_ON)
565 		return -ERANGE;
566 
567 	if (priv->adv_ts) {
568 		switch (config.rx_filter) {
569 		case HWTSTAMP_FILTER_NONE:
			/* do not timestamp any incoming packet */
571 			config.rx_filter = HWTSTAMP_FILTER_NONE;
572 			break;
573 
574 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
575 			/* PTP v1, UDP, any kind of event packet */
576 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
577 			/* take time stamp for all event messages */
578 			if (priv->plat->has_gmac4)
579 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
580 			else
581 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
582 
583 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
584 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
585 			break;
586 
587 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
588 			/* PTP v1, UDP, Sync packet */
589 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
590 			/* take time stamp for SYNC messages only */
591 			ts_event_en = PTP_TCR_TSEVNTENA;
592 
593 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
594 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
595 			break;
596 
597 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
598 			/* PTP v1, UDP, Delay_req packet */
599 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
600 			/* take time stamp for Delay_Req messages only */
601 			ts_master_en = PTP_TCR_TSMSTRENA;
602 			ts_event_en = PTP_TCR_TSEVNTENA;
603 
604 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
605 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
606 			break;
607 
608 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
609 			/* PTP v2, UDP, any kind of event packet */
610 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
611 			ptp_v2 = PTP_TCR_TSVER2ENA;
612 			/* take time stamp for all event messages */
613 			if (priv->plat->has_gmac4)
614 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
615 			else
616 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
617 
618 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
619 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
620 			break;
621 
622 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
623 			/* PTP v2, UDP, Sync packet */
624 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
625 			ptp_v2 = PTP_TCR_TSVER2ENA;
626 			/* take time stamp for SYNC messages only */
627 			ts_event_en = PTP_TCR_TSEVNTENA;
628 
629 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
630 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
631 			break;
632 
633 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
634 			/* PTP v2, UDP, Delay_req packet */
635 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
636 			ptp_v2 = PTP_TCR_TSVER2ENA;
637 			/* take time stamp for Delay_Req messages only */
638 			ts_master_en = PTP_TCR_TSMSTRENA;
639 			ts_event_en = PTP_TCR_TSEVNTENA;
640 
641 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643 			break;
644 
645 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1, any layer, any kind of event packet */
647 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
648 			ptp_v2 = PTP_TCR_TSVER2ENA;
649 			/* take time stamp for all event messages */
650 			if (priv->plat->has_gmac4)
651 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
652 			else
653 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
654 
655 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
656 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
657 			ptp_over_ethernet = PTP_TCR_TSIPENA;
658 			break;
659 
660 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
661 			/* PTP v2/802.AS1, any layer, Sync packet */
662 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
663 			ptp_v2 = PTP_TCR_TSVER2ENA;
664 			/* take time stamp for SYNC messages only */
665 			ts_event_en = PTP_TCR_TSEVNTENA;
666 
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			ptp_over_ethernet = PTP_TCR_TSIPENA;
670 			break;
671 
672 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
673 			/* PTP v2/802.AS1, any layer, Delay_req packet */
674 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
675 			ptp_v2 = PTP_TCR_TSVER2ENA;
676 			/* take time stamp for Delay_Req messages only */
677 			ts_master_en = PTP_TCR_TSMSTRENA;
678 			ts_event_en = PTP_TCR_TSEVNTENA;
679 
680 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
681 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
682 			ptp_over_ethernet = PTP_TCR_TSIPENA;
683 			break;
684 
685 		case HWTSTAMP_FILTER_NTP_ALL:
686 		case HWTSTAMP_FILTER_ALL:
687 			/* time stamp any incoming packet */
688 			config.rx_filter = HWTSTAMP_FILTER_ALL;
689 			tstamp_all = PTP_TCR_TSENALL;
690 			break;
691 
692 		default:
693 			return -ERANGE;
694 		}
695 	} else {
696 		switch (config.rx_filter) {
697 		case HWTSTAMP_FILTER_NONE:
698 			config.rx_filter = HWTSTAMP_FILTER_NONE;
699 			break;
700 		default:
701 			/* PTP v1, UDP, any kind of event packet */
702 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
703 			break;
704 		}
705 	}
706 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
707 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
708 
	if (!priv->hwts_tx_en && !priv->hwts_rx_en) {
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	} else {
712 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
713 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
714 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
715 			 ts_master_en | snap_type_sel);
716 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
717 
718 		/* program Sub Second Increment reg */
719 		stmmac_config_sub_second_increment(priv,
720 				priv->ptpaddr, priv->plat->clk_ptp_rate,
721 				priv->plat->has_gmac4, &sec_inc);
722 		temp = div_u64(1000000000ULL, sec_inc);
723 
724 		/* Store sub second increment and flags for later use */
725 		priv->sub_second_inc = sec_inc;
726 		priv->systime_flags = value;
727 
		/* Calculate the default addend:
		 * addend = 2^32 * (1e9 / sec_inc) / clk_ptp_rate
		 * so that the 32-bit accumulator, clocked at clk_ptp_rate,
		 * overflows once per sub-second increment.
		 */
733 		temp = (u64)(temp << 32);
734 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
735 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
736 
737 		/* initialize system time */
738 		ktime_get_real_ts64(&now);
739 
740 		/* lower 32 bits of tv_sec are safe until y2106 */
741 		stmmac_init_systime(priv, priv->ptpaddr,
742 				(u32)now.tv_sec, now.tv_nsec);
743 	}
744 
745 	return copy_to_user(ifr->ifr_data, &config,
746 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
747 }
748 
749 /**
750  * stmmac_init_ptp - init PTP
751  * @priv: driver private structure
752  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
753  * This is done by looking at the HW cap. register.
754  * This function also registers the ptp driver.
755  */
756 static int stmmac_init_ptp(struct stmmac_priv *priv)
757 {
758 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
759 		return -EOPNOTSUPP;
760 
761 	priv->adv_ts = 0;
762 	/* Check if adv_ts can be enabled for dwmac 4.x core */
763 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
764 		priv->adv_ts = 1;
765 	/* Dwmac 3.x core with extend_desc can support adv_ts */
766 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
767 		priv->adv_ts = 1;
768 
769 	if (priv->dma_cap.time_stamp)
770 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
771 
772 	if (priv->adv_ts)
773 		netdev_info(priv->dev,
774 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
775 
776 	priv->hwts_tx_en = 0;
777 	priv->hwts_rx_en = 0;
778 
779 	stmmac_ptp_register(priv);
780 
781 	return 0;
782 }
783 
784 static void stmmac_release_ptp(struct stmmac_priv *priv)
785 {
786 	if (priv->plat->clk_ptp_ref)
787 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
788 	stmmac_ptp_unregister(priv);
789 }
790 
791 /**
792  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
793  *  @priv: driver private structure
794  *  Description: It is used for configuring the flow control in all queues
795  */
796 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
797 {
798 	u32 tx_cnt = priv->plat->tx_queues_to_use;
799 
800 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
801 			priv->pause, tx_cnt);
802 }
803 
804 /**
805  * stmmac_adjust_link - adjusts the link parameters
806  * @dev: net device structure
807  * Description: this is the helper called by the physical abstraction layer
808  * drivers to communicate the phy link status. According the speed and duplex
809  * this driver can invoke registered glue-logic as well.
810  * It also invoke the eee initialization because it could happen when switch
811  * on different networks (that are eee capable).
812  */
813 static void stmmac_adjust_link(struct net_device *dev)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct phy_device *phydev = dev->phydev;
817 	bool new_state = false;
818 
819 	if (!phydev)
820 		return;
821 
822 	mutex_lock(&priv->lock);
823 
824 	if (phydev->link) {
825 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
826 
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
829 		if (phydev->duplex != priv->oldduplex) {
830 			new_state = true;
831 			if (!phydev->duplex)
832 				ctrl &= ~priv->hw->link.duplex;
833 			else
834 				ctrl |= priv->hw->link.duplex;
835 			priv->oldduplex = phydev->duplex;
836 		}
837 		/* Flow Control operation */
838 		if (phydev->pause)
839 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
840 
841 		if (phydev->speed != priv->speed) {
842 			new_state = true;
843 			ctrl &= ~priv->hw->link.speed_mask;
844 			switch (phydev->speed) {
845 			case SPEED_1000:
846 				ctrl |= priv->hw->link.speed1000;
847 				break;
848 			case SPEED_100:
849 				ctrl |= priv->hw->link.speed100;
850 				break;
851 			case SPEED_10:
852 				ctrl |= priv->hw->link.speed10;
853 				break;
854 			default:
855 				netif_warn(priv, link, priv->dev,
856 					   "broken speed: %d\n", phydev->speed);
857 				phydev->speed = SPEED_UNKNOWN;
858 				break;
859 			}
860 			if (phydev->speed != SPEED_UNKNOWN)
861 				stmmac_hw_fix_mac_speed(priv);
862 			priv->speed = phydev->speed;
863 		}
864 
865 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
866 
867 		if (!priv->oldlink) {
868 			new_state = true;
869 			priv->oldlink = true;
870 		}
871 	} else if (priv->oldlink) {
872 		new_state = true;
873 		priv->oldlink = false;
874 		priv->speed = SPEED_UNKNOWN;
875 		priv->oldduplex = DUPLEX_UNKNOWN;
876 	}
877 
878 	if (new_state && netif_msg_link(priv))
879 		phy_print_status(phydev);
880 
881 	mutex_unlock(&priv->lock);
882 
883 	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the link-adjust hook when
		 * a switch is attached to the stmmac driver.
		 */
887 		phydev->irq = PHY_IGNORE_INTERRUPT;
888 	else
889 		/* At this stage, init the EEE if supported.
890 		 * Never called in case of fixed_link.
891 		 */
892 		priv->eee_enabled = stmmac_eee_init(priv);
893 }
894 
895 /**
896  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
897  * @priv: driver private structure
898  * Description: this is to verify if the HW supports the PCS.
899  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
900  * configured for the TBI, RTBI, or SGMII PHY interface.
901  */
902 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
903 {
904 	int interface = priv->plat->interface;
905 
906 	if (priv->dma_cap.pcs) {
907 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
908 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
909 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
910 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
911 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
912 			priv->hw->pcs = STMMAC_PCS_RGMII;
913 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
914 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
915 			priv->hw->pcs = STMMAC_PCS_SGMII;
916 		}
917 	}
918 }
919 
920 /**
921  * stmmac_init_phy - PHY initialization
922  * @dev: net device structure
923  * Description: it initializes the driver's PHY state, and attaches the PHY
924  * to the mac driver.
925  *  Return value:
926  *  0 on success
927  */
928 static int stmmac_init_phy(struct net_device *dev)
929 {
930 	struct stmmac_priv *priv = netdev_priv(dev);
931 	u32 tx_cnt = priv->plat->tx_queues_to_use;
932 	struct phy_device *phydev;
933 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
934 	char bus_id[MII_BUS_ID_SIZE];
935 	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

	priv->oldlink = false;
938 	priv->speed = SPEED_UNKNOWN;
939 	priv->oldduplex = DUPLEX_UNKNOWN;
940 
941 	if (priv->plat->phy_node) {
942 		phydev = of_phy_connect(dev, priv->plat->phy_node,
943 					&stmmac_adjust_link, 0, interface);
944 	} else {
945 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
946 			 priv->plat->bus_id);
947 
948 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
949 			 priv->plat->phy_addr);
950 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
951 			   phy_id_fmt);
952 
953 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
954 				     interface);
955 	}
956 
957 	if (IS_ERR_OR_NULL(phydev)) {
958 		netdev_err(priv->dev, "Could not attach to PHY\n");
959 		if (!phydev)
960 			return -ENODEV;
961 
962 		return PTR_ERR(phydev);
963 	}
964 
965 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
	    (max_speed < 1000 && max_speed > 0))
969 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
970 					 SUPPORTED_1000baseT_Full);
971 
972 	/*
973 	 * Half-duplex mode not supported with multiqueue
974 	 * half-duplex can only works with single queue
975 	 */
976 	if (tx_cnt > 1)
977 		phydev->supported &= ~(SUPPORTED_1000baseT_Half |
978 				       SUPPORTED_100baseT_Half |
979 				       SUPPORTED_10baseT_Half);
980 
981 	/*
982 	 * Broken HW is sometimes missing the pull-up resistor on the
983 	 * MDIO line, which results in reads to non-existent devices returning
984 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
985 	 * device as well.
986 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
987 	 */
988 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
989 		phy_disconnect(phydev);
990 		return -ENODEV;
991 	}
992 
993 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
994 	 * subsequent PHY polling, make sure we force a link transition if
995 	 * we have a UP/DOWN/UP transition
996 	 */
997 	if (phydev->is_pseudo_fixed_link)
998 		phydev->irq = PHY_POLL;
999 
1000 	phy_attached_info(phydev);
1001 	return 0;
1002 }
1003 
1004 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1005 {
1006 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1007 	void *head_rx;
1008 	u32 queue;
1009 
1010 	/* Display RX rings */
1011 	for (queue = 0; queue < rx_cnt; queue++) {
1012 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1013 
1014 		pr_info("\tRX Queue %u rings\n", queue);
1015 
1016 		if (priv->extend_desc)
1017 			head_rx = (void *)rx_q->dma_erx;
1018 		else
1019 			head_rx = (void *)rx_q->dma_rx;
1020 
1021 		/* Display RX ring */
1022 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1023 	}
1024 }
1025 
1026 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1027 {
1028 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1029 	void *head_tx;
1030 	u32 queue;
1031 
1032 	/* Display TX rings */
1033 	for (queue = 0; queue < tx_cnt; queue++) {
1034 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1035 
1036 		pr_info("\tTX Queue %d rings\n", queue);
1037 
1038 		if (priv->extend_desc)
1039 			head_tx = (void *)tx_q->dma_etx;
1040 		else
1041 			head_tx = (void *)tx_q->dma_tx;
1042 
1043 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1044 	}
1045 }
1046 
1047 static void stmmac_display_rings(struct stmmac_priv *priv)
1048 {
1049 	/* Display RX ring */
1050 	stmmac_display_rx_rings(priv);
1051 
1052 	/* Display TX ring */
1053 	stmmac_display_tx_rings(priv);
1054 }
1055 
1056 static int stmmac_set_bfsize(int mtu, int bufsize)
1057 {
1058 	int ret = bufsize;
1059 
1060 	if (mtu >= BUF_SIZE_4KiB)
1061 		ret = BUF_SIZE_8KiB;
1062 	else if (mtu >= BUF_SIZE_2KiB)
1063 		ret = BUF_SIZE_4KiB;
1064 	else if (mtu > DEFAULT_BUFSIZE)
1065 		ret = BUF_SIZE_2KiB;
1066 	else
1067 		ret = DEFAULT_BUFSIZE;
1068 
1069 	return ret;
1070 }
1071 
1072 /**
1073  * stmmac_clear_rx_descriptors - clear RX descriptors
1074  * @priv: driver private structure
1075  * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are used.
1078  */
1079 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1080 {
1081 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1082 	int i;
1083 
1084 	/* Clear the RX descriptors */
1085 	for (i = 0; i < DMA_RX_SIZE; i++)
1086 		if (priv->extend_desc)
1087 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1088 					priv->use_riwt, priv->mode,
1089 					(i == DMA_RX_SIZE - 1));
1090 		else
1091 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1092 					priv->use_riwt, priv->mode,
1093 					(i == DMA_RX_SIZE - 1));
1094 }
1095 
1096 /**
1097  * stmmac_clear_tx_descriptors - clear tx descriptors
1098  * @priv: driver private structure
1099  * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are used.
1102  */
1103 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1104 {
1105 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1106 	int i;
1107 
1108 	/* Clear the TX descriptors */
1109 	for (i = 0; i < DMA_TX_SIZE; i++)
1110 		if (priv->extend_desc)
1111 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1112 					priv->mode, (i == DMA_TX_SIZE - 1));
1113 		else
1114 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1115 					priv->mode, (i == DMA_TX_SIZE - 1));
1116 }
1117 
1118 /**
1119  * stmmac_clear_descriptors - clear descriptors
1120  * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
1123  */
1124 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1125 {
1126 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1127 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1128 	u32 queue;
1129 
1130 	/* Clear the RX descriptors */
1131 	for (queue = 0; queue < rx_queue_cnt; queue++)
1132 		stmmac_clear_rx_descriptors(priv, queue);
1133 
1134 	/* Clear the TX descriptors */
1135 	for (queue = 0; queue < tx_queue_cnt; queue++)
1136 		stmmac_clear_tx_descriptors(priv, queue);
1137 }
1138 
1139 /**
1140  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1141  * @priv: driver private structure
1142  * @p: descriptor pointer
1143  * @i: descriptor index
1144  * @flags: gfp flag
1145  * @queue: RX queue index
1146  * Description: this function is called to allocate a receive buffer, perform
1147  * the DMA mapping and init the descriptor.
1148  */
1149 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1150 				  int i, gfp_t flags, u32 queue)
1151 {
1152 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1153 	struct sk_buff *skb;
1154 
1155 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1156 	if (!skb) {
1157 		netdev_err(priv->dev,
1158 			   "%s: Rx init fails; skb is NULL\n", __func__);
1159 		return -ENOMEM;
1160 	}
1161 	rx_q->rx_skbuff[i] = skb;
1162 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1163 						priv->dma_buf_sz,
1164 						DMA_FROM_DEVICE);
1165 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1166 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1167 		dev_kfree_skb_any(skb);
1168 		return -EINVAL;
1169 	}
1170 
1171 	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1172 
1173 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1174 		stmmac_init_desc3(priv, p);
1175 
1176 	return 0;
1177 }
1178 
1179 /**
 * stmmac_free_rx_buffer - free an RX dma buffer
1181  * @priv: private structure
1182  * @queue: RX queue index
1183  * @i: buffer index.
1184  */
1185 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1186 {
1187 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1188 
1189 	if (rx_q->rx_skbuff[i]) {
1190 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1191 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1192 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1193 	}
1194 	rx_q->rx_skbuff[i] = NULL;
1195 }
1196 
1197 /**
 * stmmac_free_tx_buffer - free a TX dma buffer
 * @priv: private structure
 * @queue: TX queue index
1201  * @i: buffer index.
1202  */
1203 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1204 {
1205 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1206 
1207 	if (tx_q->tx_skbuff_dma[i].buf) {
1208 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1209 			dma_unmap_page(priv->device,
1210 				       tx_q->tx_skbuff_dma[i].buf,
1211 				       tx_q->tx_skbuff_dma[i].len,
1212 				       DMA_TO_DEVICE);
1213 		else
1214 			dma_unmap_single(priv->device,
1215 					 tx_q->tx_skbuff_dma[i].buf,
1216 					 tx_q->tx_skbuff_dma[i].len,
1217 					 DMA_TO_DEVICE);
1218 	}
1219 
1220 	if (tx_q->tx_skbuff[i]) {
1221 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1222 		tx_q->tx_skbuff[i] = NULL;
1223 		tx_q->tx_skbuff_dma[i].buf = 0;
1224 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1225 	}
1226 }
1227 
1228 /**
1229  * init_dma_rx_desc_rings - init the RX descriptor rings
1230  * @dev: net device structure
1231  * @flags: gfp flag.
1232  * Description: this function initializes the DMA RX descriptors
1233  * and allocates the socket buffers. It supports the chained and ring
1234  * modes.
1235  */
1236 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1237 {
1238 	struct stmmac_priv *priv = netdev_priv(dev);
1239 	u32 rx_count = priv->plat->rx_queues_to_use;
1240 	int ret = -ENOMEM;
1241 	int bfsize = 0;
1242 	int queue;
1243 	int i;
1244 
1245 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1246 	if (bfsize < 0)
1247 		bfsize = 0;
1248 
1249 	if (bfsize < BUF_SIZE_16KiB)
1250 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1251 
1252 	priv->dma_buf_sz = bfsize;
1253 
1254 	/* RX INITIALIZATION */
1255 	netif_dbg(priv, probe, priv->dev,
1256 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1257 
1258 	for (queue = 0; queue < rx_count; queue++) {
1259 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1260 
1261 		netif_dbg(priv, probe, priv->dev,
1262 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1263 			  (u32)rx_q->dma_rx_phy);
1264 
1265 		for (i = 0; i < DMA_RX_SIZE; i++) {
1266 			struct dma_desc *p;
1267 
1268 			if (priv->extend_desc)
1269 				p = &((rx_q->dma_erx + i)->basic);
1270 			else
1271 				p = rx_q->dma_rx + i;
1272 
1273 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1274 						     queue);
1275 			if (ret)
1276 				goto err_init_rx_buffers;
1277 
1278 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1279 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1280 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1281 		}
1282 
1283 		rx_q->cur_rx = 0;
1284 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1285 
1286 		stmmac_clear_rx_descriptors(priv, queue);
1287 
1288 		/* Setup the chained descriptor addresses */
1289 		if (priv->mode == STMMAC_CHAIN_MODE) {
1290 			if (priv->extend_desc)
1291 				stmmac_mode_init(priv, rx_q->dma_erx,
1292 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1293 			else
1294 				stmmac_mode_init(priv, rx_q->dma_rx,
1295 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1296 		}
1297 	}
1298 
1299 	buf_sz = bfsize;
1300 
1301 	return 0;
1302 
1303 err_init_rx_buffers:
1304 	while (queue >= 0) {
1305 		while (--i >= 0)
1306 			stmmac_free_rx_buffer(priv, queue, i);
1307 
1308 		if (queue == 0)
1309 			break;
1310 
1311 		i = DMA_RX_SIZE;
1312 		queue--;
1313 	}
1314 
1315 	return ret;
1316 }
1317 
1318 /**
1319  * init_dma_tx_desc_rings - init the TX descriptor rings
1320  * @dev: net device structure.
1321  * Description: this function initializes the DMA TX descriptors
1322  * and allocates the socket buffers. It supports the chained and ring
1323  * modes.
1324  */
1325 static int init_dma_tx_desc_rings(struct net_device *dev)
1326 {
1327 	struct stmmac_priv *priv = netdev_priv(dev);
1328 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1329 	u32 queue;
1330 	int i;
1331 
1332 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1333 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1334 
1335 		netif_dbg(priv, probe, priv->dev,
1336 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1337 			 (u32)tx_q->dma_tx_phy);
1338 
1339 		/* Setup the chained descriptor addresses */
1340 		if (priv->mode == STMMAC_CHAIN_MODE) {
1341 			if (priv->extend_desc)
1342 				stmmac_mode_init(priv, tx_q->dma_etx,
1343 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1344 			else
1345 				stmmac_mode_init(priv, tx_q->dma_tx,
1346 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1347 		}
1348 
1349 		for (i = 0; i < DMA_TX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
1352 				p = &((tx_q->dma_etx + i)->basic);
1353 			else
1354 				p = tx_q->dma_tx + i;
1355 
1356 			stmmac_clear_desc(priv, p);
1357 
1358 			tx_q->tx_skbuff_dma[i].buf = 0;
1359 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1360 			tx_q->tx_skbuff_dma[i].len = 0;
1361 			tx_q->tx_skbuff_dma[i].last_segment = false;
1362 			tx_q->tx_skbuff[i] = NULL;
1363 		}
1364 
1365 		tx_q->dirty_tx = 0;
1366 		tx_q->cur_tx = 0;
1367 		tx_q->mss = 0;
1368 
1369 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1370 	}
1371 
1372 	return 0;
1373 }
1374 
1375 /**
1376  * init_dma_desc_rings - init the RX/TX descriptor rings
1377  * @dev: net device structure
1378  * @flags: gfp flag.
1379  * Description: this function initializes the DMA RX/TX descriptors
1380  * and allocates the socket buffers. It supports the chained and ring
1381  * modes.
1382  */
1383 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1384 {
1385 	struct stmmac_priv *priv = netdev_priv(dev);
1386 	int ret;
1387 
1388 	ret = init_dma_rx_desc_rings(dev, flags);
1389 	if (ret)
1390 		return ret;
1391 
1392 	ret = init_dma_tx_desc_rings(dev);
1393 
1394 	stmmac_clear_descriptors(priv);
1395 
1396 	if (netif_msg_hw(priv))
1397 		stmmac_display_rings(priv);
1398 
1399 	return ret;
1400 }
1401 
1402 /**
1403  * dma_free_rx_skbufs - free RX dma buffers
1404  * @priv: private structure
1405  * @queue: RX queue index
1406  */
1407 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1408 {
1409 	int i;
1410 
1411 	for (i = 0; i < DMA_RX_SIZE; i++)
1412 		stmmac_free_rx_buffer(priv, queue, i);
1413 }
1414 
1415 /**
1416  * dma_free_tx_skbufs - free TX dma buffers
1417  * @priv: private structure
1418  * @queue: TX queue index
1419  */
1420 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1421 {
1422 	int i;
1423 
1424 	for (i = 0; i < DMA_TX_SIZE; i++)
1425 		stmmac_free_tx_buffer(priv, queue, i);
1426 }
1427 
1428 /**
1429  * free_dma_rx_desc_resources - free RX dma desc resources
1430  * @priv: private structure
1431  */
1432 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1433 {
1434 	u32 rx_count = priv->plat->rx_queues_to_use;
1435 	u32 queue;
1436 
1437 	/* Free RX queue resources */
1438 	for (queue = 0; queue < rx_count; queue++) {
1439 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1440 
1441 		/* Release the DMA RX socket buffers */
1442 		dma_free_rx_skbufs(priv, queue);
1443 
1444 		/* Free DMA regions of consistent memory previously allocated */
1445 		if (!priv->extend_desc)
1446 			dma_free_coherent(priv->device,
1447 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1448 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1449 		else
1450 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1451 					  sizeof(struct dma_extended_desc),
1452 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1453 
1454 		kfree(rx_q->rx_skbuff_dma);
1455 		kfree(rx_q->rx_skbuff);
1456 	}
1457 }
1458 
1459 /**
1460  * free_dma_tx_desc_resources - free TX dma desc resources
1461  * @priv: private structure
1462  */
1463 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1464 {
1465 	u32 tx_count = priv->plat->tx_queues_to_use;
1466 	u32 queue;
1467 
1468 	/* Free TX queue resources */
1469 	for (queue = 0; queue < tx_count; queue++) {
1470 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1471 
1472 		/* Release the DMA TX socket buffers */
1473 		dma_free_tx_skbufs(priv, queue);
1474 
1475 		/* Free DMA regions of consistent memory previously allocated */
1476 		if (!priv->extend_desc)
1477 			dma_free_coherent(priv->device,
1478 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1479 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1480 		else
1481 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1482 					  sizeof(struct dma_extended_desc),
1483 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1484 
1485 		kfree(tx_q->tx_skbuff_dma);
1486 		kfree(tx_q->tx_skbuff);
1487 	}
1488 }
1489 
1490 /**
1491  * alloc_dma_rx_desc_resources - alloc RX resources.
1492  * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the RX path. It pre-allocates
 * the RX socket buffers in order to allow the zero-copy mechanism.
1497  */
1498 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1499 {
1500 	u32 rx_count = priv->plat->rx_queues_to_use;
1501 	int ret = -ENOMEM;
1502 	u32 queue;
1503 
1504 	/* RX queues buffers and DMA */
1505 	for (queue = 0; queue < rx_count; queue++) {
1506 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1507 
1508 		rx_q->queue_index = queue;
1509 		rx_q->priv_data = priv;
1510 
1511 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1512 						    sizeof(dma_addr_t),
1513 						    GFP_KERNEL);
1514 		if (!rx_q->rx_skbuff_dma)
1515 			goto err_dma;
1516 
1517 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1518 						sizeof(struct sk_buff *),
1519 						GFP_KERNEL);
1520 		if (!rx_q->rx_skbuff)
1521 			goto err_dma;
1522 
1523 		if (priv->extend_desc) {
1524 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1525 							    DMA_RX_SIZE *
1526 							    sizeof(struct
1527 							    dma_extended_desc),
1528 							    &rx_q->dma_rx_phy,
1529 							    GFP_KERNEL);
1530 			if (!rx_q->dma_erx)
1531 				goto err_dma;
1532 
1533 		} else {
1534 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1535 							   DMA_RX_SIZE *
1536 							   sizeof(struct
1537 							   dma_desc),
1538 							   &rx_q->dma_rx_phy,
1539 							   GFP_KERNEL);
1540 			if (!rx_q->dma_rx)
1541 				goto err_dma;
1542 		}
1543 	}
1544 
1545 	return 0;
1546 
1547 err_dma:
1548 	free_dma_rx_desc_resources(priv);
1549 
1550 	return ret;
1551 }
1552 
1553 /**
1554  * alloc_dma_tx_desc_resources - alloc TX resources.
1555  * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the TX path: the descriptor
 * rings and the per-descriptor bookkeeping arrays.
1560  */
1561 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1562 {
1563 	u32 tx_count = priv->plat->tx_queues_to_use;
1564 	int ret = -ENOMEM;
1565 	u32 queue;
1566 
1567 	/* TX queues buffers and DMA */
1568 	for (queue = 0; queue < tx_count; queue++) {
1569 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1570 
1571 		tx_q->queue_index = queue;
1572 		tx_q->priv_data = priv;
1573 
1574 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1575 						    sizeof(*tx_q->tx_skbuff_dma),
1576 						    GFP_KERNEL);
1577 		if (!tx_q->tx_skbuff_dma)
1578 			goto err_dma;
1579 
1580 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1581 						sizeof(struct sk_buff *),
1582 						GFP_KERNEL);
1583 		if (!tx_q->tx_skbuff)
1584 			goto err_dma;
1585 
1586 		if (priv->extend_desc) {
1587 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1588 							    DMA_TX_SIZE *
1589 							    sizeof(struct
1590 							    dma_extended_desc),
1591 							    &tx_q->dma_tx_phy,
1592 							    GFP_KERNEL);
1593 			if (!tx_q->dma_etx)
1594 				goto err_dma;
1595 		} else {
1596 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1597 							   DMA_TX_SIZE *
1598 							   sizeof(struct
1599 								  dma_desc),
1600 							   &tx_q->dma_tx_phy,
1601 							   GFP_KERNEL);
1602 			if (!tx_q->dma_tx)
1603 				goto err_dma;
1604 		}
1605 	}
1606 
1607 	return 0;
1608 
1609 err_dma:
1610 	free_dma_tx_desc_resources(priv);
1611 
1612 	return ret;
1613 }
1614 
1615 /**
1616  * alloc_dma_desc_resources - alloc TX/RX resources.
1617  * @priv: private structure
1618  * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow the zero-copy mechanism.
1622  */
1623 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1624 {
1625 	/* RX Allocation */
1626 	int ret = alloc_dma_rx_desc_resources(priv);
1627 
1628 	if (ret)
1629 		return ret;
1630 
1631 	ret = alloc_dma_tx_desc_resources(priv);
1632 
1633 	return ret;
1634 }
1635 
1636 /**
1637  * free_dma_desc_resources - free dma desc resources
1638  * @priv: private structure
1639  */
1640 static void free_dma_desc_resources(struct stmmac_priv *priv)
1641 {
1642 	/* Release the DMA RX socket buffers */
1643 	free_dma_rx_desc_resources(priv);
1644 
1645 	/* Release the DMA TX socket buffers */
1646 	free_dma_tx_desc_resources(priv);
1647 }
1648 
1649 /**
1650  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1651  *  @priv: driver private structure
1652  *  Description: It is used for enabling the rx queues in the MAC
1653  */
1654 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1655 {
1656 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1657 	int queue;
1658 	u8 mode;
1659 
1660 	for (queue = 0; queue < rx_queues_count; queue++) {
1661 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1662 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1663 	}
1664 }
1665 
1666 /**
1667  * stmmac_start_rx_dma - start RX DMA channel
1668  * @priv: driver private structure
1669  * @chan: RX channel index
1670  * Description:
 * This starts an RX DMA channel
1672  */
1673 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1674 {
1675 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1676 	stmmac_start_rx(priv, priv->ioaddr, chan);
1677 }
1678 
1679 /**
1680  * stmmac_start_tx_dma - start TX DMA channel
1681  * @priv: driver private structure
1682  * @chan: TX channel index
1683  * Description:
1684  * This starts a TX DMA channel
1685  */
1686 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1687 {
1688 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1689 	stmmac_start_tx(priv, priv->ioaddr, chan);
1690 }
1691 
1692 /**
1693  * stmmac_stop_rx_dma - stop RX DMA channel
1694  * @priv: driver private structure
1695  * @chan: RX channel index
1696  * Description:
 * This stops an RX DMA channel
1698  */
1699 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1700 {
1701 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1702 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1703 }
1704 
1705 /**
1706  * stmmac_stop_tx_dma - stop TX DMA channel
1707  * @priv: driver private structure
1708  * @chan: TX channel index
1709  * Description:
1710  * This stops a TX DMA channel
1711  */
1712 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1713 {
1714 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1715 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1716 }
1717 
1718 /**
1719  * stmmac_start_all_dma - start all RX and TX DMA channels
1720  * @priv: driver private structure
1721  * Description:
1722  * This starts all the RX and TX DMA channels
1723  */
1724 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1725 {
1726 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1727 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1728 	u32 chan = 0;
1729 
1730 	for (chan = 0; chan < rx_channels_count; chan++)
1731 		stmmac_start_rx_dma(priv, chan);
1732 
1733 	for (chan = 0; chan < tx_channels_count; chan++)
1734 		stmmac_start_tx_dma(priv, chan);
1735 }
1736 
1737 /**
1738  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1739  * @priv: driver private structure
1740  * Description:
1741  * This stops the RX and TX DMA channels
1742  */
1743 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1744 {
1745 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1746 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1747 	u32 chan = 0;
1748 
1749 	for (chan = 0; chan < rx_channels_count; chan++)
1750 		stmmac_stop_rx_dma(priv, chan);
1751 
1752 	for (chan = 0; chan < tx_channels_count; chan++)
1753 		stmmac_stop_tx_dma(priv, chan);
1754 }
1755 
1756 /**
1757  *  stmmac_dma_operation_mode - HW DMA operation mode
1758  *  @priv: driver private structure
1759  *  Description: it is used for configuring the DMA operation mode register in
1760  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1761  */
1762 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1763 {
1764 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1765 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1766 	int rxfifosz = priv->plat->rx_fifo_size;
1767 	int txfifosz = priv->plat->tx_fifo_size;
1768 	u32 txmode = 0;
1769 	u32 rxmode = 0;
1770 	u32 chan = 0;
1771 	u8 qmode = 0;
1772 
1773 	if (rxfifosz == 0)
1774 		rxfifosz = priv->dma_cap.rx_fifo_size;
1775 	if (txfifosz == 0)
1776 		txfifosz = priv->dma_cap.tx_fifo_size;
1777 
1778 	/* Adjust for real per queue fifo size */
1779 	rxfifosz /= rx_channels_count;
1780 	txfifosz /= tx_channels_count;
1781 
1782 	if (priv->plat->force_thresh_dma_mode) {
1783 		txmode = tc;
1784 		rxmode = tc;
1785 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1786 		/*
1787 		 * In case of GMAC, SF mode can be enabled
1788 		 * to perform the TX COE in HW. This depends on:
1789 		 * 1) TX COE if actually supported
1790 		 * 2) There is no bugged Jumbo frame support
1791 		 *    that needs to not insert csum in the TDES.
1792 		 */
1793 		txmode = SF_DMA_MODE;
1794 		rxmode = SF_DMA_MODE;
1795 		priv->xstats.threshold = SF_DMA_MODE;
1796 	} else {
1797 		txmode = tc;
1798 		rxmode = SF_DMA_MODE;
1799 	}
1800 
1801 	/* configure all channels */
1802 	for (chan = 0; chan < rx_channels_count; chan++) {
1803 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1804 
1805 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1806 				rxfifosz, qmode);
1807 	}
1808 
1809 	for (chan = 0; chan < tx_channels_count; chan++) {
1810 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1811 
1812 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1813 				txfifosz, qmode);
1814 	}
1815 }
1816 
1817 /**
1818  * stmmac_tx_clean - to manage the transmission completion
1819  * @priv: driver private structure
1820  * @queue: TX queue index
1821  * Description: it reclaims the transmit resources after transmission completes.
1822  */
1823 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1824 {
1825 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1826 	unsigned int bytes_compl = 0, pkts_compl = 0;
1827 	unsigned int entry;
1828 
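	/* Serialize against the xmit path while reclaiming descriptors */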
1829 	netif_tx_lock(priv->dev);
1830 
1831 	priv->xstats.tx_clean++;
1832 
1833 	entry = tx_q->dirty_tx;
1834 	while (entry != tx_q->cur_tx) {
1835 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1836 		struct dma_desc *p;
1837 		int status;
1838 
1839 		if (priv->extend_desc)
1840 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1841 		else
1842 			p = tx_q->dma_tx + entry;
1843 
1844 		status = stmmac_tx_status(priv, &priv->dev->stats,
1845 				&priv->xstats, p, priv->ioaddr);
1846 		/* Check if the descriptor is owned by the DMA */
1847 		if (unlikely(status & tx_dma_own))
1848 			break;
1849 
1850 		/* Make sure descriptor fields are read after reading
1851 		 * the own bit.
1852 		 */
1853 		dma_rmb();
1854 
		/* Just consider the last segment and ... */
1856 		if (likely(!(status & tx_not_ls))) {
1857 			/* ... verify the status error condition */
1858 			if (unlikely(status & tx_err)) {
1859 				priv->dev->stats.tx_errors++;
1860 			} else {
1861 				priv->dev->stats.tx_packets++;
1862 				priv->xstats.tx_pkt_n++;
1863 			}
1864 			stmmac_get_tx_hwtstamp(priv, p, skb);
1865 		}
1866 
1867 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1868 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1869 				dma_unmap_page(priv->device,
1870 					       tx_q->tx_skbuff_dma[entry].buf,
1871 					       tx_q->tx_skbuff_dma[entry].len,
1872 					       DMA_TO_DEVICE);
1873 			else
1874 				dma_unmap_single(priv->device,
1875 						 tx_q->tx_skbuff_dma[entry].buf,
1876 						 tx_q->tx_skbuff_dma[entry].len,
1877 						 DMA_TO_DEVICE);
1878 			tx_q->tx_skbuff_dma[entry].buf = 0;
1879 			tx_q->tx_skbuff_dma[entry].len = 0;
1880 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1881 		}
1882 
1883 		stmmac_clean_desc3(priv, tx_q, p);
1884 
1885 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1886 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1887 
1888 		if (likely(skb != NULL)) {
1889 			pkts_compl++;
1890 			bytes_compl += skb->len;
1891 			dev_consume_skb_any(skb);
1892 			tx_q->tx_skbuff[entry] = NULL;
1893 		}
1894 
1895 		stmmac_release_tx_desc(priv, p, priv->mode);
1896 
1897 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1898 	}
1899 	tx_q->dirty_tx = entry;
1900 
1901 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1902 				  pkts_compl, bytes_compl);
1903 
1904 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1905 								queue))) &&
1906 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1907 
1908 		netif_dbg(priv, tx_done, priv->dev,
1909 			  "%s: restart transmit\n", __func__);
1910 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1911 	}
1912 
1913 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1914 		stmmac_enable_eee_mode(priv);
1915 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1916 	}
1917 	netif_tx_unlock(priv->dev);
1918 }
1919 
1920 /**
1921  * stmmac_tx_err - to manage the tx error
1922  * @priv: driver private structure
1923  * @chan: channel index
1924  * Description: it cleans the descriptors and restarts the transmission
1925  * in case of transmission errors.
1926  */
1927 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1928 {
1929 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1930 	int i;
1931 
1932 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1933 
1934 	stmmac_stop_tx_dma(priv, chan);
1935 	dma_free_tx_skbufs(priv, chan);
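	/* Re-initialize all the descriptors of the TX ring */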
1936 	for (i = 0; i < DMA_TX_SIZE; i++)
1937 		if (priv->extend_desc)
1938 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1939 					priv->mode, (i == DMA_TX_SIZE - 1));
1940 		else
1941 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1942 					priv->mode, (i == DMA_TX_SIZE - 1));
1943 	tx_q->dirty_tx = 0;
1944 	tx_q->cur_tx = 0;
1945 	tx_q->mss = 0;
1946 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1947 	stmmac_start_tx_dma(priv, chan);
1948 
1949 	priv->dev->stats.tx_errors++;
1950 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1951 }
1952 
1953 /**
1954  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1955  *  @priv: driver private structure
1956  *  @txmode: TX operating mode
1957  *  @rxmode: RX operating mode
1958  *  @chan: channel index
1959  *  Description: it is used for configuring of the DMA operation mode in
1960  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1961  *  mode.
1962  */
1963 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1964 					  u32 rxmode, u32 chan)
1965 {
1966 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1967 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1968 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1969 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1970 	int rxfifosz = priv->plat->rx_fifo_size;
1971 	int txfifosz = priv->plat->tx_fifo_size;
1972 
1973 	if (rxfifosz == 0)
1974 		rxfifosz = priv->dma_cap.rx_fifo_size;
1975 	if (txfifosz == 0)
1976 		txfifosz = priv->dma_cap.tx_fifo_size;
1977 
1978 	/* Adjust for real per queue fifo size */
1979 	rxfifosz /= rx_channels_count;
1980 	txfifosz /= tx_channels_count;
1981 
1982 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
1983 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
1984 }
1985 
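/**
 * stmmac_safety_feat_interrupt - check the safety features IRQ status
 * @priv: driver private structure
 * Description: it reads the safety feature interrupt status and triggers
 * the global error recovery if an error is reported.
 * Return: true if the global error recovery was triggered.
 */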
1986 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
1987 {
1988 	int ret;
1989 
1990 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
1991 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
1992 	if (ret && (ret != -EINVAL)) {
1993 		stmmac_global_err(priv);
1994 		return true;
1995 	}
1996 
1997 	return false;
1998 }
1999 
2000 /**
2001  * stmmac_dma_interrupt - DMA ISR
2002  * @priv: driver private structure
2003  * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
2006  */
2007 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2008 {
2009 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2010 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2011 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2012 				tx_channel_count : rx_channel_count;
2013 	u32 chan;
2014 	bool poll_scheduled = false;
2015 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2016 
2017 	/* Make sure we never check beyond our status buffer. */
2018 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2019 		channels_to_check = ARRAY_SIZE(status);
2020 
2021 	/* Each DMA channel can be used for rx and tx simultaneously, yet
2022 	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2023 	 * stmmac_channel struct.
2024 	 * Because of this, stmmac_poll currently checks (and possibly wakes)
2025 	 * all tx queues rather than just a single tx queue.
2026 	 */
2027 	for (chan = 0; chan < channels_to_check; chan++)
2028 		status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2029 				&priv->xstats, chan);
2030 
2031 	for (chan = 0; chan < rx_channel_count; chan++) {
2032 		if (likely(status[chan] & handle_rx)) {
2033 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2034 
2035 			if (likely(napi_schedule_prep(&rx_q->napi))) {
2036 				stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2037 				__napi_schedule(&rx_q->napi);
2038 				poll_scheduled = true;
2039 			}
2040 		}
2041 	}
2042 
2043 	/* If we scheduled poll, we already know that tx queues will be checked.
2044 	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2045 	 * completed transmission, if so, call stmmac_poll (once).
2046 	 */
2047 	if (!poll_scheduled) {
2048 		for (chan = 0; chan < tx_channel_count; chan++) {
2049 			if (status[chan] & handle_tx) {
2050 				/* It doesn't matter what rx queue we choose
2051 				 * here. We use 0 since it always exists.
2052 				 */
2053 				struct stmmac_rx_queue *rx_q =
2054 					&priv->rx_queue[0];
2055 
2056 				if (likely(napi_schedule_prep(&rx_q->napi))) {
2057 					stmmac_disable_dma_irq(priv,
2058 							priv->ioaddr, chan);
2059 					__napi_schedule(&rx_q->napi);
2060 				}
2061 				break;
2062 			}
2063 		}
2064 	}
2065 
2066 	for (chan = 0; chan < tx_channel_count; chan++) {
2067 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2068 			/* Try to bump up the dma threshold on this failure */
2069 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2070 			    (tc <= 256)) {
2071 				tc += 64;
2072 				if (priv->plat->force_thresh_dma_mode)
2073 					stmmac_set_dma_operation_mode(priv,
2074 								      tc,
2075 								      tc,
2076 								      chan);
2077 				else
2078 					stmmac_set_dma_operation_mode(priv,
2079 								    tc,
2080 								    SF_DMA_MODE,
2081 								    chan);
2082 				priv->xstats.threshold = tc;
2083 			}
2084 		} else if (unlikely(status[chan] == tx_hard_error)) {
2085 			stmmac_tx_err(priv, chan);
2086 		}
2087 	}
2088 }
2089 
2090 /**
 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq; in fact, the counters are managed
 * in SW.
2094  */
2095 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2096 {
2097 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2098 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2099 
2100 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2101 
2102 	if (priv->dma_cap.rmon) {
2103 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2104 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else {
		netdev_info(priv->dev, "No MAC Management Counters available\n");
	}
2107 }
2108 
2109 /**
2110  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2111  * @priv: driver private structure
2112  * Description:
2113  *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional features/functions.
 *  This can also be used to override the value passed through the
 *  platform and is necessary for old MAC10/100 and GMAC chips.
2117  */
2118 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2119 {
2120 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2121 }
2122 
2123 /**
2124  * stmmac_check_ether_addr - check if the MAC addr is valid
2125  * @priv: driver private structure
2126  * Description:
 * it verifies that the MAC address is valid; if not, it reads the address
 * from the HW and, if that is also invalid, generates a random one.
2129  */
2130 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2131 {
2132 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2133 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2134 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2135 			eth_hw_addr_random(priv->dev);
2136 		netdev_info(priv->dev, "device MAC address %pM\n",
2137 			    priv->dev->dev_addr);
2138 	}
2139 }
2140 
2141 /**
2142  * stmmac_init_dma_engine - DMA init.
2143  * @priv: driver private structure
2144  * Description:
2145  * It inits the DMA invoking the specific MAC/GMAC callback.
2146  * Some DMA parameters can be passed from the platform;
 * if these are not passed, a default is kept for the MAC or GMAC.
2148  */
2149 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2150 {
2151 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2152 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2153 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2154 	struct stmmac_rx_queue *rx_q;
2155 	struct stmmac_tx_queue *tx_q;
2156 	u32 chan = 0;
2157 	int atds = 0;
2158 	int ret = 0;
2159 
2160 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2161 		dev_err(priv->device, "Invalid DMA configuration\n");
2162 		return -EINVAL;
2163 	}
2164 
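	/* Extended descriptors in ring mode need the alternate descriptor
	 * size (ATDS) to be set in the DMA.
	 */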
2165 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2166 		atds = 1;
2167 
2168 	ret = stmmac_reset(priv, priv->ioaddr);
2169 	if (ret) {
2170 		dev_err(priv->device, "Failed to reset the dma\n");
2171 		return ret;
2172 	}
2173 
2174 	/* DMA RX Channel Configuration */
2175 	for (chan = 0; chan < rx_channels_count; chan++) {
2176 		rx_q = &priv->rx_queue[chan];
2177 
2178 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2179 				    rx_q->dma_rx_phy, chan);
2180 
2181 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2182 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2183 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2184 				       rx_q->rx_tail_addr, chan);
2185 	}
2186 
2187 	/* DMA TX Channel Configuration */
2188 	for (chan = 0; chan < tx_channels_count; chan++) {
2189 		tx_q = &priv->tx_queue[chan];
2190 
2191 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2192 				    tx_q->dma_tx_phy, chan);
2193 
2194 		tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2195 			    (DMA_TX_SIZE * sizeof(struct dma_desc));
2196 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2197 				       tx_q->tx_tail_addr, chan);
2198 	}
2199 
2200 	/* DMA CSR Channel configuration */
2201 	for (chan = 0; chan < dma_csr_ch; chan++)
2202 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2203 
2204 	/* DMA Configuration */
2205 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2206 
2207 	if (priv->plat->axi)
2208 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2209 
2210 	return ret;
2211 }
2212 
2213 /**
2214  * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: pointer to the timer_list embedded in the driver private structure
 * Description:
 * This is the timer handler that directly invokes stmmac_tx_clean for
 * every TX queue.
2218  */
2219 static void stmmac_tx_timer(struct timer_list *t)
2220 {
2221 	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2222 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2223 	u32 queue;
2224 
2225 	/* let's scan all the tx queues */
2226 	for (queue = 0; queue < tx_queues_count; queue++)
2227 		stmmac_tx_clean(priv, queue);
2228 }
2229 
2230 /**
2231  * stmmac_init_tx_coalesce - init tx mitigation options.
2232  * @priv: driver private structure
2233  * Description:
2234  * This inits the transmit coalesce parameters: i.e. timer rate,
2235  * timer handler and default threshold used for enabling the
2236  * interrupt on completion bit.
2237  */
2238 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2239 {
2240 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2241 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2242 	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2243 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2244 	add_timer(&priv->txtimer);
2245 }
2246 
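/**
 * stmmac_set_rings_length - configure the TX/RX descriptor ring lengths
 * @priv: driver private structure
 * Description: it programs the ring length register of each TX and RX
 * DMA channel.
 */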
2247 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2248 {
2249 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2250 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2251 	u32 chan;
2252 
2253 	/* set TX ring length */
2254 	for (chan = 0; chan < tx_channels_count; chan++)
2255 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2256 				(DMA_TX_SIZE - 1), chan);
2257 
2258 	/* set RX ring length */
2259 	for (chan = 0; chan < rx_channels_count; chan++)
2260 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2261 				(DMA_RX_SIZE - 1), chan);
2262 }
2263 
2264 /**
2265  *  stmmac_set_tx_queue_weight - Set TX queue weight
2266  *  @priv: driver private structure
 *  Description: It is used for setting the TX queue weights
2268  */
2269 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2270 {
2271 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2272 	u32 weight;
2273 	u32 queue;
2274 
2275 	for (queue = 0; queue < tx_queues_count; queue++) {
2276 		weight = priv->plat->tx_queues_cfg[queue].weight;
2277 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2278 	}
2279 }
2280 
2281 /**
2282  *  stmmac_configure_cbs - Configure CBS in TX queue
2283  *  @priv: driver private structure
2284  *  Description: It is used for configuring CBS in AVB TX queues
2285  */
2286 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2287 {
2288 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2289 	u32 mode_to_use;
2290 	u32 queue;
2291 
2292 	/* queue 0 is reserved for legacy traffic */
2293 	for (queue = 1; queue < tx_queues_count; queue++) {
2294 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2295 		if (mode_to_use == MTL_QUEUE_DCB)
2296 			continue;
2297 
2298 		stmmac_config_cbs(priv, priv->hw,
2299 				priv->plat->tx_queues_cfg[queue].send_slope,
2300 				priv->plat->tx_queues_cfg[queue].idle_slope,
2301 				priv->plat->tx_queues_cfg[queue].high_credit,
2302 				priv->plat->tx_queues_cfg[queue].low_credit,
2303 				queue);
2304 	}
2305 }
2306 
2307 /**
2308  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2309  *  @priv: driver private structure
2310  *  Description: It is used for mapping RX queues to RX dma channels
2311  */
2312 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2313 {
2314 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2315 	u32 queue;
2316 	u32 chan;
2317 
2318 	for (queue = 0; queue < rx_queues_count; queue++) {
2319 		chan = priv->plat->rx_queues_cfg[queue].chan;
2320 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2321 	}
2322 }
2323 
2324 /**
2325  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2326  *  @priv: driver private structure
2327  *  Description: It is used for configuring the RX Queue Priority
2328  */
2329 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2330 {
2331 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2332 	u32 queue;
2333 	u32 prio;
2334 
2335 	for (queue = 0; queue < rx_queues_count; queue++) {
2336 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2337 			continue;
2338 
2339 		prio = priv->plat->rx_queues_cfg[queue].prio;
2340 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2341 	}
2342 }
2343 
2344 /**
2345  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2346  *  @priv: driver private structure
2347  *  Description: It is used for configuring the TX Queue Priority
2348  */
2349 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2350 {
2351 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2352 	u32 queue;
2353 	u32 prio;
2354 
2355 	for (queue = 0; queue < tx_queues_count; queue++) {
2356 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2357 			continue;
2358 
2359 		prio = priv->plat->tx_queues_cfg[queue].prio;
2360 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2361 	}
2362 }
2363 
2364 /**
2365  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2366  *  @priv: driver private structure
2367  *  Description: It is used for configuring the RX queue routing
2368  */
2369 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2370 {
2371 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2372 	u32 queue;
2373 	u8 packet;
2374 
2375 	for (queue = 0; queue < rx_queues_count; queue++) {
2376 		/* no specific packet type routing specified for the queue */
2377 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2378 			continue;
2379 
2380 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2381 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2382 	}
2383 }
2384 
2385 /**
2386  *  stmmac_mtl_configuration - Configure MTL
2387  *  @priv: driver private structure
 *  Description: It is used for configuring the MTL
2389  */
2390 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2391 {
2392 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2393 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2394 
2395 	if (tx_queues_count > 1)
2396 		stmmac_set_tx_queue_weight(priv);
2397 
2398 	/* Configure MTL RX algorithms */
2399 	if (rx_queues_count > 1)
2400 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2401 				priv->plat->rx_sched_algorithm);
2402 
2403 	/* Configure MTL TX algorithms */
2404 	if (tx_queues_count > 1)
2405 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2406 				priv->plat->tx_sched_algorithm);
2407 
2408 	/* Configure CBS in AVB TX queues */
2409 	if (tx_queues_count > 1)
2410 		stmmac_configure_cbs(priv);
2411 
2412 	/* Map RX MTL to DMA channels */
2413 	stmmac_rx_queue_dma_chan_map(priv);
2414 
2415 	/* Enable MAC RX Queues */
2416 	stmmac_mac_enable_rx_queues(priv);
2417 
2418 	/* Set RX priorities */
2419 	if (rx_queues_count > 1)
2420 		stmmac_mac_config_rx_queues_prio(priv);
2421 
2422 	/* Set TX priorities */
2423 	if (tx_queues_count > 1)
2424 		stmmac_mac_config_tx_queues_prio(priv);
2425 
2426 	/* Set RX routing */
2427 	if (rx_queues_count > 1)
2428 		stmmac_mac_config_rx_queues_routing(priv);
2429 }
2430 
2431 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2432 {
2433 	if (priv->dma_cap.asp) {
2434 		netdev_info(priv->dev, "Enabling Safety Features\n");
2435 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2436 	} else {
2437 		netdev_info(priv->dev, "No Safety Features support found\n");
2438 	}
2439 }
2440 
2441 /**
2442  * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  @init_ptp: initialize PTP if set
 *  Description:
 *  this is the main function to setup the HW in a usable state: the DMA
 *  engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers) and the DMA is made ready to start
 *  receiving and transmitting.
2449  *  Return value:
2450  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2451  *  file on failure.
2452  */
2453 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2454 {
2455 	struct stmmac_priv *priv = netdev_priv(dev);
2456 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2457 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2458 	u32 chan;
2459 	int ret;
2460 
2461 	/* DMA initialization and SW reset */
2462 	ret = stmmac_init_dma_engine(priv);
2463 	if (ret < 0) {
2464 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2465 			   __func__);
2466 		return ret;
2467 	}
2468 
2469 	/* Copy the MAC addr into the HW  */
2470 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2471 
2472 	/* PS and related bits will be programmed according to the speed */
2473 	if (priv->hw->pcs) {
2474 		int speed = priv->plat->mac_port_sel_speed;
2475 
2476 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2477 		    (speed == SPEED_1000)) {
2478 			priv->hw->ps = speed;
2479 		} else {
2480 			dev_warn(priv->device, "invalid port speed\n");
2481 			priv->hw->ps = 0;
2482 		}
2483 	}
2484 
2485 	/* Initialize the MAC Core */
2486 	stmmac_core_init(priv, priv->hw, dev);
2487 
	/* Initialize MTL */
2489 	stmmac_mtl_configuration(priv);
2490 
2491 	/* Initialize Safety Features */
2492 	stmmac_safety_feat_configuration(priv);
2493 
2494 	ret = stmmac_rx_ipc(priv, priv->hw);
2495 	if (!ret) {
2496 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2497 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2498 		priv->hw->rx_csum = 0;
2499 	}
2500 
2501 	/* Enable the MAC Rx/Tx */
2502 	stmmac_mac_set(priv, priv->ioaddr, true);
2503 
2504 	/* Set the HW DMA mode and the COE */
2505 	stmmac_dma_operation_mode(priv);
2506 
2507 	stmmac_mmc_setup(priv);
2508 
2509 	if (init_ptp) {
2510 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2511 		if (ret < 0)
2512 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2513 
2514 		ret = stmmac_init_ptp(priv);
2515 		if (ret == -EOPNOTSUPP)
2516 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2517 		else if (ret)
2518 			netdev_warn(priv->dev, "PTP init failed\n");
2519 	}
2520 
2521 #ifdef CONFIG_DEBUG_FS
2522 	ret = stmmac_init_fs(dev);
2523 	if (ret < 0)
2524 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2525 			    __func__);
2526 #endif
2527 	/* Start the ball rolling... */
2528 	stmmac_start_all_dma(priv);
2529 
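	/* Program the default LPI timer value used by the EEE setup */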
2530 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2531 
2532 	if (priv->use_riwt) {
2533 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2534 		if (!ret)
2535 			priv->rx_riwt = MAX_DMA_RIWT;
2536 	}
2537 
2538 	if (priv->hw->pcs)
2539 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2540 
2541 	/* set TX and RX rings length */
2542 	stmmac_set_rings_length(priv);
2543 
2544 	/* Enable TSO */
2545 	if (priv->tso) {
2546 		for (chan = 0; chan < tx_cnt; chan++)
2547 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2548 	}
2549 
2550 	return 0;
2551 }
2552 
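/**
 * stmmac_hw_teardown - undo the HW setup on an open failure
 * @dev: pointer to the device structure
 * Description: it releases what stmmac_hw_setup took; currently this
 * only means disabling the PTP reference clock.
 */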
2553 static void stmmac_hw_teardown(struct net_device *dev)
2554 {
2555 	struct stmmac_priv *priv = netdev_priv(dev);
2556 
2557 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2558 }
2559 
2560 /**
2561  *  stmmac_open - open entry point of the driver
2562  *  @dev : pointer to the device structure.
2563  *  Description:
2564  *  This function is the open entry point of the driver.
2565  *  Return value:
2566  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2567  *  file on failure.
2568  */
2569 static int stmmac_open(struct net_device *dev)
2570 {
2571 	struct stmmac_priv *priv = netdev_priv(dev);
2572 	int ret;
2573 
2574 	stmmac_check_ether_addr(priv);
2575 
2576 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2577 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2578 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2579 		ret = stmmac_init_phy(dev);
2580 		if (ret) {
2581 			netdev_err(priv->dev,
2582 				   "%s: Cannot attach to PHY (error: %d)\n",
2583 				   __func__, ret);
2584 			return ret;
2585 		}
2586 	}
2587 
2588 	/* Extra statistics */
2589 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2590 	priv->xstats.threshold = tc;
2591 
2592 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2593 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2594 
2595 	ret = alloc_dma_desc_resources(priv);
2596 	if (ret < 0) {
2597 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2598 			   __func__);
2599 		goto dma_desc_error;
2600 	}
2601 
2602 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2603 	if (ret < 0) {
2604 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2605 			   __func__);
2606 		goto init_error;
2607 	}
2608 
2609 	ret = stmmac_hw_setup(dev, true);
2610 	if (ret < 0) {
2611 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2612 		goto init_error;
2613 	}
2614 
2615 	stmmac_init_tx_coalesce(priv);
2616 
2617 	if (dev->phydev)
2618 		phy_start(dev->phydev);
2619 
	/* Request the main IRQ line */
2621 	ret = request_irq(dev->irq, stmmac_interrupt,
2622 			  IRQF_SHARED, dev->name, dev);
2623 	if (unlikely(ret < 0)) {
2624 		netdev_err(priv->dev,
2625 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2626 			   __func__, dev->irq, ret);
2627 		goto irq_error;
2628 	}
2629 
	/* Request the Wake IRQ in case another line is used for WoL */
2631 	if (priv->wol_irq != dev->irq) {
2632 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2633 				  IRQF_SHARED, dev->name, dev);
2634 		if (unlikely(ret < 0)) {
2635 			netdev_err(priv->dev,
2636 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2637 				   __func__, priv->wol_irq, ret);
2638 			goto wolirq_error;
2639 		}
2640 	}
2641 
	/* Request the LPI IRQ in case a dedicated line is used */
2643 	if (priv->lpi_irq > 0) {
2644 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2645 				  dev->name, dev);
2646 		if (unlikely(ret < 0)) {
2647 			netdev_err(priv->dev,
2648 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2649 				   __func__, priv->lpi_irq, ret);
2650 			goto lpiirq_error;
2651 		}
2652 	}
2653 
2654 	stmmac_enable_all_queues(priv);
2655 	stmmac_start_all_queues(priv);
2656 
2657 	return 0;
2658 
2659 lpiirq_error:
2660 	if (priv->wol_irq != dev->irq)
2661 		free_irq(priv->wol_irq, dev);
2662 wolirq_error:
2663 	free_irq(dev->irq, dev);
2664 irq_error:
2665 	if (dev->phydev)
2666 		phy_stop(dev->phydev);
2667 
2668 	del_timer_sync(&priv->txtimer);
2669 	stmmac_hw_teardown(dev);
2670 init_error:
2671 	free_dma_desc_resources(priv);
2672 dma_desc_error:
2673 	if (dev->phydev)
2674 		phy_disconnect(dev->phydev);
2675 
2676 	return ret;
2677 }
2678 
2679 /**
2680  *  stmmac_release - close entry point of the driver
2681  *  @dev : device pointer.
2682  *  Description:
2683  *  This is the stop entry point of the driver.
2684  */
2685 static int stmmac_release(struct net_device *dev)
2686 {
2687 	struct stmmac_priv *priv = netdev_priv(dev);
2688 
2689 	if (priv->eee_enabled)
2690 		del_timer_sync(&priv->eee_ctrl_timer);
2691 
2692 	/* Stop and disconnect the PHY */
2693 	if (dev->phydev) {
2694 		phy_stop(dev->phydev);
2695 		phy_disconnect(dev->phydev);
2696 	}
2697 
2698 	stmmac_stop_all_queues(priv);
2699 
2700 	stmmac_disable_all_queues(priv);
2701 
2702 	del_timer_sync(&priv->txtimer);
2703 
2704 	/* Free the IRQ lines */
2705 	free_irq(dev->irq, dev);
2706 	if (priv->wol_irq != dev->irq)
2707 		free_irq(priv->wol_irq, dev);
2708 	if (priv->lpi_irq > 0)
2709 		free_irq(priv->lpi_irq, dev);
2710 
2711 	/* Stop TX/RX DMA and clear the descriptors */
2712 	stmmac_stop_all_dma(priv);
2713 
2714 	/* Release and free the Rx/Tx resources */
2715 	free_dma_desc_resources(priv);
2716 
2717 	/* Disable the MAC Rx/Tx */
2718 	stmmac_mac_set(priv, priv->ioaddr, false);
2719 
2720 	netif_carrier_off(dev);
2721 
2722 #ifdef CONFIG_DEBUG_FS
2723 	stmmac_exit_fs(dev);
2724 #endif
2725 
2726 	stmmac_release_ptp(priv);
2727 
2728 	return 0;
2729 }
2730 
2731 /**
 *  stmmac_tso_allocator - allocate and fill the TSO TX descriptors
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills a descriptor and requests new descriptors according
 *  to the buffer length to fill
2741  */
2742 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2743 				 int total_len, bool last_segment, u32 queue)
2744 {
2745 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2746 	struct dma_desc *desc;
2747 	u32 buff_size;
2748 	int tmp_len;
2749 
2750 	tmp_len = total_len;
2751 
2752 	while (tmp_len > 0) {
2753 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2754 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2755 		desc = tx_q->dma_tx + tx_q->cur_tx;
2756 
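		/* Point this descriptor at the next chunk of the payload */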
2757 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2758 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2759 			    TSO_MAX_BUFF_SIZE : tmp_len;
2760 
2761 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2762 				0, 1,
2763 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2764 				0, 0);
2765 
2766 		tmp_len -= TSO_MAX_BUFF_SIZE;
2767 	}
2768 }
2769 
2770 /**
2771  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2772  *  @skb : the socket buffer
2773  *  @dev : device pointer
2774  *  Description: this is the transmit function that is called on TSO frames
2775  *  (support available on GMAC4 and newer chips).
 *  Diagram below shows the ring programming in case of TSO frames:
2777  *
2778  *  First Descriptor
2779  *   --------
2780  *   | DES0 |---> buffer1 = L2/L3/L4 header
2781  *   | DES1 |---> TCP Payload (can continue on next descr...)
2782  *   | DES2 |---> buffer 1 and 2 len
2783  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2784  *   --------
2785  *	|
2786  *     ...
2787  *	|
2788  *   --------
2789  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2790  *   | DES1 | --|
2791  *   | DES2 | --> buffer 1 and 2 len
2792  *   | DES3 |
2793  *   --------
2794  *
 * The MSS is fixed while TSO is enabled, so the TDES3 context field only
 * needs to be programmed when the MSS value changes.
2796  */
2797 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2798 {
2799 	struct dma_desc *desc, *first, *mss_desc = NULL;
2800 	struct stmmac_priv *priv = netdev_priv(dev);
2801 	int nfrags = skb_shinfo(skb)->nr_frags;
2802 	u32 queue = skb_get_queue_mapping(skb);
2803 	unsigned int first_entry, des;
2804 	struct stmmac_tx_queue *tx_q;
2805 	int tmp_pay_len = 0;
2806 	u32 pay_len, mss;
2807 	u8 proto_hdr_len;
2808 	int i;
2809 
2810 	tx_q = &priv->tx_queue[queue];
2811 
2812 	/* Compute header lengths */
2813 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2814 
	/* Desc availability based on threshold should be safe enough */
2816 	if (unlikely(stmmac_tx_avail(priv, queue) <
2817 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2818 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2819 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2820 								queue));
2821 			/* This is a hard error, log it. */
2822 			netdev_err(priv->dev,
2823 				   "%s: Tx Ring full when queue awake\n",
2824 				   __func__);
2825 		}
2826 		return NETDEV_TX_BUSY;
2827 	}
2828 
2829 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2830 
2831 	mss = skb_shinfo(skb)->gso_size;
2832 
2833 	/* set new MSS value if needed */
2834 	if (mss != tx_q->mss) {
2835 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2836 		stmmac_set_mss(priv, mss_desc, mss);
2837 		tx_q->mss = mss;
2838 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2839 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2840 	}
2841 
2842 	if (netif_msg_tx_queued(priv)) {
2843 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2844 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2845 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2846 			skb->data_len);
2847 	}
2848 
2849 	first_entry = tx_q->cur_tx;
2850 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2851 
2852 	desc = tx_q->dma_tx + first_entry;
2853 	first = desc;
2854 
2855 	/* first descriptor: fill Headers on Buf1 */
2856 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2857 			     DMA_TO_DEVICE);
2858 	if (dma_mapping_error(priv->device, des))
2859 		goto dma_map_err;
2860 
2861 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2862 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2863 
2864 	first->des0 = cpu_to_le32(des);
2865 
2866 	/* Fill start of payload in buff2 of first descriptor */
2867 	if (pay_len)
2868 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2869 
2870 	/* If needed take extra descriptors to fill the remaining payload */
2871 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2872 
2873 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2874 
2875 	/* Prepare fragments */
2876 	for (i = 0; i < nfrags; i++) {
2877 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2878 
2879 		des = skb_frag_dma_map(priv->device, frag, 0,
2880 				       skb_frag_size(frag),
2881 				       DMA_TO_DEVICE);
2882 		if (dma_mapping_error(priv->device, des))
2883 			goto dma_map_err;
2884 
2885 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2886 				     (i == nfrags - 1), queue);
2887 
2888 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2889 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2890 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2891 	}
2892 
2893 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2894 
2895 	/* Only the last descriptor gets to point to the skb. */
2896 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2897 
2898 	/* We've used all descriptors we need for this skb, however,
2899 	 * advance cur_tx so that it references a fresh descriptor.
2900 	 * ndo_start_xmit will fill this descriptor the next time it's
2901 	 * called and stmmac_tx_clean may clean up to this descriptor.
2902 	 */
2903 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2904 
2905 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2906 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2907 			  __func__);
2908 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2909 	}
2910 
2911 	dev->stats.tx_bytes += skb->len;
2912 	priv->xstats.tx_tso_frames++;
2913 	priv->xstats.tx_tso_nfrags += nfrags;
2914 
2915 	/* Manage tx mitigation */
2916 	priv->tx_count_frames += nfrags + 1;
2917 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2918 		mod_timer(&priv->txtimer,
2919 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2920 	} else {
2921 		priv->tx_count_frames = 0;
2922 		stmmac_set_tx_ic(priv, desc);
2923 		priv->xstats.tx_set_ic_bit++;
2924 	}
2925 
2926 	skb_tx_timestamp(skb);
2927 
2928 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2929 		     priv->hwts_tx_en)) {
2930 		/* declare that device is doing timestamping */
2931 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2932 		stmmac_enable_tx_timestamp(priv, first);
2933 	}
2934 
2935 	/* Complete the first descriptor before granting the DMA */
2936 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2937 			proto_hdr_len,
2938 			pay_len,
2939 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2940 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2941 
2942 	/* If context desc is used to change MSS */
2943 	if (mss_desc) {
		/* Make sure that the first descriptor has been completely
		 * written, including its OWN bit. The MSS descriptor
		 * actually comes before the first descriptor in the ring,
		 * so its OWN bit must be the last thing written.
		 */
2949 		dma_wmb();
2950 		stmmac_set_tx_owner(priv, mss_desc);
2951 	}
2952 
	/* The OWN bit must be the latest setting done when preparing the
	 * descriptor and then a barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
2956 	 */
2957 	wmb();
2958 
2959 	if (netif_msg_pktdata(priv)) {
2960 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2961 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2962 			tx_q->cur_tx, first, nfrags);
2963 
2964 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2965 
2966 		pr_info(">>> frame to be transmitted: ");
2967 		print_pkt(skb->data, skb_headlen(skb));
2968 	}
2969 
2970 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2971 
2972 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2973 
2974 	return NETDEV_TX_OK;
2975 
2976 dma_map_err:
2977 	dev_err(priv->device, "Tx dma map failed\n");
2978 	dev_kfree_skb(skb);
2979 	priv->dev->stats.tx_dropped++;
2980 	return NETDEV_TX_OK;
2981 }
2982 
2983 /**
2984  *  stmmac_xmit - Tx entry point of the driver
2985  *  @skb : the socket buffer
2986  *  @dev : device pointer
2987  *  Description : this is the tx entry point of the driver.
2988  *  It programs the chain or the ring and supports oversized frames
2989  *  and SG feature.
2990  */
2991 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2992 {
2993 	struct stmmac_priv *priv = netdev_priv(dev);
2994 	unsigned int nopaged_len = skb_headlen(skb);
2995 	int i, csum_insertion = 0, is_jumbo = 0;
2996 	u32 queue = skb_get_queue_mapping(skb);
2997 	int nfrags = skb_shinfo(skb)->nr_frags;
2998 	int entry;
2999 	unsigned int first_entry;
3000 	struct dma_desc *desc, *first;
3001 	struct stmmac_tx_queue *tx_q;
3002 	unsigned int enh_desc;
3003 	unsigned int des;
3004 
3005 	tx_q = &priv->tx_queue[queue];
3006 
3007 	/* Manage oversized TCP frames for GMAC4 device */
3008 	if (skb_is_gso(skb) && priv->tso) {
3009 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3010 			return stmmac_tso_xmit(skb, dev);
3011 	}
3012 
3013 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3014 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3015 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3016 								queue));
3017 			/* This is a hard error, log it. */
3018 			netdev_err(priv->dev,
3019 				   "%s: Tx Ring full when queue awake\n",
3020 				   __func__);
3021 		}
3022 		return NETDEV_TX_BUSY;
3023 	}
3024 
3025 	if (priv->tx_path_in_lpi_mode)
3026 		stmmac_disable_eee_mode(priv);
3027 
3028 	entry = tx_q->cur_tx;
3029 	first_entry = entry;
3030 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3031 
3032 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3033 
3034 	if (likely(priv->extend_desc))
3035 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3036 	else
3037 		desc = tx_q->dma_tx + entry;
3038 
3039 	first = desc;
3040 
3041 	enh_desc = priv->plat->enh_desc;
3042 	/* To program the descriptors according to the size of the frame */
3043 	if (enh_desc)
3044 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3045 
3046 	if (unlikely(is_jumbo)) {
3047 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3048 		if (unlikely(entry < 0) && (entry != -EINVAL))
3049 			goto dma_map_err;
3050 	}
3051 
3052 	for (i = 0; i < nfrags; i++) {
3053 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3054 		int len = skb_frag_size(frag);
3055 		bool last_segment = (i == (nfrags - 1));
3056 
3057 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3058 		WARN_ON(tx_q->tx_skbuff[entry]);
3059 
3060 		if (likely(priv->extend_desc))
3061 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3062 		else
3063 			desc = tx_q->dma_tx + entry;
3064 
3065 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3066 				       DMA_TO_DEVICE);
3067 		if (dma_mapping_error(priv->device, des))
3068 			goto dma_map_err; /* should reuse desc w/o issues */
3069 
3070 		tx_q->tx_skbuff_dma[entry].buf = des;
3071 
3072 		stmmac_set_desc_addr(priv, desc, des);
3073 
3074 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3075 		tx_q->tx_skbuff_dma[entry].len = len;
3076 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3077 
3078 		/* Prepare the descriptor and set the own bit too */
3079 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3080 				priv->mode, 1, last_segment, skb->len);
3081 	}
3082 
3083 	/* Only the last descriptor gets to point to the skb. */
3084 	tx_q->tx_skbuff[entry] = skb;
3085 
3086 	/* We've used all descriptors we need for this skb, however,
3087 	 * advance cur_tx so that it references a fresh descriptor.
3088 	 * ndo_start_xmit will fill this descriptor the next time it's
3089 	 * called and stmmac_tx_clean may clean up to this descriptor.
3090 	 */
3091 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3092 	tx_q->cur_tx = entry;
3093 
3094 	if (netif_msg_pktdata(priv)) {
3095 		void *tx_head;
3096 
3097 		netdev_dbg(priv->dev,
3098 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3099 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3100 			   entry, first, nfrags);
3101 
3102 		if (priv->extend_desc)
3103 			tx_head = (void *)tx_q->dma_etx;
3104 		else
3105 			tx_head = (void *)tx_q->dma_tx;
3106 
3107 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3108 
3109 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3110 		print_pkt(skb->data, skb->len);
3111 	}
3112 
3113 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3114 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3115 			  __func__);
3116 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3117 	}
3118 
3119 	dev->stats.tx_bytes += skb->len;
3120 
3121 	/* According to the coalesce parameter the IC bit for the latest
3122 	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
3124 	 * element in case of no SG.
3125 	 */
3126 	priv->tx_count_frames += nfrags + 1;
3127 	if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
3128 	    !priv->tx_timer_armed) {
3129 		mod_timer(&priv->txtimer,
3130 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3131 		priv->tx_timer_armed = true;
3132 	} else {
3133 		priv->tx_count_frames = 0;
3134 		stmmac_set_tx_ic(priv, desc);
3135 		priv->xstats.tx_set_ic_bit++;
3136 		priv->tx_timer_armed = false;
3137 	}
3138 
3139 	skb_tx_timestamp(skb);
3140 
	/* It is now safe to fill the first descriptor and set the OWN bit
	 * because all the other descriptors are already ready to be
	 * passed to the DMA engine.
	 */
3145 	if (likely(!is_jumbo)) {
3146 		bool last_segment = (nfrags == 0);
3147 
3148 		des = dma_map_single(priv->device, skb->data,
3149 				     nopaged_len, DMA_TO_DEVICE);
3150 		if (dma_mapping_error(priv->device, des))
3151 			goto dma_map_err;
3152 
3153 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3154 
3155 		stmmac_set_desc_addr(priv, first, des);
3156 
3157 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3158 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3159 
3160 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3161 			     priv->hwts_tx_en)) {
3162 			/* declare that device is doing timestamping */
3163 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3164 			stmmac_enable_tx_timestamp(priv, first);
3165 		}
3166 
3167 		/* Prepare the first descriptor setting the OWN bit too */
3168 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3169 				csum_insertion, priv->mode, 1, last_segment,
3170 				skb->len);
3171 
		/* The OWN bit must be the latest setting done when preparing the
		 * descriptor and then a barrier is needed to make sure that
		 * all is coherent before granting the DMA engine.
3175 		 */
3176 		wmb();
3177 	}
3178 
3179 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3180 
3181 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
3182 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3183 
3184 	return NETDEV_TX_OK;
3185 
3186 dma_map_err:
3187 	netdev_err(priv->dev, "Tx DMA map failed\n");
3188 	dev_kfree_skb(skb);
3189 	priv->dev->stats.tx_dropped++;
3190 	return NETDEV_TX_OK;
3191 }
3192 
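/**
 * stmmac_rx_vlan - strip the VLAN tag from a received frame
 * @dev: device pointer
 * @skb: the socket buffer
 * Description: when VLAN RX offload is enabled for the tag protocol, it
 * pops the 802.1Q/802.1AD tag from the frame data in SW and records it
 * in the skb via __vlan_hwaccel_put_tag().
 */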
3193 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3194 {
3195 	struct vlan_ethhdr *veth;
3196 	__be16 vlan_proto;
3197 	u16 vlanid;
3198 
3199 	veth = (struct vlan_ethhdr *)skb->data;
3200 	vlan_proto = veth->h_vlan_proto;
3201 
3202 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3203 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3204 	    (vlan_proto == htons(ETH_P_8021AD) &&
3205 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3206 		/* pop the vlan tag */
3207 		vlanid = ntohs(veth->h_vlan_TCI);
3208 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3209 		skb_pull(skb, VLAN_HLEN);
3210 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3211 	}
3212 }
3214 
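/* Return 1 when the RX zero-copy threshold is active, i.e. when the RX
 * path should fall back to copying into a freshly allocated skb.
 */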
3215 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3216 {
3217 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3218 		return 0;
3219 
3220 	return 1;
3221 }
3222 
3223 /**
3224  * stmmac_rx_refill - refill used skb preallocated buffers
3225  * @priv: driver private structure
3226  * @queue: RX queue index
 * Description: this reallocates the skbs for the zero-copy based
 * reception process.
3229  */
3230 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3231 {
3232 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3233 	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;
	int bfsize = priv->dma_buf_sz;
3237 
3238 	while (dirty-- > 0) {
3239 		struct dma_desc *p;
3240 
3241 		if (priv->extend_desc)
3242 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3243 		else
3244 			p = rx_q->dma_rx + entry;
3245 
3246 		if (likely(!rx_q->rx_skbuff[entry])) {
3247 			struct sk_buff *skb;
3248 
3249 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3250 			if (unlikely(!skb)) {
3251 				/* so for a while no zero-copy! */
3252 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3253 				if (unlikely(net_ratelimit()))
3254 					dev_err(priv->device,
3255 						"fail to alloc skb entry %d\n",
3256 						entry);
3257 				break;
3258 			}
3259 
3260 			rx_q->rx_skbuff[entry] = skb;
3261 			rx_q->rx_skbuff_dma[entry] =
3262 			    dma_map_single(priv->device, skb->data, bfsize,
3263 					   DMA_FROM_DEVICE);
3264 			if (dma_mapping_error(priv->device,
3265 					      rx_q->rx_skbuff_dma[entry])) {
3266 				netdev_err(priv->dev, "Rx DMA map failed\n");
3267 				dev_kfree_skb(skb);
3268 				break;
3269 			}
3270 
3271 			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3272 			stmmac_refill_desc3(priv, rx_q, p);
3273 
3274 			if (rx_q->rx_zeroc_thresh > 0)
3275 				rx_q->rx_zeroc_thresh--;
3276 
3277 			netif_dbg(priv, rx_status, priv->dev,
3278 				  "refill entry #%d\n", entry);
3279 		}
3280 		dma_wmb();
3281 
3282 		stmmac_set_rx_owner(priv, p, priv->use_riwt);
3283 
3284 		dma_wmb();
3285 
3286 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3287 	}
3288 	rx_q->dirty_rx = entry;
3289 }
3290 
3291 /**
3292  * stmmac_rx - manage the receive process
3293  * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description: this is the function called by the napi poll method.
3297  * It gets all the frames inside the ring.
3298  */
3299 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3300 {
3301 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3302 	unsigned int entry = rx_q->cur_rx;
3303 	int coe = priv->hw->rx_csum;
3304 	unsigned int next_entry;
3305 	unsigned int count = 0;
3306 
3307 	if (netif_msg_rx_status(priv)) {
3308 		void *rx_head;
3309 
3310 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3311 		if (priv->extend_desc)
3312 			rx_head = (void *)rx_q->dma_erx;
3313 		else
3314 			rx_head = (void *)rx_q->dma_rx;
3315 
3316 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3317 	}
3318 	while (count < limit) {
3319 		int status;
3320 		struct dma_desc *p;
3321 		struct dma_desc *np;
3322 
3323 		if (priv->extend_desc)
3324 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3325 		else
3326 			p = rx_q->dma_rx + entry;
3327 
3328 		/* read the status of the incoming frame */
3329 		status = stmmac_rx_status(priv, &priv->dev->stats,
3330 				&priv->xstats, p);
3331 		/* check if managed by the DMA otherwise go ahead */
3332 		if (unlikely(status & dma_own))
3333 			break;
3334 
3335 		count++;
3336 
3337 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3338 		next_entry = rx_q->cur_rx;
3339 
3340 		if (priv->extend_desc)
3341 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3342 		else
3343 			np = rx_q->dma_rx + next_entry;
3344 
3345 		prefetch(np);
3346 
3347 		if (priv->extend_desc)
3348 			stmmac_rx_extended_status(priv, &priv->dev->stats,
3349 					&priv->xstats, rx_q->dma_erx + entry);
3350 		if (unlikely(status == discard_frame)) {
3351 			priv->dev->stats.rx_errors++;
3352 			if (priv->hwts_rx_en && !priv->extend_desc) {
3353 				/* DESC2 & DESC3 will be overwritten by device
3354 				 * with timestamp value, hence reinitialize
3355 				 * them in stmmac_rx_refill() function so that
3356 				 * device can reuse it.
3357 				 */
3358 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3359 				rx_q->rx_skbuff[entry] = NULL;
3360 				dma_unmap_single(priv->device,
3361 						 rx_q->rx_skbuff_dma[entry],
3362 						 priv->dma_buf_sz,
3363 						 DMA_FROM_DEVICE);
3364 			}
3365 		} else {
3366 			struct sk_buff *skb;
3367 			int frame_len;
3368 			unsigned int des;
3369 
3370 			stmmac_get_desc_addr(priv, p, &des);
3371 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3372 
3373 			/*  If frame length is greater than skb buffer size
3374 			 *  (preallocated during init) then the packet is
3375 			 *  ignored
3376 			 */
3377 			if (frame_len > priv->dma_buf_sz) {
3378 				netdev_err(priv->dev,
3379 					   "len %d larger than size (%d)\n",
3380 					   frame_len, priv->dma_buf_sz);
3381 				priv->dev->stats.rx_length_errors++;
3382 				break;
3383 			}
3384 
3385 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3386 			 * Type frames (LLC/LLC-SNAP)
3387 			 *
3388 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3389 			 * feature is always disabled and packets need to be
3390 			 * stripped manually.
3391 			 */
3392 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3393 			    unlikely(status != llc_snap))
3394 				frame_len -= ETH_FCS_LEN;
3395 
3396 			if (netif_msg_rx_status(priv)) {
3397 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3398 					   p, entry, des);
3399 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3400 					   frame_len, status);
3401 			}
3402 
			/* Zero-copy is always used for all the sizes
			 * in case of GMAC4 because the used descriptors
			 * always need to be refilled.
			 */
3407 			if (unlikely(!priv->plat->has_gmac4 &&
3408 				     ((frame_len < priv->rx_copybreak) ||
3409 				     stmmac_rx_threshold_count(rx_q)))) {
3410 				skb = netdev_alloc_skb_ip_align(priv->dev,
3411 								frame_len);
3412 				if (unlikely(!skb)) {
3413 					if (net_ratelimit())
3414 						dev_warn(priv->device,
3415 							 "packet dropped\n");
3416 					priv->dev->stats.rx_dropped++;
3417 					break;
3418 				}
3419 
				dma_sync_single_for_cpu(priv->device,
							rx_q->rx_skbuff_dma[entry],
							frame_len,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb,
							rx_q->rx_skbuff[entry]->data,
							frame_len);

				skb_put(skb, frame_len);
				dma_sync_single_for_device(priv->device,
							   rx_q->rx_skbuff_dma[entry],
							   frame_len,
							   DMA_FROM_DEVICE);
3434 			} else {
3435 				skb = rx_q->rx_skbuff[entry];
3436 				if (unlikely(!skb)) {
3437 					netdev_err(priv->dev,
3438 						   "%s: Inconsistent Rx chain\n",
3439 						   priv->dev->name);
3440 					priv->dev->stats.rx_dropped++;
3441 					break;
3442 				}
3443 				prefetch(skb->data - NET_IP_ALIGN);
3444 				rx_q->rx_skbuff[entry] = NULL;
3445 				rx_q->rx_zeroc_thresh++;
3446 
3447 				skb_put(skb, frame_len);
3448 				dma_unmap_single(priv->device,
3449 						 rx_q->rx_skbuff_dma[entry],
3450 						 priv->dma_buf_sz,
3451 						 DMA_FROM_DEVICE);
3452 			}
3453 
3454 			if (netif_msg_pktdata(priv)) {
3455 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3456 					   frame_len);
3457 				print_pkt(skb->data, frame_len);
3458 			}

			stmmac_get_rx_hwtstamp(priv, p, np, skb);

			stmmac_rx_vlan(priv->dev, skb);

			skb->protocol = eth_type_trans(skb, priv->dev);

			if (unlikely(!coe))
				skb_checksum_none_assert(skb);
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&rx_q->napi, skb);

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
		entry = next_entry;
	}

	stmmac_rx_refill(priv, queue);

	priv->xstats.rx_pkt_n += count;

	return count;
}
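
/* Note on the copybreak heuristic above: frames shorter than
 * priv->rx_copybreak are copied into a freshly allocated skb so the
 * DMA buffer can be reused without remapping; larger frames take the
 * zero-copy path. Where the ethtool tunable is wired up in the
 * driver's ethtool ops, the threshold can typically be adjusted from
 * userspace (a hedged example, device name assumed):
 *
 *	ethtool --set-tunable eth0 rx-copybreak 512
 */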

/**
 *  stmmac_poll - stmmac poll method (NAPI)
 *  @napi : pointer to the napi structure.
 *  @budget : maximum number of packets that the current CPU can receive from
 *	      all interfaces.
 *  Description :
 *  Process incoming frames and clean up completed Tx resources.
 */
static int stmmac_poll(struct napi_struct *napi, int budget)
{
	struct stmmac_rx_queue *rx_q =
		container_of(napi, struct stmmac_rx_queue, napi);
	struct stmmac_priv *priv = rx_q->priv_data;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 chan = rx_q->queue_index;
	int work_done = 0;
	u32 queue;

	priv->xstats.napi_poll++;

	/* check all the queues */
	for (queue = 0; queue < tx_count; queue++)
		stmmac_tx_clean(priv, queue);

	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
	}
	return work_done;
}
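
/* Interrupt/poll handshake (a descriptive note): the DMA ISR masks the
 * per-channel DMA interrupt and schedules NAPI; when a poll round does
 * not exhaust its budget, stmmac_poll() completes NAPI and re-arms the
 * interrupt through stmmac_enable_dma_irq() above.
 */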

/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_global_err(priv);
}

/**
 *  stmmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_set_filter(priv, priv->hw, dev);
}

/**
 *  stmmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transmission Unit (MTU) is used by the network
 *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig or ip.
 *  Return value:
 *  0 on success or an appropriate negative integer as defined in errno.h
 *  on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (netif_running(dev)) {
		netdev_err(priv->dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	dev->mtu = new_mtu;

	netdev_update_features(dev);

	return 0;
}
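
/* Because the interface must be down for an MTU change, a typical
 * userspace sequence looks like this (illustrative device name):
 *
 *	ip link set dev eth0 down
 *	ip link set dev eth0 mtu 9000
 *	ip link set dev eth0 up
 */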

static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have buggy Jumbo frame support and need Tx COE
	 * disabled for oversized frames (due to limited buffer sizes). In
	 * that case we disable TX checksum insertion in the TDES and do not
	 * use Store-and-Forward mode.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	/* Enable or disable TSO as requested via ethtool */
	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

	return features;
}

static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE type if RX checksumming is supported */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed up in case of an issue.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	return 0;
}

/**
 *  stmmac_interrupt - main ISR
 *  @irq: interrupt number.
 *  @dev_id: to pass the net device pointer.
 *  Description: this is the main driver interrupt service routine.
 *  It can call:
 *  o DMA service routine (to manage incoming frame reception and transmission
 *    status)
 *  o Core interrupts to manage: remote wake-up, management counter, LPI
 *    interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv;
	u32 rx_cnt, tx_cnt;
	u32 queues_count;
	u32 queue;

	/* Validate dev before it is dereferenced below */
	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	priv = netdev_priv(dev);
	rx_cnt = priv->plat->rx_queues_to_use;
	tx_cnt = priv->plat->tx_queues_to_use;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;
	/* Check if a fatal error happened */
	if (stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
		int mtl_status;

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		for (queue = 0; queue < queues_count; queue++) {
			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
								queue);
			if (mtl_status != -EINVAL)
				status |= mtl_status;

			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
						       rx_q->rx_tail_addr,
						       queue);
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 *  stmmac_ioctl - entry point for the ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!dev->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_ioctl(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}
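
/* A minimal userspace sketch of the SIOCSHWTSTAMP path handled above
 * (a hedged example: the device name and socket setup are assumptions
 * and error handling is omitted; needs <linux/net_tstamp.h>, <net/if.h>
 * and <sys/ioctl.h>):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */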

static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	struct stmmac_priv *priv = cb_priv;
	int ret = -EOPNOTSUPP;

	stmmac_disable_all_queues(priv);

	switch (type) {
	case TC_SETUP_CLSU32:
		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
		break;
	default:
		break;
	}

	stmmac_enable_all_queues(priv);
	return ret;
}

static int stmmac_setup_tc_block(struct stmmac_priv *priv,
				 struct tc_block_offload *f)
{
	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
				priv, priv);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return stmmac_setup_tc_block(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
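
/* The CLSU32 ingress offload above can be exercised from userspace
 * roughly like this (illustrative device, match and action; whether a
 * particular rule is accepted depends on stmmac_tc_setup_cls_u32()):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress protocol ip u32 \
 *		match ip dst 192.168.0.1/32 action drop
 */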

static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

	return ret;
}
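
/* The net core reaches this handler through the usual userspace entry
 * point, e.g. (illustrative device and address; eth_mac_addr() rejects
 * invalid addresses):
 *
 *	ip link set dev eth0 address 02:11:22:33:44:55
 */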

#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(ep),
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(p),
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}

static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   DMA_RX_SIZE, 1, seq);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   DMA_RX_SIZE, 0, seq);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   DMA_TX_SIZE, 1, seq);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   DMA_TX_SIZE, 0, seq);
		}
	}

	return 0;
}

static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}

/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */

static const struct file_operations stmmac_rings_status_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");

	return 0;
}

static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
}

static const struct file_operations stmmac_dma_cap_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_dma_cap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");

		return -ENOMEM;
	}

	/* Entry to report DMA RX/TX rings */
	priv->dbgfs_rings_status =
		debugfs_create_file("descriptors_status", 0444,
				    priv->dbgfs_dir, dev,
				    &stmmac_rings_status_fops);

	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	/* Entry to report the DMA HW features */
	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
						  priv->dbgfs_dir,
						  dev, &stmmac_dma_cap_fops);

	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
		netdev_err(priv->dev, "ERROR creating stmmac dma_cap debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	return 0;
}
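
/* With debugfs mounted, the two entries created above can be read
 * directly, e.g. for an interface named eth0:
 *
 *	cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *	cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */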

static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */

static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_setup_tc = stmmac_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = stmmac_set_mac_address,
};

static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}

static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
			service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}

/**
 *  stmmac_hw_init - Init the MAC device
 *  @priv: driver private structure
 *  Description: this function configures the MAC device according to
 *  platform parameters and the HW capability register. It prepares the
 *  driver to use either ring or chain modes and to set up either enhanced or
 *  normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only works in chain mode */
	if (priv->plat->has_sun8i)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (GMAC cores newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields
		 * (e.g. enh_desc, tx_coe) that are passed through the
		 * platform with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
		priv->hw->pmt = priv->plat->pmt;

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4, rx_coe comes from the HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-up on LAN supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function, used to allocate the
 * network device (via alloc_etherdev) and the private structure.
 * Return:
 * returns 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	int ret = 0;
	u32 queue;

	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
				  MTL_MAX_TX_QUEUES,
				  MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;

	if (res->mac)
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		ret = -ENOMEM;
		goto error_wq;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only reset callback instead of
		 * assert + deassert callbacks pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu when plat->maxmtu exceeds
	 * ndev->max_mtu, or when plat->maxmtu is below ndev->min_mtu
	 * (an invalid range).
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* The Rx watchdog is available in cores newer than 3.40. In some
	 * cases, for example on buggy HW, this feature has to be disabled;
	 * this can be done by passing the riwt_off field from the platform.
	 */
	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
			       (8 * priv->plat->rx_queues_to_use));
	}

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform,
	 * the CSR Clock Range selection is fixed and cannot be changed
	 * at run-time. Otherwise the driver will try to set the MDC
	 * clock dynamically according to the actual csr clock input.
	 */
	if (!priv->plat->clk_csr)
		stmmac_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	stmmac_check_pcs_mode(priv);

	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed\n",
				__func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

	return ret;

error_netdev_register:
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_napi_del(&rx_q->napi);
	}
error_hw_init:
	destroy_workqueue(priv->wq);
error_wq:
	free_netdev(ndev);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);

/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status, and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver\n", __func__);

	stmmac_stop_all_dma(priv);

	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	clk_disable_unprepare(priv->plat->pclk);
	clk_disable_unprepare(priv->plat->stmmac_clk);
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);
	free_netdev(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	if (!ndev || !netif_running(ndev))
		return 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);
	stmmac_stop_all_queues(priv);

	stmmac_disable_all_queues(priv);

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device)) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable clocks; PMT wake-up is not in use and the MAC
		 * is powered down.
		 */
		clk_disable(priv->plat->pclk);
		clk_disable(priv->plat->stmmac_clk);
	}
	mutex_unlock(&priv->lock);

	priv->oldlink = false;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
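
/* Whether the PMT path in stmmac_suspend() is taken depends on
 * device_may_wakeup(), which userspace typically controls through the
 * driver's ethtool WoL support, e.g. (illustrative device name):
 *
 *	ethtool -s eth0 wol g	# wake on magic packet
 *	ethtool eth0		# the "Wake-on:" line shows the setting
 */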

/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: driver private structure
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = 0;
	}

	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->cur_tx = 0;
		tx_q->dirty_tx = 0;
		tx_q->mss = 0;
	}
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: on resume, this function is invoked to set up the DMA and
 * CORE in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	if (!netif_running(ndev))
		return 0;

	/* The Power Down bit in the PM register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received.
	 * Even so, it is better to clear this bit manually because it can
	 * generate problems while resuming from other devices (e.g. a
	 * serial console).
	 */
	if (device_may_wakeup(priv->device)) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* Re-enable the clocks that were disabled on suspend */
		clk_enable(priv->plat->stmmac_clk);
		clk_enable(priv->plat->pclk);
		/* Reset the PHY so that it is ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	netif_device_attach(ndev);

	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);

	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_tx_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_enable_all_queues(priv);

	stmmac_start_all_queues(priv);

	mutex_unlock(&priv->lock);

	if (ndev->phydev)
		phy_start(ndev->phydev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);

#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
	return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
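
/* Example built-in command line matching the options parsed above
 * (values are illustrative; the boot parameter must be one
 * comma-separated token):
 *
 *	stmmaceth=debug:16,phyaddr:1,buf_sz:2048,tc:64,watchdog:4000,flow_ctrl:1,pause:65535,eee_timer:1000,chain_mode:1
 */
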
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir) {
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);

		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
			pr_err("ERROR %s, debugfs create directory failed\n",
			       STMMAC_RESOURCE_NAME);

			return -ENOMEM;
		}
	}
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");