1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "hwif.h"
55 
56 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
57 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
58 
59 /* Module parameters */
60 #define TX_TIMEO	5000
61 static int watchdog = TX_TIMEO;
62 module_param(watchdog, int, 0644);
63 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
64 
65 static int debug = -1;
66 module_param(debug, int, 0644);
67 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
68 
69 static int phyaddr = -1;
70 module_param(phyaddr, int, 0444);
71 MODULE_PARM_DESC(phyaddr, "Physical device address");
72 
73 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
74 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
75 
76 static int flow_ctrl = FLOW_OFF;
77 module_param(flow_ctrl, int, 0644);
78 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
79 
80 static int pause = PAUSE_TIME;
81 module_param(pause, int, 0644);
82 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
83 
84 #define TC_DEFAULT 64
85 static int tc = TC_DEFAULT;
86 module_param(tc, int, 0644);
87 MODULE_PARM_DESC(tc, "DMA threshold control value");
88 
89 #define	DEFAULT_BUFSIZE	1536
90 static int buf_sz = DEFAULT_BUFSIZE;
91 module_param(buf_sz, int, 0644);
92 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
93 
94 #define	STMMAC_RX_COPYBREAK	256
95 
96 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
97 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
98 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
99 
100 #define STMMAC_DEFAULT_LPI_TIMER	1000
101 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
102 module_param(eee_timer, int, 0644);
103 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
104 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
105 
106 /* By default the driver will use the ring mode to manage tx and rx descriptors,
107  * but allow user to force to use the chain instead of the ring
108  */
109 static unsigned int chain_mode;
110 module_param(chain_mode, int, 0444);
111 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
112 
113 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
114 
115 #ifdef CONFIG_DEBUG_FS
116 static int stmmac_init_fs(struct net_device *dev);
117 static void stmmac_exit_fs(struct net_device *dev);
118 #endif
119 
120 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
121 
122 /**
123  * stmmac_verify_args - verify the driver parameters.
124  * Description: it checks the driver parameters and set a default in case of
125  * errors.
126  */
127 static void stmmac_verify_args(void)
128 {
129 	if (unlikely(watchdog < 0))
130 		watchdog = TX_TIMEO;
131 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
132 		buf_sz = DEFAULT_BUFSIZE;
133 	if (unlikely(flow_ctrl > 1))
134 		flow_ctrl = FLOW_AUTO;
135 	else if (likely(flow_ctrl < 0))
136 		flow_ctrl = FLOW_OFF;
137 	if (unlikely((pause < 0) || (pause > 0xffff)))
138 		pause = PAUSE_TIME;
139 	if (eee_timer < 0)
140 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
141 }
142 
143 /**
144  * stmmac_disable_all_queues - Disable all queues
145  * @priv: driver private structure
146  */
147 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
148 {
149 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
150 	u32 queue;
151 
152 	for (queue = 0; queue < rx_queues_cnt; queue++) {
153 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
154 
155 		napi_disable(&rx_q->napi);
156 	}
157 }
158 
159 /**
160  * stmmac_enable_all_queues - Enable all queues
161  * @priv: driver private structure
162  */
163 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
164 {
165 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
166 	u32 queue;
167 
168 	for (queue = 0; queue < rx_queues_cnt; queue++) {
169 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
170 
171 		napi_enable(&rx_q->napi);
172 	}
173 }
174 
175 /**
176  * stmmac_stop_all_queues - Stop all queues
177  * @priv: driver private structure
178  */
179 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
180 {
181 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
182 	u32 queue;
183 
184 	for (queue = 0; queue < tx_queues_cnt; queue++)
185 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
186 }
187 
188 /**
189  * stmmac_start_all_queues - Start all queues
190  * @priv: driver private structure
191  */
192 static void stmmac_start_all_queues(struct stmmac_priv *priv)
193 {
194 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
195 	u32 queue;
196 
197 	for (queue = 0; queue < tx_queues_cnt; queue++)
198 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
199 }
200 
201 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
202 {
203 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
204 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
205 		queue_work(priv->wq, &priv->service_task);
206 }
207 
208 static void stmmac_global_err(struct stmmac_priv *priv)
209 {
210 	netif_carrier_off(priv->dev);
211 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
212 	stmmac_service_event_schedule(priv);
213 }
214 
215 /**
216  * stmmac_clk_csr_set - dynamically set the MDC clock
217  * @priv: driver private structure
218  * Description: this is to dynamically set the MDC clock according to the csr
219  * clock input.
220  * Note:
221  *	If a specific clk_csr value is passed from the platform
222  *	this means that the CSR Clock Range selection cannot be
223  *	changed at run-time and it is fixed (as reported in the driver
224  *	documentation). Viceversa the driver will try to set the MDC
225  *	clock dynamically according to the actual clock input.
226  */
227 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
228 {
229 	u32 clk_rate;
230 
231 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
232 
233 	/* Platform provided default clk_csr would be assumed valid
234 	 * for all other cases except for the below mentioned ones.
235 	 * For values higher than the IEEE 802.3 specified frequency
236 	 * we can not estimate the proper divider as it is not known
237 	 * the frequency of clk_csr_i. So we do not change the default
238 	 * divider.
239 	 */
240 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
241 		if (clk_rate < CSR_F_35M)
242 			priv->clk_csr = STMMAC_CSR_20_35M;
243 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
244 			priv->clk_csr = STMMAC_CSR_35_60M;
245 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
246 			priv->clk_csr = STMMAC_CSR_60_100M;
247 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
248 			priv->clk_csr = STMMAC_CSR_100_150M;
249 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
250 			priv->clk_csr = STMMAC_CSR_150_250M;
251 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
252 			priv->clk_csr = STMMAC_CSR_250_300M;
253 	}
254 
255 	if (priv->plat->has_sun8i) {
256 		if (clk_rate > 160000000)
257 			priv->clk_csr = 0x03;
258 		else if (clk_rate > 80000000)
259 			priv->clk_csr = 0x02;
260 		else if (clk_rate > 40000000)
261 			priv->clk_csr = 0x01;
262 		else
263 			priv->clk_csr = 0;
264 	}
265 }
266 
267 static void print_pkt(unsigned char *buf, int len)
268 {
269 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
270 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
271 }
272 
273 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
274 {
275 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
276 	u32 avail;
277 
278 	if (tx_q->dirty_tx > tx_q->cur_tx)
279 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
280 	else
281 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
282 
283 	return avail;
284 }
285 
286 /**
287  * stmmac_rx_dirty - Get RX queue dirty
288  * @priv: driver private structure
289  * @queue: RX queue index
290  */
291 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
292 {
293 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
294 	u32 dirty;
295 
296 	if (rx_q->dirty_rx <= rx_q->cur_rx)
297 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
298 	else
299 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
300 
301 	return dirty;
302 }
303 
304 /**
305  * stmmac_hw_fix_mac_speed - callback for speed selection
306  * @priv: driver private structure
307  * Description: on some platforms (e.g. ST), some HW system configuration
308  * registers have to be set according to the link speed negotiated.
309  */
310 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
311 {
312 	struct net_device *ndev = priv->dev;
313 	struct phy_device *phydev = ndev->phydev;
314 
315 	if (likely(priv->plat->fix_mac_speed))
316 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
317 }
318 
319 /**
320  * stmmac_enable_eee_mode - check and enter in LPI mode
321  * @priv: driver private structure
322  * Description: this function is to verify and enter in LPI mode in case of
323  * EEE.
324  */
325 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
326 {
327 	u32 tx_cnt = priv->plat->tx_queues_to_use;
328 	u32 queue;
329 
330 	/* check if all TX queues have the work finished */
331 	for (queue = 0; queue < tx_cnt; queue++) {
332 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
333 
334 		if (tx_q->dirty_tx != tx_q->cur_tx)
335 			return; /* still unfinished work */
336 	}
337 
338 	/* Check and enter in LPI mode */
339 	if (!priv->tx_path_in_lpi_mode)
340 		stmmac_set_eee_mode(priv, priv->hw,
341 				priv->plat->en_tx_lpi_clockgating);
342 }
343 
344 /**
345  * stmmac_disable_eee_mode - disable and exit from LPI mode
346  * @priv: driver private structure
347  * Description: this function is to exit and disable EEE in case of
348  * LPI state is true. This is called by the xmit.
349  */
350 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
351 {
352 	stmmac_reset_eee_mode(priv, priv->hw);
353 	del_timer_sync(&priv->eee_ctrl_timer);
354 	priv->tx_path_in_lpi_mode = false;
355 }
356 
357 /**
358  * stmmac_eee_ctrl_timer - EEE TX SW timer.
359  * @arg : data hook
360  * Description:
361  *  if there is no data transfer and if we are not in LPI state,
362  *  then MAC Transmitter can be moved to LPI state.
363  */
364 static void stmmac_eee_ctrl_timer(struct timer_list *t)
365 {
366 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
367 
368 	stmmac_enable_eee_mode(priv);
369 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
370 }
371 
372 /**
373  * stmmac_eee_init - init EEE
374  * @priv: driver private structure
375  * Description:
376  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
377  *  can also manage EEE, this function enable the LPI state and start related
378  *  timer.
379  */
380 bool stmmac_eee_init(struct stmmac_priv *priv)
381 {
382 	struct net_device *ndev = priv->dev;
383 	int interface = priv->plat->interface;
384 	bool ret = false;
385 
386 	if ((interface != PHY_INTERFACE_MODE_MII) &&
387 	    (interface != PHY_INTERFACE_MODE_GMII) &&
388 	    !phy_interface_mode_is_rgmii(interface))
389 		goto out;
390 
391 	/* Using PCS we cannot dial with the phy registers at this stage
392 	 * so we do not support extra feature like EEE.
393 	 */
394 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
395 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
396 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
397 		goto out;
398 
399 	/* MAC core supports the EEE feature. */
400 	if (priv->dma_cap.eee) {
401 		int tx_lpi_timer = priv->tx_lpi_timer;
402 
403 		/* Check if the PHY supports EEE */
404 		if (phy_init_eee(ndev->phydev, 1)) {
405 			/* To manage at run-time if the EEE cannot be supported
406 			 * anymore (for example because the lp caps have been
407 			 * changed).
408 			 * In that case the driver disable own timers.
409 			 */
410 			mutex_lock(&priv->lock);
411 			if (priv->eee_active) {
412 				netdev_dbg(priv->dev, "disable EEE\n");
413 				del_timer_sync(&priv->eee_ctrl_timer);
414 				stmmac_set_eee_timer(priv, priv->hw, 0,
415 						tx_lpi_timer);
416 			}
417 			priv->eee_active = 0;
418 			mutex_unlock(&priv->lock);
419 			goto out;
420 		}
421 		/* Activate the EEE and start timers */
422 		mutex_lock(&priv->lock);
423 		if (!priv->eee_active) {
424 			priv->eee_active = 1;
425 			timer_setup(&priv->eee_ctrl_timer,
426 				    stmmac_eee_ctrl_timer, 0);
427 			mod_timer(&priv->eee_ctrl_timer,
428 				  STMMAC_LPI_T(eee_timer));
429 
430 			stmmac_set_eee_timer(priv, priv->hw,
431 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
432 		}
433 		/* Set HW EEE according to the speed */
434 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
435 
436 		ret = true;
437 		mutex_unlock(&priv->lock);
438 
439 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
440 	}
441 out:
442 	return ret;
443 }
444 
445 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
446  * @priv: driver private structure
447  * @p : descriptor pointer
448  * @skb : the socket buffer
449  * Description :
450  * This function will read timestamp from the descriptor & pass it to stack.
451  * and also perform some sanity checks.
452  */
453 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
454 				   struct dma_desc *p, struct sk_buff *skb)
455 {
456 	struct skb_shared_hwtstamps shhwtstamp;
457 	u64 ns;
458 
459 	if (!priv->hwts_tx_en)
460 		return;
461 
462 	/* exit if skb doesn't support hw tstamp */
463 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
464 		return;
465 
466 	/* check tx tstamp status */
467 	if (stmmac_get_tx_timestamp_status(priv, p)) {
468 		/* get the valid tstamp */
469 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
470 
471 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
472 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
473 
474 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
475 		/* pass tstamp to stack */
476 		skb_tstamp_tx(skb, &shhwtstamp);
477 	}
478 
479 	return;
480 }
481 
482 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
483  * @priv: driver private structure
484  * @p : descriptor pointer
485  * @np : next descriptor pointer
486  * @skb : the socket buffer
487  * Description :
488  * This function will read received packet's timestamp from the descriptor
489  * and pass it to stack. It also perform some sanity checks.
490  */
491 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
492 				   struct dma_desc *np, struct sk_buff *skb)
493 {
494 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
495 	struct dma_desc *desc = p;
496 	u64 ns;
497 
498 	if (!priv->hwts_rx_en)
499 		return;
500 	/* For GMAC4, the valid timestamp is from CTX next desc. */
501 	if (priv->plat->has_gmac4)
502 		desc = np;
503 
504 	/* Check if timestamp is available */
505 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
506 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
507 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
508 		shhwtstamp = skb_hwtstamps(skb);
509 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
510 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
511 	} else  {
512 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
513 	}
514 }
515 
516 /**
517  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
518  *  @dev: device pointer.
519  *  @ifr: An IOCTL specific structure, that can contain a pointer to
520  *  a proprietary structure used to pass information to the driver.
521  *  Description:
522  *  This function configures the MAC to enable/disable both outgoing(TX)
523  *  and incoming(RX) packets time stamping based on user input.
524  *  Return Value:
525  *  0 on success and an appropriate -ve integer on failure.
526  */
527 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
528 {
529 	struct stmmac_priv *priv = netdev_priv(dev);
530 	struct hwtstamp_config config;
531 	struct timespec64 now;
532 	u64 temp = 0;
533 	u32 ptp_v2 = 0;
534 	u32 tstamp_all = 0;
535 	u32 ptp_over_ipv4_udp = 0;
536 	u32 ptp_over_ipv6_udp = 0;
537 	u32 ptp_over_ethernet = 0;
538 	u32 snap_type_sel = 0;
539 	u32 ts_master_en = 0;
540 	u32 ts_event_en = 0;
541 	u32 value = 0;
542 	u32 sec_inc;
543 
544 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
545 		netdev_alert(priv->dev, "No support for HW time stamping\n");
546 		priv->hwts_tx_en = 0;
547 		priv->hwts_rx_en = 0;
548 
549 		return -EOPNOTSUPP;
550 	}
551 
552 	if (copy_from_user(&config, ifr->ifr_data,
553 			   sizeof(struct hwtstamp_config)))
554 		return -EFAULT;
555 
556 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
557 		   __func__, config.flags, config.tx_type, config.rx_filter);
558 
559 	/* reserved for future extensions */
560 	if (config.flags)
561 		return -EINVAL;
562 
563 	if (config.tx_type != HWTSTAMP_TX_OFF &&
564 	    config.tx_type != HWTSTAMP_TX_ON)
565 		return -ERANGE;
566 
567 	if (priv->adv_ts) {
568 		switch (config.rx_filter) {
569 		case HWTSTAMP_FILTER_NONE:
570 			/* time stamp no incoming packet at all */
571 			config.rx_filter = HWTSTAMP_FILTER_NONE;
572 			break;
573 
574 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
575 			/* PTP v1, UDP, any kind of event packet */
576 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
577 			/* take time stamp for all event messages */
578 			if (priv->plat->has_gmac4)
579 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
580 			else
581 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
582 
583 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
584 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
585 			break;
586 
587 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
588 			/* PTP v1, UDP, Sync packet */
589 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
590 			/* take time stamp for SYNC messages only */
591 			ts_event_en = PTP_TCR_TSEVNTENA;
592 
593 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
594 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
595 			break;
596 
597 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
598 			/* PTP v1, UDP, Delay_req packet */
599 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
600 			/* take time stamp for Delay_Req messages only */
601 			ts_master_en = PTP_TCR_TSMSTRENA;
602 			ts_event_en = PTP_TCR_TSEVNTENA;
603 
604 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
605 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
606 			break;
607 
608 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
609 			/* PTP v2, UDP, any kind of event packet */
610 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
611 			ptp_v2 = PTP_TCR_TSVER2ENA;
612 			/* take time stamp for all event messages */
613 			if (priv->plat->has_gmac4)
614 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
615 			else
616 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
617 
618 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
619 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
620 			break;
621 
622 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
623 			/* PTP v2, UDP, Sync packet */
624 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
625 			ptp_v2 = PTP_TCR_TSVER2ENA;
626 			/* take time stamp for SYNC messages only */
627 			ts_event_en = PTP_TCR_TSEVNTENA;
628 
629 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
630 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
631 			break;
632 
633 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
634 			/* PTP v2, UDP, Delay_req packet */
635 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
636 			ptp_v2 = PTP_TCR_TSVER2ENA;
637 			/* take time stamp for Delay_Req messages only */
638 			ts_master_en = PTP_TCR_TSMSTRENA;
639 			ts_event_en = PTP_TCR_TSEVNTENA;
640 
641 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643 			break;
644 
645 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
646 			/* PTP v2/802.AS1 any layer, any kind of event packet */
647 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
648 			ptp_v2 = PTP_TCR_TSVER2ENA;
649 			/* take time stamp for all event messages */
650 			if (priv->plat->has_gmac4)
651 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
652 			else
653 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
654 
655 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
656 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
657 			ptp_over_ethernet = PTP_TCR_TSIPENA;
658 			break;
659 
660 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
661 			/* PTP v2/802.AS1, any layer, Sync packet */
662 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
663 			ptp_v2 = PTP_TCR_TSVER2ENA;
664 			/* take time stamp for SYNC messages only */
665 			ts_event_en = PTP_TCR_TSEVNTENA;
666 
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			ptp_over_ethernet = PTP_TCR_TSIPENA;
670 			break;
671 
672 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
673 			/* PTP v2/802.AS1, any layer, Delay_req packet */
674 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
675 			ptp_v2 = PTP_TCR_TSVER2ENA;
676 			/* take time stamp for Delay_Req messages only */
677 			ts_master_en = PTP_TCR_TSMSTRENA;
678 			ts_event_en = PTP_TCR_TSEVNTENA;
679 
680 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
681 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
682 			ptp_over_ethernet = PTP_TCR_TSIPENA;
683 			break;
684 
685 		case HWTSTAMP_FILTER_NTP_ALL:
686 		case HWTSTAMP_FILTER_ALL:
687 			/* time stamp any incoming packet */
688 			config.rx_filter = HWTSTAMP_FILTER_ALL;
689 			tstamp_all = PTP_TCR_TSENALL;
690 			break;
691 
692 		default:
693 			return -ERANGE;
694 		}
695 	} else {
696 		switch (config.rx_filter) {
697 		case HWTSTAMP_FILTER_NONE:
698 			config.rx_filter = HWTSTAMP_FILTER_NONE;
699 			break;
700 		default:
701 			/* PTP v1, UDP, any kind of event packet */
702 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
703 			break;
704 		}
705 	}
706 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
707 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
708 
709 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
710 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
711 	else {
712 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
713 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
714 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
715 			 ts_master_en | snap_type_sel);
716 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
717 
718 		/* program Sub Second Increment reg */
719 		stmmac_config_sub_second_increment(priv,
720 				priv->ptpaddr, priv->plat->clk_ptp_rate,
721 				priv->plat->has_gmac4, &sec_inc);
722 		temp = div_u64(1000000000ULL, sec_inc);
723 
724 		/* calculate default added value:
725 		 * formula is :
726 		 * addend = (2^32)/freq_div_ratio;
727 		 * where, freq_div_ratio = 1e9ns/sec_inc
728 		 */
729 		temp = (u64)(temp << 32);
730 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
731 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
732 
733 		/* initialize system time */
734 		ktime_get_real_ts64(&now);
735 
736 		/* lower 32 bits of tv_sec are safe until y2106 */
737 		stmmac_init_systime(priv, priv->ptpaddr,
738 				(u32)now.tv_sec, now.tv_nsec);
739 	}
740 
741 	return copy_to_user(ifr->ifr_data, &config,
742 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
743 }
744 
745 /**
746  * stmmac_init_ptp - init PTP
747  * @priv: driver private structure
748  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
749  * This is done by looking at the HW cap. register.
750  * This function also registers the ptp driver.
751  */
752 static int stmmac_init_ptp(struct stmmac_priv *priv)
753 {
754 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
755 		return -EOPNOTSUPP;
756 
757 	priv->adv_ts = 0;
758 	/* Check if adv_ts can be enabled for dwmac 4.x core */
759 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
760 		priv->adv_ts = 1;
761 	/* Dwmac 3.x core with extend_desc can support adv_ts */
762 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
763 		priv->adv_ts = 1;
764 
765 	if (priv->dma_cap.time_stamp)
766 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
767 
768 	if (priv->adv_ts)
769 		netdev_info(priv->dev,
770 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
771 
772 	priv->hwts_tx_en = 0;
773 	priv->hwts_rx_en = 0;
774 
775 	stmmac_ptp_register(priv);
776 
777 	return 0;
778 }
779 
780 static void stmmac_release_ptp(struct stmmac_priv *priv)
781 {
782 	if (priv->plat->clk_ptp_ref)
783 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
784 	stmmac_ptp_unregister(priv);
785 }
786 
787 /**
788  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
789  *  @priv: driver private structure
790  *  Description: It is used for configuring the flow control in all queues
791  */
792 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
793 {
794 	u32 tx_cnt = priv->plat->tx_queues_to_use;
795 
796 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
797 			priv->pause, tx_cnt);
798 }
799 
800 /**
801  * stmmac_adjust_link - adjusts the link parameters
802  * @dev: net device structure
803  * Description: this is the helper called by the physical abstraction layer
804  * drivers to communicate the phy link status. According the speed and duplex
805  * this driver can invoke registered glue-logic as well.
806  * It also invoke the eee initialization because it could happen when switch
807  * on different networks (that are eee capable).
808  */
809 static void stmmac_adjust_link(struct net_device *dev)
810 {
811 	struct stmmac_priv *priv = netdev_priv(dev);
812 	struct phy_device *phydev = dev->phydev;
813 	bool new_state = false;
814 
815 	if (!phydev)
816 		return;
817 
818 	mutex_lock(&priv->lock);
819 
820 	if (phydev->link) {
821 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
822 
823 		/* Now we make sure that we can be in full duplex mode.
824 		 * If not, we operate in half-duplex mode. */
825 		if (phydev->duplex != priv->oldduplex) {
826 			new_state = true;
827 			if (!phydev->duplex)
828 				ctrl &= ~priv->hw->link.duplex;
829 			else
830 				ctrl |= priv->hw->link.duplex;
831 			priv->oldduplex = phydev->duplex;
832 		}
833 		/* Flow Control operation */
834 		if (phydev->pause)
835 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
836 
837 		if (phydev->speed != priv->speed) {
838 			new_state = true;
839 			ctrl &= ~priv->hw->link.speed_mask;
840 			switch (phydev->speed) {
841 			case SPEED_1000:
842 				ctrl |= priv->hw->link.speed1000;
843 				break;
844 			case SPEED_100:
845 				ctrl |= priv->hw->link.speed100;
846 				break;
847 			case SPEED_10:
848 				ctrl |= priv->hw->link.speed10;
849 				break;
850 			default:
851 				netif_warn(priv, link, priv->dev,
852 					   "broken speed: %d\n", phydev->speed);
853 				phydev->speed = SPEED_UNKNOWN;
854 				break;
855 			}
856 			if (phydev->speed != SPEED_UNKNOWN)
857 				stmmac_hw_fix_mac_speed(priv);
858 			priv->speed = phydev->speed;
859 		}
860 
861 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
862 
863 		if (!priv->oldlink) {
864 			new_state = true;
865 			priv->oldlink = true;
866 		}
867 	} else if (priv->oldlink) {
868 		new_state = true;
869 		priv->oldlink = false;
870 		priv->speed = SPEED_UNKNOWN;
871 		priv->oldduplex = DUPLEX_UNKNOWN;
872 	}
873 
874 	if (new_state && netif_msg_link(priv))
875 		phy_print_status(phydev);
876 
877 	mutex_unlock(&priv->lock);
878 
879 	if (phydev->is_pseudo_fixed_link)
880 		/* Stop PHY layer to call the hook to adjust the link in case
881 		 * of a switch is attached to the stmmac driver.
882 		 */
883 		phydev->irq = PHY_IGNORE_INTERRUPT;
884 	else
885 		/* At this stage, init the EEE if supported.
886 		 * Never called in case of fixed_link.
887 		 */
888 		priv->eee_enabled = stmmac_eee_init(priv);
889 }
890 
891 /**
892  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
893  * @priv: driver private structure
894  * Description: this is to verify if the HW supports the PCS.
895  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
896  * configured for the TBI, RTBI, or SGMII PHY interface.
897  */
898 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
899 {
900 	int interface = priv->plat->interface;
901 
902 	if (priv->dma_cap.pcs) {
903 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
904 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
905 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
906 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
907 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
908 			priv->hw->pcs = STMMAC_PCS_RGMII;
909 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
910 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
911 			priv->hw->pcs = STMMAC_PCS_SGMII;
912 		}
913 	}
914 }
915 
916 /**
917  * stmmac_init_phy - PHY initialization
918  * @dev: net device structure
919  * Description: it initializes the driver's PHY state, and attaches the PHY
920  * to the mac driver.
921  *  Return value:
922  *  0 on success
923  */
924 static int stmmac_init_phy(struct net_device *dev)
925 {
926 	struct stmmac_priv *priv = netdev_priv(dev);
927 	struct phy_device *phydev;
928 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
929 	char bus_id[MII_BUS_ID_SIZE];
930 	int interface = priv->plat->interface;
931 	int max_speed = priv->plat->max_speed;
932 	priv->oldlink = false;
933 	priv->speed = SPEED_UNKNOWN;
934 	priv->oldduplex = DUPLEX_UNKNOWN;
935 
936 	if (priv->plat->phy_node) {
937 		phydev = of_phy_connect(dev, priv->plat->phy_node,
938 					&stmmac_adjust_link, 0, interface);
939 	} else {
940 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
941 			 priv->plat->bus_id);
942 
943 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
944 			 priv->plat->phy_addr);
945 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
946 			   phy_id_fmt);
947 
948 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
949 				     interface);
950 	}
951 
952 	if (IS_ERR_OR_NULL(phydev)) {
953 		netdev_err(priv->dev, "Could not attach to PHY\n");
954 		if (!phydev)
955 			return -ENODEV;
956 
957 		return PTR_ERR(phydev);
958 	}
959 
960 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
961 	if ((interface == PHY_INTERFACE_MODE_MII) ||
962 	    (interface == PHY_INTERFACE_MODE_RMII) ||
963 		(max_speed < 1000 && max_speed > 0))
964 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
965 					 SUPPORTED_1000baseT_Full);
966 
967 	/*
968 	 * Broken HW is sometimes missing the pull-up resistor on the
969 	 * MDIO line, which results in reads to non-existent devices returning
970 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
971 	 * device as well.
972 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
973 	 */
974 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
975 		phy_disconnect(phydev);
976 		return -ENODEV;
977 	}
978 
979 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
980 	 * subsequent PHY polling, make sure we force a link transition if
981 	 * we have a UP/DOWN/UP transition
982 	 */
983 	if (phydev->is_pseudo_fixed_link)
984 		phydev->irq = PHY_POLL;
985 
986 	phy_attached_info(phydev);
987 	return 0;
988 }
989 
990 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
991 {
992 	u32 rx_cnt = priv->plat->rx_queues_to_use;
993 	void *head_rx;
994 	u32 queue;
995 
996 	/* Display RX rings */
997 	for (queue = 0; queue < rx_cnt; queue++) {
998 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
999 
1000 		pr_info("\tRX Queue %u rings\n", queue);
1001 
1002 		if (priv->extend_desc)
1003 			head_rx = (void *)rx_q->dma_erx;
1004 		else
1005 			head_rx = (void *)rx_q->dma_rx;
1006 
1007 		/* Display RX ring */
1008 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1009 	}
1010 }
1011 
1012 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1013 {
1014 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1015 	void *head_tx;
1016 	u32 queue;
1017 
1018 	/* Display TX rings */
1019 	for (queue = 0; queue < tx_cnt; queue++) {
1020 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1021 
1022 		pr_info("\tTX Queue %d rings\n", queue);
1023 
1024 		if (priv->extend_desc)
1025 			head_tx = (void *)tx_q->dma_etx;
1026 		else
1027 			head_tx = (void *)tx_q->dma_tx;
1028 
1029 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1030 	}
1031 }
1032 
1033 static void stmmac_display_rings(struct stmmac_priv *priv)
1034 {
1035 	/* Display RX ring */
1036 	stmmac_display_rx_rings(priv);
1037 
1038 	/* Display TX ring */
1039 	stmmac_display_tx_rings(priv);
1040 }
1041 
1042 static int stmmac_set_bfsize(int mtu, int bufsize)
1043 {
1044 	int ret = bufsize;
1045 
1046 	if (mtu >= BUF_SIZE_4KiB)
1047 		ret = BUF_SIZE_8KiB;
1048 	else if (mtu >= BUF_SIZE_2KiB)
1049 		ret = BUF_SIZE_4KiB;
1050 	else if (mtu > DEFAULT_BUFSIZE)
1051 		ret = BUF_SIZE_2KiB;
1052 	else
1053 		ret = DEFAULT_BUFSIZE;
1054 
1055 	return ret;
1056 }
1057 
1058 /**
1059  * stmmac_clear_rx_descriptors - clear RX descriptors
1060  * @priv: driver private structure
1061  * @queue: RX queue index
1062  * Description: this function is called to clear the RX descriptors
1063  * in case of both basic and extended descriptors are used.
1064  */
1065 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1066 {
1067 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1068 	int i;
1069 
1070 	/* Clear the RX descriptors */
1071 	for (i = 0; i < DMA_RX_SIZE; i++)
1072 		if (priv->extend_desc)
1073 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1074 					priv->use_riwt, priv->mode,
1075 					(i == DMA_RX_SIZE - 1));
1076 		else
1077 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1078 					priv->use_riwt, priv->mode,
1079 					(i == DMA_RX_SIZE - 1));
1080 }
1081 
1082 /**
1083  * stmmac_clear_tx_descriptors - clear tx descriptors
1084  * @priv: driver private structure
1085  * @queue: TX queue index.
1086  * Description: this function is called to clear the TX descriptors
1087  * in case of both basic and extended descriptors are used.
1088  */
1089 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1090 {
1091 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1092 	int i;
1093 
1094 	/* Clear the TX descriptors */
1095 	for (i = 0; i < DMA_TX_SIZE; i++)
1096 		if (priv->extend_desc)
1097 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1098 					priv->mode, (i == DMA_TX_SIZE - 1));
1099 		else
1100 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1101 					priv->mode, (i == DMA_TX_SIZE - 1));
1102 }
1103 
1104 /**
1105  * stmmac_clear_descriptors - clear descriptors
1106  * @priv: driver private structure
1107  * Description: this function is called to clear the TX and RX descriptors
1108  * in case of both basic and extended descriptors are used.
1109  */
1110 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1111 {
1112 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1113 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1114 	u32 queue;
1115 
1116 	/* Clear the RX descriptors */
1117 	for (queue = 0; queue < rx_queue_cnt; queue++)
1118 		stmmac_clear_rx_descriptors(priv, queue);
1119 
1120 	/* Clear the TX descriptors */
1121 	for (queue = 0; queue < tx_queue_cnt; queue++)
1122 		stmmac_clear_tx_descriptors(priv, queue);
1123 }
1124 
1125 /**
1126  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1127  * @priv: driver private structure
1128  * @p: descriptor pointer
1129  * @i: descriptor index
1130  * @flags: gfp flag
1131  * @queue: RX queue index
1132  * Description: this function is called to allocate a receive buffer, perform
1133  * the DMA mapping and init the descriptor.
1134  */
1135 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1136 				  int i, gfp_t flags, u32 queue)
1137 {
1138 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1139 	struct sk_buff *skb;
1140 
1141 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1142 	if (!skb) {
1143 		netdev_err(priv->dev,
1144 			   "%s: Rx init fails; skb is NULL\n", __func__);
1145 		return -ENOMEM;
1146 	}
1147 	rx_q->rx_skbuff[i] = skb;
1148 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1149 						priv->dma_buf_sz,
1150 						DMA_FROM_DEVICE);
1151 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1152 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1153 		dev_kfree_skb_any(skb);
1154 		return -EINVAL;
1155 	}
1156 
1157 	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1158 
1159 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1160 		stmmac_init_desc3(priv, p);
1161 
1162 	return 0;
1163 }
1164 
1165 /**
1166  * stmmac_free_rx_buffer - free RX dma buffers
1167  * @priv: private structure
1168  * @queue: RX queue index
1169  * @i: buffer index.
1170  */
1171 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1172 {
1173 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1174 
1175 	if (rx_q->rx_skbuff[i]) {
1176 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1177 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1178 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1179 	}
1180 	rx_q->rx_skbuff[i] = NULL;
1181 }
1182 
1183 /**
1184  * stmmac_free_tx_buffer - free RX dma buffers
1185  * @priv: private structure
1186  * @queue: RX queue index
1187  * @i: buffer index.
1188  */
1189 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1190 {
1191 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1192 
1193 	if (tx_q->tx_skbuff_dma[i].buf) {
1194 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1195 			dma_unmap_page(priv->device,
1196 				       tx_q->tx_skbuff_dma[i].buf,
1197 				       tx_q->tx_skbuff_dma[i].len,
1198 				       DMA_TO_DEVICE);
1199 		else
1200 			dma_unmap_single(priv->device,
1201 					 tx_q->tx_skbuff_dma[i].buf,
1202 					 tx_q->tx_skbuff_dma[i].len,
1203 					 DMA_TO_DEVICE);
1204 	}
1205 
1206 	if (tx_q->tx_skbuff[i]) {
1207 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1208 		tx_q->tx_skbuff[i] = NULL;
1209 		tx_q->tx_skbuff_dma[i].buf = 0;
1210 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1211 	}
1212 }
1213 
1214 /**
1215  * init_dma_rx_desc_rings - init the RX descriptor rings
1216  * @dev: net device structure
1217  * @flags: gfp flag.
1218  * Description: this function initializes the DMA RX descriptors
1219  * and allocates the socket buffers. It supports the chained and ring
1220  * modes.
1221  */
1222 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1223 {
1224 	struct stmmac_priv *priv = netdev_priv(dev);
1225 	u32 rx_count = priv->plat->rx_queues_to_use;
1226 	int ret = -ENOMEM;
1227 	int bfsize = 0;
1228 	int queue;
1229 	int i;
1230 
1231 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1232 	if (bfsize < 0)
1233 		bfsize = 0;
1234 
1235 	if (bfsize < BUF_SIZE_16KiB)
1236 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1237 
1238 	priv->dma_buf_sz = bfsize;
1239 
1240 	/* RX INITIALIZATION */
1241 	netif_dbg(priv, probe, priv->dev,
1242 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1243 
1244 	for (queue = 0; queue < rx_count; queue++) {
1245 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1246 
1247 		netif_dbg(priv, probe, priv->dev,
1248 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1249 			  (u32)rx_q->dma_rx_phy);
1250 
1251 		for (i = 0; i < DMA_RX_SIZE; i++) {
1252 			struct dma_desc *p;
1253 
1254 			if (priv->extend_desc)
1255 				p = &((rx_q->dma_erx + i)->basic);
1256 			else
1257 				p = rx_q->dma_rx + i;
1258 
1259 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1260 						     queue);
1261 			if (ret)
1262 				goto err_init_rx_buffers;
1263 
1264 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1265 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1266 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1267 		}
1268 
1269 		rx_q->cur_rx = 0;
1270 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1271 
1272 		stmmac_clear_rx_descriptors(priv, queue);
1273 
1274 		/* Setup the chained descriptor addresses */
1275 		if (priv->mode == STMMAC_CHAIN_MODE) {
1276 			if (priv->extend_desc)
1277 				stmmac_mode_init(priv, rx_q->dma_erx,
1278 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1279 			else
1280 				stmmac_mode_init(priv, rx_q->dma_rx,
1281 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1282 		}
1283 	}
1284 
1285 	buf_sz = bfsize;
1286 
1287 	return 0;
1288 
1289 err_init_rx_buffers:
1290 	while (queue >= 0) {
1291 		while (--i >= 0)
1292 			stmmac_free_rx_buffer(priv, queue, i);
1293 
1294 		if (queue == 0)
1295 			break;
1296 
1297 		i = DMA_RX_SIZE;
1298 		queue--;
1299 	}
1300 
1301 	return ret;
1302 }
1303 
1304 /**
1305  * init_dma_tx_desc_rings - init the TX descriptor rings
1306  * @dev: net device structure.
1307  * Description: this function initializes the DMA TX descriptors
1308  * and allocates the socket buffers. It supports the chained and ring
1309  * modes.
1310  */
1311 static int init_dma_tx_desc_rings(struct net_device *dev)
1312 {
1313 	struct stmmac_priv *priv = netdev_priv(dev);
1314 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1315 	u32 queue;
1316 	int i;
1317 
1318 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1319 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1320 
1321 		netif_dbg(priv, probe, priv->dev,
1322 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1323 			 (u32)tx_q->dma_tx_phy);
1324 
1325 		/* Setup the chained descriptor addresses */
1326 		if (priv->mode == STMMAC_CHAIN_MODE) {
1327 			if (priv->extend_desc)
1328 				stmmac_mode_init(priv, tx_q->dma_etx,
1329 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1330 			else
1331 				stmmac_mode_init(priv, tx_q->dma_tx,
1332 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1333 		}
1334 
1335 		for (i = 0; i < DMA_TX_SIZE; i++) {
1336 			struct dma_desc *p;
1337 			if (priv->extend_desc)
1338 				p = &((tx_q->dma_etx + i)->basic);
1339 			else
1340 				p = tx_q->dma_tx + i;
1341 
1342 			stmmac_clear_desc(priv, p);
1343 
1344 			tx_q->tx_skbuff_dma[i].buf = 0;
1345 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1346 			tx_q->tx_skbuff_dma[i].len = 0;
1347 			tx_q->tx_skbuff_dma[i].last_segment = false;
1348 			tx_q->tx_skbuff[i] = NULL;
1349 		}
1350 
1351 		tx_q->dirty_tx = 0;
1352 		tx_q->cur_tx = 0;
1353 		tx_q->mss = 0;
1354 
1355 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1356 	}
1357 
1358 	return 0;
1359 }
1360 
1361 /**
1362  * init_dma_desc_rings - init the RX/TX descriptor rings
1363  * @dev: net device structure
1364  * @flags: gfp flag.
1365  * Description: this function initializes the DMA RX/TX descriptors
1366  * and allocates the socket buffers. It supports the chained and ring
1367  * modes.
1368  */
1369 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1370 {
1371 	struct stmmac_priv *priv = netdev_priv(dev);
1372 	int ret;
1373 
1374 	ret = init_dma_rx_desc_rings(dev, flags);
1375 	if (ret)
1376 		return ret;
1377 
1378 	ret = init_dma_tx_desc_rings(dev);
1379 
1380 	stmmac_clear_descriptors(priv);
1381 
1382 	if (netif_msg_hw(priv))
1383 		stmmac_display_rings(priv);
1384 
1385 	return ret;
1386 }
1387 
1388 /**
1389  * dma_free_rx_skbufs - free RX dma buffers
1390  * @priv: private structure
1391  * @queue: RX queue index
1392  */
1393 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1394 {
1395 	int i;
1396 
1397 	for (i = 0; i < DMA_RX_SIZE; i++)
1398 		stmmac_free_rx_buffer(priv, queue, i);
1399 }
1400 
1401 /**
1402  * dma_free_tx_skbufs - free TX dma buffers
1403  * @priv: private structure
1404  * @queue: TX queue index
1405  */
1406 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1407 {
1408 	int i;
1409 
1410 	for (i = 0; i < DMA_TX_SIZE; i++)
1411 		stmmac_free_tx_buffer(priv, queue, i);
1412 }
1413 
1414 /**
1415  * free_dma_rx_desc_resources - free RX dma desc resources
1416  * @priv: private structure
1417  */
1418 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1419 {
1420 	u32 rx_count = priv->plat->rx_queues_to_use;
1421 	u32 queue;
1422 
1423 	/* Free RX queue resources */
1424 	for (queue = 0; queue < rx_count; queue++) {
1425 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1426 
1427 		/* Release the DMA RX socket buffers */
1428 		dma_free_rx_skbufs(priv, queue);
1429 
1430 		/* Free DMA regions of consistent memory previously allocated */
1431 		if (!priv->extend_desc)
1432 			dma_free_coherent(priv->device,
1433 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1434 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1435 		else
1436 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1437 					  sizeof(struct dma_extended_desc),
1438 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1439 
1440 		kfree(rx_q->rx_skbuff_dma);
1441 		kfree(rx_q->rx_skbuff);
1442 	}
1443 }
1444 
1445 /**
1446  * free_dma_tx_desc_resources - free TX dma desc resources
1447  * @priv: private structure
1448  */
1449 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1450 {
1451 	u32 tx_count = priv->plat->tx_queues_to_use;
1452 	u32 queue;
1453 
1454 	/* Free TX queue resources */
1455 	for (queue = 0; queue < tx_count; queue++) {
1456 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1457 
1458 		/* Release the DMA TX socket buffers */
1459 		dma_free_tx_skbufs(priv, queue);
1460 
1461 		/* Free DMA regions of consistent memory previously allocated */
1462 		if (!priv->extend_desc)
1463 			dma_free_coherent(priv->device,
1464 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1465 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1466 		else
1467 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1468 					  sizeof(struct dma_extended_desc),
1469 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1470 
1471 		kfree(tx_q->tx_skbuff_dma);
1472 		kfree(tx_q->tx_skbuff);
1473 	}
1474 }
1475 
1476 /**
1477  * alloc_dma_rx_desc_resources - alloc RX resources.
1478  * @priv: private structure
1479  * Description: according to which descriptor can be used (extend or basic)
1480  * this function allocates the resources for TX and RX paths. In case of
1481  * reception, for example, it pre-allocated the RX socket buffer in order to
1482  * allow zero-copy mechanism.
1483  */
1484 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1485 {
1486 	u32 rx_count = priv->plat->rx_queues_to_use;
1487 	int ret = -ENOMEM;
1488 	u32 queue;
1489 
1490 	/* RX queues buffers and DMA */
1491 	for (queue = 0; queue < rx_count; queue++) {
1492 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1493 
1494 		rx_q->queue_index = queue;
1495 		rx_q->priv_data = priv;
1496 
1497 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1498 						    sizeof(dma_addr_t),
1499 						    GFP_KERNEL);
1500 		if (!rx_q->rx_skbuff_dma)
1501 			goto err_dma;
1502 
1503 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1504 						sizeof(struct sk_buff *),
1505 						GFP_KERNEL);
1506 		if (!rx_q->rx_skbuff)
1507 			goto err_dma;
1508 
1509 		if (priv->extend_desc) {
1510 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1511 							    DMA_RX_SIZE *
1512 							    sizeof(struct
1513 							    dma_extended_desc),
1514 							    &rx_q->dma_rx_phy,
1515 							    GFP_KERNEL);
1516 			if (!rx_q->dma_erx)
1517 				goto err_dma;
1518 
1519 		} else {
1520 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1521 							   DMA_RX_SIZE *
1522 							   sizeof(struct
1523 							   dma_desc),
1524 							   &rx_q->dma_rx_phy,
1525 							   GFP_KERNEL);
1526 			if (!rx_q->dma_rx)
1527 				goto err_dma;
1528 		}
1529 	}
1530 
1531 	return 0;
1532 
1533 err_dma:
1534 	free_dma_rx_desc_resources(priv);
1535 
1536 	return ret;
1537 }
1538 
1539 /**
1540  * alloc_dma_tx_desc_resources - alloc TX resources.
1541  * @priv: private structure
1542  * Description: according to which descriptor can be used (extend or basic)
1543  * this function allocates the resources for TX and RX paths. In case of
1544  * reception, for example, it pre-allocated the RX socket buffer in order to
1545  * allow zero-copy mechanism.
1546  */
1547 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1548 {
1549 	u32 tx_count = priv->plat->tx_queues_to_use;
1550 	int ret = -ENOMEM;
1551 	u32 queue;
1552 
1553 	/* TX queues buffers and DMA */
1554 	for (queue = 0; queue < tx_count; queue++) {
1555 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1556 
1557 		tx_q->queue_index = queue;
1558 		tx_q->priv_data = priv;
1559 
1560 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1561 						    sizeof(*tx_q->tx_skbuff_dma),
1562 						    GFP_KERNEL);
1563 		if (!tx_q->tx_skbuff_dma)
1564 			goto err_dma;
1565 
1566 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1567 						sizeof(struct sk_buff *),
1568 						GFP_KERNEL);
1569 		if (!tx_q->tx_skbuff)
1570 			goto err_dma;
1571 
1572 		if (priv->extend_desc) {
1573 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1574 							    DMA_TX_SIZE *
1575 							    sizeof(struct
1576 							    dma_extended_desc),
1577 							    &tx_q->dma_tx_phy,
1578 							    GFP_KERNEL);
1579 			if (!tx_q->dma_etx)
1580 				goto err_dma;
1581 		} else {
1582 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1583 							   DMA_TX_SIZE *
1584 							   sizeof(struct
1585 								  dma_desc),
1586 							   &tx_q->dma_tx_phy,
1587 							   GFP_KERNEL);
1588 			if (!tx_q->dma_tx)
1589 				goto err_dma;
1590 		}
1591 	}
1592 
1593 	return 0;
1594 
1595 err_dma:
1596 	free_dma_tx_desc_resources(priv);
1597 
1598 	return ret;
1599 }
1600 
1601 /**
1602  * alloc_dma_desc_resources - alloc TX/RX resources.
1603  * @priv: private structure
1604  * Description: according to which descriptor can be used (extend or basic)
1605  * this function allocates the resources for TX and RX paths. In case of
1606  * reception, for example, it pre-allocated the RX socket buffer in order to
1607  * allow zero-copy mechanism.
1608  */
1609 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1610 {
1611 	/* RX Allocation */
1612 	int ret = alloc_dma_rx_desc_resources(priv);
1613 
1614 	if (ret)
1615 		return ret;
1616 
1617 	ret = alloc_dma_tx_desc_resources(priv);
1618 
1619 	return ret;
1620 }
1621 
1622 /**
1623  * free_dma_desc_resources - free dma desc resources
1624  * @priv: private structure
1625  */
1626 static void free_dma_desc_resources(struct stmmac_priv *priv)
1627 {
1628 	/* Release the DMA RX socket buffers */
1629 	free_dma_rx_desc_resources(priv);
1630 
1631 	/* Release the DMA TX socket buffers */
1632 	free_dma_tx_desc_resources(priv);
1633 }
1634 
1635 /**
1636  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1637  *  @priv: driver private structure
1638  *  Description: It is used for enabling the rx queues in the MAC
1639  */
1640 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1641 {
1642 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1643 	int queue;
1644 	u8 mode;
1645 
1646 	for (queue = 0; queue < rx_queues_count; queue++) {
1647 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1648 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1649 	}
1650 }
1651 
1652 /**
1653  * stmmac_start_rx_dma - start RX DMA channel
1654  * @priv: driver private structure
1655  * @chan: RX channel index
1656  * Description:
1657  * This starts a RX DMA channel
1658  */
1659 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1660 {
1661 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1662 	stmmac_start_rx(priv, priv->ioaddr, chan);
1663 }
1664 
1665 /**
1666  * stmmac_start_tx_dma - start TX DMA channel
1667  * @priv: driver private structure
1668  * @chan: TX channel index
1669  * Description:
1670  * This starts a TX DMA channel
1671  */
1672 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1673 {
1674 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1675 	stmmac_start_tx(priv, priv->ioaddr, chan);
1676 }
1677 
1678 /**
1679  * stmmac_stop_rx_dma - stop RX DMA channel
1680  * @priv: driver private structure
1681  * @chan: RX channel index
1682  * Description:
1683  * This stops a RX DMA channel
1684  */
1685 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1686 {
1687 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1688 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1689 }
1690 
1691 /**
1692  * stmmac_stop_tx_dma - stop TX DMA channel
1693  * @priv: driver private structure
1694  * @chan: TX channel index
1695  * Description:
1696  * This stops a TX DMA channel
1697  */
1698 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1699 {
1700 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1701 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1702 }
1703 
1704 /**
1705  * stmmac_start_all_dma - start all RX and TX DMA channels
1706  * @priv: driver private structure
1707  * Description:
1708  * This starts all the RX and TX DMA channels
1709  */
1710 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1711 {
1712 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1713 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1714 	u32 chan = 0;
1715 
1716 	for (chan = 0; chan < rx_channels_count; chan++)
1717 		stmmac_start_rx_dma(priv, chan);
1718 
1719 	for (chan = 0; chan < tx_channels_count; chan++)
1720 		stmmac_start_tx_dma(priv, chan);
1721 }
1722 
1723 /**
1724  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1725  * @priv: driver private structure
1726  * Description:
1727  * This stops the RX and TX DMA channels
1728  */
1729 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1730 {
1731 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1732 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1733 	u32 chan = 0;
1734 
1735 	for (chan = 0; chan < rx_channels_count; chan++)
1736 		stmmac_stop_rx_dma(priv, chan);
1737 
1738 	for (chan = 0; chan < tx_channels_count; chan++)
1739 		stmmac_stop_tx_dma(priv, chan);
1740 }
1741 
1742 /**
1743  *  stmmac_dma_operation_mode - HW DMA operation mode
1744  *  @priv: driver private structure
1745  *  Description: it is used for configuring the DMA operation mode register in
1746  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1747  */
1748 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1749 {
1750 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1751 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1752 	int rxfifosz = priv->plat->rx_fifo_size;
1753 	int txfifosz = priv->plat->tx_fifo_size;
1754 	u32 txmode = 0;
1755 	u32 rxmode = 0;
1756 	u32 chan = 0;
1757 	u8 qmode = 0;
1758 
1759 	if (rxfifosz == 0)
1760 		rxfifosz = priv->dma_cap.rx_fifo_size;
1761 	if (txfifosz == 0)
1762 		txfifosz = priv->dma_cap.tx_fifo_size;
1763 
1764 	/* Adjust for real per queue fifo size */
1765 	rxfifosz /= rx_channels_count;
1766 	txfifosz /= tx_channels_count;
1767 
1768 	if (priv->plat->force_thresh_dma_mode) {
1769 		txmode = tc;
1770 		rxmode = tc;
1771 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1772 		/*
1773 		 * In case of GMAC, SF mode can be enabled
1774 		 * to perform the TX COE in HW. This depends on:
1775 		 * 1) TX COE if actually supported
1776 		 * 2) There is no bugged Jumbo frame support
1777 		 *    that needs to not insert csum in the TDES.
1778 		 */
1779 		txmode = SF_DMA_MODE;
1780 		rxmode = SF_DMA_MODE;
1781 		priv->xstats.threshold = SF_DMA_MODE;
1782 	} else {
1783 		txmode = tc;
1784 		rxmode = SF_DMA_MODE;
1785 	}
1786 
1787 	/* configure all channels */
1788 	for (chan = 0; chan < rx_channels_count; chan++) {
1789 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1790 
1791 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1792 				rxfifosz, qmode);
1793 	}
1794 
1795 	for (chan = 0; chan < tx_channels_count; chan++) {
1796 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1797 
1798 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1799 				txfifosz, qmode);
1800 	}
1801 }
1802 
1803 /**
1804  * stmmac_tx_clean - to manage the transmission completion
1805  * @priv: driver private structure
1806  * @queue: TX queue index
1807  * Description: it reclaims the transmit resources after transmission completes.
1808  */
1809 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1810 {
1811 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1812 	unsigned int bytes_compl = 0, pkts_compl = 0;
1813 	unsigned int entry;
1814 
1815 	netif_tx_lock(priv->dev);
1816 
1817 	priv->xstats.tx_clean++;
1818 
1819 	entry = tx_q->dirty_tx;
1820 	while (entry != tx_q->cur_tx) {
1821 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1822 		struct dma_desc *p;
1823 		int status;
1824 
1825 		if (priv->extend_desc)
1826 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1827 		else
1828 			p = tx_q->dma_tx + entry;
1829 
1830 		status = stmmac_tx_status(priv, &priv->dev->stats,
1831 				&priv->xstats, p, priv->ioaddr);
1832 		/* Check if the descriptor is owned by the DMA */
1833 		if (unlikely(status & tx_dma_own))
1834 			break;
1835 
1836 		/* Make sure descriptor fields are read after reading
1837 		 * the own bit.
1838 		 */
1839 		dma_rmb();
1840 
1841 		/* Just consider the last segment and ...*/
1842 		if (likely(!(status & tx_not_ls))) {
1843 			/* ... verify the status error condition */
1844 			if (unlikely(status & tx_err)) {
1845 				priv->dev->stats.tx_errors++;
1846 			} else {
1847 				priv->dev->stats.tx_packets++;
1848 				priv->xstats.tx_pkt_n++;
1849 			}
1850 			stmmac_get_tx_hwtstamp(priv, p, skb);
1851 		}
1852 
1853 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1854 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1855 				dma_unmap_page(priv->device,
1856 					       tx_q->tx_skbuff_dma[entry].buf,
1857 					       tx_q->tx_skbuff_dma[entry].len,
1858 					       DMA_TO_DEVICE);
1859 			else
1860 				dma_unmap_single(priv->device,
1861 						 tx_q->tx_skbuff_dma[entry].buf,
1862 						 tx_q->tx_skbuff_dma[entry].len,
1863 						 DMA_TO_DEVICE);
1864 			tx_q->tx_skbuff_dma[entry].buf = 0;
1865 			tx_q->tx_skbuff_dma[entry].len = 0;
1866 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1867 		}
1868 
1869 		stmmac_clean_desc3(priv, tx_q, p);
1870 
1871 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1872 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1873 
1874 		if (likely(skb != NULL)) {
1875 			pkts_compl++;
1876 			bytes_compl += skb->len;
1877 			dev_consume_skb_any(skb);
1878 			tx_q->tx_skbuff[entry] = NULL;
1879 		}
1880 
1881 		stmmac_release_tx_desc(priv, p, priv->mode);
1882 
1883 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1884 	}
1885 	tx_q->dirty_tx = entry;
1886 
1887 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1888 				  pkts_compl, bytes_compl);
1889 
1890 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1891 								queue))) &&
1892 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1893 
1894 		netif_dbg(priv, tx_done, priv->dev,
1895 			  "%s: restart transmit\n", __func__);
1896 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1897 	}
1898 
1899 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1900 		stmmac_enable_eee_mode(priv);
1901 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1902 	}
1903 	netif_tx_unlock(priv->dev);
1904 }
1905 
1906 /**
1907  * stmmac_tx_err - to manage the tx error
1908  * @priv: driver private structure
1909  * @chan: channel index
1910  * Description: it cleans the descriptors and restarts the transmission
1911  * in case of transmission errors.
1912  */
1913 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1914 {
1915 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1916 	int i;
1917 
1918 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1919 
1920 	stmmac_stop_tx_dma(priv, chan);
1921 	dma_free_tx_skbufs(priv, chan);
1922 	for (i = 0; i < DMA_TX_SIZE; i++)
1923 		if (priv->extend_desc)
1924 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1925 					priv->mode, (i == DMA_TX_SIZE - 1));
1926 		else
1927 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1928 					priv->mode, (i == DMA_TX_SIZE - 1));
1929 	tx_q->dirty_tx = 0;
1930 	tx_q->cur_tx = 0;
1931 	tx_q->mss = 0;
1932 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1933 	stmmac_start_tx_dma(priv, chan);
1934 
1935 	priv->dev->stats.tx_errors++;
1936 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1937 }
1938 
1939 /**
1940  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1941  *  @priv: driver private structure
1942  *  @txmode: TX operating mode
1943  *  @rxmode: RX operating mode
1944  *  @chan: channel index
1945  *  Description: it is used for configuring of the DMA operation mode in
1946  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1947  *  mode.
1948  */
1949 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1950 					  u32 rxmode, u32 chan)
1951 {
1952 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1953 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1954 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1955 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1956 	int rxfifosz = priv->plat->rx_fifo_size;
1957 	int txfifosz = priv->plat->tx_fifo_size;
1958 
1959 	if (rxfifosz == 0)
1960 		rxfifosz = priv->dma_cap.rx_fifo_size;
1961 	if (txfifosz == 0)
1962 		txfifosz = priv->dma_cap.tx_fifo_size;
1963 
1964 	/* Adjust for real per queue fifo size */
1965 	rxfifosz /= rx_channels_count;
1966 	txfifosz /= tx_channels_count;
1967 
1968 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
1969 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
1970 }
1971 
1972 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
1973 {
1974 	int ret;
1975 
1976 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
1977 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
1978 	if (ret && (ret != -EINVAL)) {
1979 		stmmac_global_err(priv);
1980 		return true;
1981 	}
1982 
1983 	return false;
1984 }
1985 
1986 /**
1987  * stmmac_dma_interrupt - DMA ISR
1988  * @priv: driver private structure
1989  * Description: this is the DMA ISR. It is called by the main ISR.
1990  * It calls the dwmac dma routine and schedule poll method in case of some
1991  * work can be done.
1992  */
1993 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1994 {
1995 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
1996 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
1997 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
1998 				tx_channel_count : rx_channel_count;
1999 	u32 chan;
2000 	bool poll_scheduled = false;
2001 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2002 
2003 	/* Make sure we never check beyond our status buffer. */
2004 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2005 		channels_to_check = ARRAY_SIZE(status);
2006 
2007 	/* Each DMA channel can be used for rx and tx simultaneously, yet
2008 	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2009 	 * stmmac_channel struct.
2010 	 * Because of this, stmmac_poll currently checks (and possibly wakes)
2011 	 * all tx queues rather than just a single tx queue.
2012 	 */
2013 	for (chan = 0; chan < channels_to_check; chan++)
2014 		status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2015 				&priv->xstats, chan);
2016 
2017 	for (chan = 0; chan < rx_channel_count; chan++) {
2018 		if (likely(status[chan] & handle_rx)) {
2019 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2020 
2021 			if (likely(napi_schedule_prep(&rx_q->napi))) {
2022 				stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2023 				__napi_schedule(&rx_q->napi);
2024 				poll_scheduled = true;
2025 			}
2026 		}
2027 	}
2028 
2029 	/* If we scheduled poll, we already know that tx queues will be checked.
2030 	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2031 	 * completed transmission, if so, call stmmac_poll (once).
2032 	 */
2033 	if (!poll_scheduled) {
2034 		for (chan = 0; chan < tx_channel_count; chan++) {
2035 			if (status[chan] & handle_tx) {
2036 				/* It doesn't matter what rx queue we choose
2037 				 * here. We use 0 since it always exists.
2038 				 */
2039 				struct stmmac_rx_queue *rx_q =
2040 					&priv->rx_queue[0];
2041 
2042 				if (likely(napi_schedule_prep(&rx_q->napi))) {
2043 					stmmac_disable_dma_irq(priv,
2044 							priv->ioaddr, chan);
2045 					__napi_schedule(&rx_q->napi);
2046 				}
2047 				break;
2048 			}
2049 		}
2050 	}
2051 
2052 	for (chan = 0; chan < tx_channel_count; chan++) {
2053 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2054 			/* Try to bump up the dma threshold on this failure */
2055 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2056 			    (tc <= 256)) {
2057 				tc += 64;
2058 				if (priv->plat->force_thresh_dma_mode)
2059 					stmmac_set_dma_operation_mode(priv,
2060 								      tc,
2061 								      tc,
2062 								      chan);
2063 				else
2064 					stmmac_set_dma_operation_mode(priv,
2065 								    tc,
2066 								    SF_DMA_MODE,
2067 								    chan);
2068 				priv->xstats.threshold = tc;
2069 			}
2070 		} else if (unlikely(status[chan] == tx_hard_error)) {
2071 			stmmac_tx_err(priv, chan);
2072 		}
2073 	}
2074 }
2075 
2076 /**
2077  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2078  * @priv: driver private structure
2079  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2080  */
2081 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2082 {
2083 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2084 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2085 
2086 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2087 
2088 	if (priv->dma_cap.rmon) {
2089 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2090 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2091 	} else
2092 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2093 }
2094 
2095 /**
2096  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2097  * @priv: driver private structure
2098  * Description:
2099  *  new GMAC chip generations have a new register to indicate the
2100  *  presence of the optional feature/functions.
2101  *  This can be also used to override the value passed through the
2102  *  platform and necessary for old MAC10/100 and GMAC chips.
2103  */
2104 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2105 {
2106 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2107 }
2108 
2109 /**
2110  * stmmac_check_ether_addr - check if the MAC addr is valid
2111  * @priv: driver private structure
2112  * Description:
2113  * it is to verify if the MAC address is valid, in case of failures it
2114  * generates a random MAC address
2115  */
2116 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2117 {
2118 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2119 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2120 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2121 			eth_hw_addr_random(priv->dev);
2122 		netdev_info(priv->dev, "device MAC address %pM\n",
2123 			    priv->dev->dev_addr);
2124 	}
2125 }
2126 
2127 /**
2128  * stmmac_init_dma_engine - DMA init.
2129  * @priv: driver private structure
2130  * Description:
2131  * It inits the DMA invoking the specific MAC/GMAC callback.
2132  * Some DMA parameters can be passed from the platform;
2133  * in case of these are not passed a default is kept for the MAC or GMAC.
2134  */
2135 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2136 {
2137 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2138 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2139 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2140 	struct stmmac_rx_queue *rx_q;
2141 	struct stmmac_tx_queue *tx_q;
2142 	u32 chan = 0;
2143 	int atds = 0;
2144 	int ret = 0;
2145 
2146 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2147 		dev_err(priv->device, "Invalid DMA configuration\n");
2148 		return -EINVAL;
2149 	}
2150 
2151 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2152 		atds = 1;
2153 
2154 	ret = stmmac_reset(priv, priv->ioaddr);
2155 	if (ret) {
2156 		dev_err(priv->device, "Failed to reset the dma\n");
2157 		return ret;
2158 	}
2159 
2160 	/* DMA RX Channel Configuration */
2161 	for (chan = 0; chan < rx_channels_count; chan++) {
2162 		rx_q = &priv->rx_queue[chan];
2163 
2164 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2165 				    rx_q->dma_rx_phy, chan);
2166 
2167 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2168 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2169 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2170 				       rx_q->rx_tail_addr, chan);
2171 	}
2172 
2173 	/* DMA TX Channel Configuration */
2174 	for (chan = 0; chan < tx_channels_count; chan++) {
2175 		tx_q = &priv->tx_queue[chan];
2176 
2177 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2178 				    tx_q->dma_tx_phy, chan);
2179 
2180 		tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2181 			    (DMA_TX_SIZE * sizeof(struct dma_desc));
2182 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2183 				       tx_q->tx_tail_addr, chan);
2184 	}
2185 
2186 	/* DMA CSR Channel configuration */
2187 	for (chan = 0; chan < dma_csr_ch; chan++)
2188 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2189 
2190 	/* DMA Configuration */
2191 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2192 
2193 	if (priv->plat->axi)
2194 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2195 
2196 	return ret;
2197 }
2198 
2199 /**
2200  * stmmac_tx_timer - mitigation sw timer for tx.
2201  * @data: data pointer
2202  * Description:
2203  * This is the timer handler to directly invoke the stmmac_tx_clean.
2204  */
2205 static void stmmac_tx_timer(struct timer_list *t)
2206 {
2207 	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2208 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2209 	u32 queue;
2210 
2211 	/* let's scan all the tx queues */
2212 	for (queue = 0; queue < tx_queues_count; queue++)
2213 		stmmac_tx_clean(priv, queue);
2214 }
2215 
2216 /**
2217  * stmmac_init_tx_coalesce - init tx mitigation options.
2218  * @priv: driver private structure
2219  * Description:
2220  * This inits the transmit coalesce parameters: i.e. timer rate,
2221  * timer handler and default threshold used for enabling the
2222  * interrupt on completion bit.
2223  */
2224 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2225 {
2226 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2227 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2228 	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2229 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2230 	add_timer(&priv->txtimer);
2231 }
2232 
2233 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2234 {
2235 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2236 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2237 	u32 chan;
2238 
2239 	/* set TX ring length */
2240 	for (chan = 0; chan < tx_channels_count; chan++)
2241 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2242 				(DMA_TX_SIZE - 1), chan);
2243 
2244 	/* set RX ring length */
2245 	for (chan = 0; chan < rx_channels_count; chan++)
2246 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2247 				(DMA_RX_SIZE - 1), chan);
2248 }
2249 
2250 /**
2251  *  stmmac_set_tx_queue_weight - Set TX queue weight
2252  *  @priv: driver private structure
2253  *  Description: It is used for setting TX queues weight
2254  */
2255 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2256 {
2257 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2258 	u32 weight;
2259 	u32 queue;
2260 
2261 	for (queue = 0; queue < tx_queues_count; queue++) {
2262 		weight = priv->plat->tx_queues_cfg[queue].weight;
2263 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2264 	}
2265 }
2266 
2267 /**
2268  *  stmmac_configure_cbs - Configure CBS in TX queue
2269  *  @priv: driver private structure
2270  *  Description: It is used for configuring CBS in AVB TX queues
2271  */
2272 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2273 {
2274 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2275 	u32 mode_to_use;
2276 	u32 queue;
2277 
2278 	/* queue 0 is reserved for legacy traffic */
2279 	for (queue = 1; queue < tx_queues_count; queue++) {
2280 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2281 		if (mode_to_use == MTL_QUEUE_DCB)
2282 			continue;
2283 
2284 		stmmac_config_cbs(priv, priv->hw,
2285 				priv->plat->tx_queues_cfg[queue].send_slope,
2286 				priv->plat->tx_queues_cfg[queue].idle_slope,
2287 				priv->plat->tx_queues_cfg[queue].high_credit,
2288 				priv->plat->tx_queues_cfg[queue].low_credit,
2289 				queue);
2290 	}
2291 }
2292 
2293 /**
2294  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2295  *  @priv: driver private structure
2296  *  Description: It is used for mapping RX queues to RX dma channels
2297  */
2298 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2299 {
2300 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2301 	u32 queue;
2302 	u32 chan;
2303 
2304 	for (queue = 0; queue < rx_queues_count; queue++) {
2305 		chan = priv->plat->rx_queues_cfg[queue].chan;
2306 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2307 	}
2308 }
2309 
2310 /**
2311  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2312  *  @priv: driver private structure
2313  *  Description: It is used for configuring the RX Queue Priority
2314  */
2315 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2316 {
2317 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2318 	u32 queue;
2319 	u32 prio;
2320 
2321 	for (queue = 0; queue < rx_queues_count; queue++) {
2322 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2323 			continue;
2324 
2325 		prio = priv->plat->rx_queues_cfg[queue].prio;
2326 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2327 	}
2328 }
2329 
2330 /**
2331  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2332  *  @priv: driver private structure
2333  *  Description: It is used for configuring the TX Queue Priority
2334  */
2335 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2336 {
2337 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2338 	u32 queue;
2339 	u32 prio;
2340 
2341 	for (queue = 0; queue < tx_queues_count; queue++) {
2342 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2343 			continue;
2344 
2345 		prio = priv->plat->tx_queues_cfg[queue].prio;
2346 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2347 	}
2348 }
2349 
2350 /**
2351  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2352  *  @priv: driver private structure
2353  *  Description: It is used for configuring the RX queue routing
2354  */
2355 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2356 {
2357 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2358 	u32 queue;
2359 	u8 packet;
2360 
2361 	for (queue = 0; queue < rx_queues_count; queue++) {
2362 		/* no specific packet type routing specified for the queue */
2363 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2364 			continue;
2365 
2366 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2367 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2368 	}
2369 }
2370 
2371 /**
2372  *  stmmac_mtl_configuration - Configure MTL
2373  *  @priv: driver private structure
2374  *  Description: It is used for configurring MTL
2375  */
2376 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2377 {
2378 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2379 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2380 
2381 	if (tx_queues_count > 1)
2382 		stmmac_set_tx_queue_weight(priv);
2383 
2384 	/* Configure MTL RX algorithms */
2385 	if (rx_queues_count > 1)
2386 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2387 				priv->plat->rx_sched_algorithm);
2388 
2389 	/* Configure MTL TX algorithms */
2390 	if (tx_queues_count > 1)
2391 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2392 				priv->plat->tx_sched_algorithm);
2393 
2394 	/* Configure CBS in AVB TX queues */
2395 	if (tx_queues_count > 1)
2396 		stmmac_configure_cbs(priv);
2397 
2398 	/* Map RX MTL to DMA channels */
2399 	stmmac_rx_queue_dma_chan_map(priv);
2400 
2401 	/* Enable MAC RX Queues */
2402 	stmmac_mac_enable_rx_queues(priv);
2403 
2404 	/* Set RX priorities */
2405 	if (rx_queues_count > 1)
2406 		stmmac_mac_config_rx_queues_prio(priv);
2407 
2408 	/* Set TX priorities */
2409 	if (tx_queues_count > 1)
2410 		stmmac_mac_config_tx_queues_prio(priv);
2411 
2412 	/* Set RX routing */
2413 	if (rx_queues_count > 1)
2414 		stmmac_mac_config_rx_queues_routing(priv);
2415 }
2416 
2417 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2418 {
2419 	if (priv->dma_cap.asp) {
2420 		netdev_info(priv->dev, "Enabling Safety Features\n");
2421 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2422 	} else {
2423 		netdev_info(priv->dev, "No Safety Features support found\n");
2424 	}
2425 }
2426 
2427 /**
2428  * stmmac_hw_setup - setup mac in a usable state.
2429  *  @dev : pointer to the device structure.
2430  *  Description:
2431  *  this is the main function to setup the HW in a usable state because the
2432  *  dma engine is reset, the core registers are configured (e.g. AXI,
2433  *  Checksum features, timers). The DMA is ready to start receiving and
2434  *  transmitting.
2435  *  Return value:
2436  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2437  *  file on failure.
2438  */
2439 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2440 {
2441 	struct stmmac_priv *priv = netdev_priv(dev);
2442 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2443 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2444 	u32 chan;
2445 	int ret;
2446 
2447 	/* DMA initialization and SW reset */
2448 	ret = stmmac_init_dma_engine(priv);
2449 	if (ret < 0) {
2450 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2451 			   __func__);
2452 		return ret;
2453 	}
2454 
2455 	/* Copy the MAC addr into the HW  */
2456 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2457 
2458 	/* PS and related bits will be programmed according to the speed */
2459 	if (priv->hw->pcs) {
2460 		int speed = priv->plat->mac_port_sel_speed;
2461 
2462 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2463 		    (speed == SPEED_1000)) {
2464 			priv->hw->ps = speed;
2465 		} else {
2466 			dev_warn(priv->device, "invalid port speed\n");
2467 			priv->hw->ps = 0;
2468 		}
2469 	}
2470 
2471 	/* Initialize the MAC Core */
2472 	stmmac_core_init(priv, priv->hw, dev);
2473 
2474 	/* Initialize MTL*/
2475 	stmmac_mtl_configuration(priv);
2476 
2477 	/* Initialize Safety Features */
2478 	stmmac_safety_feat_configuration(priv);
2479 
2480 	ret = stmmac_rx_ipc(priv, priv->hw);
2481 	if (!ret) {
2482 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2483 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2484 		priv->hw->rx_csum = 0;
2485 	}
2486 
2487 	/* Enable the MAC Rx/Tx */
2488 	stmmac_mac_set(priv, priv->ioaddr, true);
2489 
2490 	/* Set the HW DMA mode and the COE */
2491 	stmmac_dma_operation_mode(priv);
2492 
2493 	stmmac_mmc_setup(priv);
2494 
2495 	if (init_ptp) {
2496 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2497 		if (ret < 0)
2498 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2499 
2500 		ret = stmmac_init_ptp(priv);
2501 		if (ret == -EOPNOTSUPP)
2502 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2503 		else if (ret)
2504 			netdev_warn(priv->dev, "PTP init failed\n");
2505 	}
2506 
2507 #ifdef CONFIG_DEBUG_FS
2508 	ret = stmmac_init_fs(dev);
2509 	if (ret < 0)
2510 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2511 			    __func__);
2512 #endif
2513 	/* Start the ball rolling... */
2514 	stmmac_start_all_dma(priv);
2515 
2516 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2517 
2518 	if (priv->use_riwt) {
2519 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2520 		if (!ret)
2521 			priv->rx_riwt = MAX_DMA_RIWT;
2522 	}
2523 
2524 	if (priv->hw->pcs)
2525 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2526 
2527 	/* set TX and RX rings length */
2528 	stmmac_set_rings_length(priv);
2529 
2530 	/* Enable TSO */
2531 	if (priv->tso) {
2532 		for (chan = 0; chan < tx_cnt; chan++)
2533 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2534 	}
2535 
2536 	return 0;
2537 }
2538 
2539 static void stmmac_hw_teardown(struct net_device *dev)
2540 {
2541 	struct stmmac_priv *priv = netdev_priv(dev);
2542 
2543 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2544 }
2545 
2546 /**
2547  *  stmmac_open - open entry point of the driver
2548  *  @dev : pointer to the device structure.
2549  *  Description:
2550  *  This function is the open entry point of the driver.
2551  *  Return value:
2552  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2553  *  file on failure.
2554  */
2555 static int stmmac_open(struct net_device *dev)
2556 {
2557 	struct stmmac_priv *priv = netdev_priv(dev);
2558 	int ret;
2559 
2560 	stmmac_check_ether_addr(priv);
2561 
2562 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2563 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2564 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2565 		ret = stmmac_init_phy(dev);
2566 		if (ret) {
2567 			netdev_err(priv->dev,
2568 				   "%s: Cannot attach to PHY (error: %d)\n",
2569 				   __func__, ret);
2570 			return ret;
2571 		}
2572 	}
2573 
2574 	/* Extra statistics */
2575 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2576 	priv->xstats.threshold = tc;
2577 
2578 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2579 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2580 
2581 	ret = alloc_dma_desc_resources(priv);
2582 	if (ret < 0) {
2583 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2584 			   __func__);
2585 		goto dma_desc_error;
2586 	}
2587 
2588 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2589 	if (ret < 0) {
2590 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2591 			   __func__);
2592 		goto init_error;
2593 	}
2594 
2595 	ret = stmmac_hw_setup(dev, true);
2596 	if (ret < 0) {
2597 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2598 		goto init_error;
2599 	}
2600 
2601 	stmmac_init_tx_coalesce(priv);
2602 
2603 	if (dev->phydev)
2604 		phy_start(dev->phydev);
2605 
2606 	/* Request the IRQ lines */
2607 	ret = request_irq(dev->irq, stmmac_interrupt,
2608 			  IRQF_SHARED, dev->name, dev);
2609 	if (unlikely(ret < 0)) {
2610 		netdev_err(priv->dev,
2611 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2612 			   __func__, dev->irq, ret);
2613 		goto irq_error;
2614 	}
2615 
2616 	/* Request the Wake IRQ in case of another line is used for WoL */
2617 	if (priv->wol_irq != dev->irq) {
2618 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2619 				  IRQF_SHARED, dev->name, dev);
2620 		if (unlikely(ret < 0)) {
2621 			netdev_err(priv->dev,
2622 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2623 				   __func__, priv->wol_irq, ret);
2624 			goto wolirq_error;
2625 		}
2626 	}
2627 
2628 	/* Request the IRQ lines */
2629 	if (priv->lpi_irq > 0) {
2630 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2631 				  dev->name, dev);
2632 		if (unlikely(ret < 0)) {
2633 			netdev_err(priv->dev,
2634 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2635 				   __func__, priv->lpi_irq, ret);
2636 			goto lpiirq_error;
2637 		}
2638 	}
2639 
2640 	stmmac_enable_all_queues(priv);
2641 	stmmac_start_all_queues(priv);
2642 
2643 	return 0;
2644 
2645 lpiirq_error:
2646 	if (priv->wol_irq != dev->irq)
2647 		free_irq(priv->wol_irq, dev);
2648 wolirq_error:
2649 	free_irq(dev->irq, dev);
2650 irq_error:
2651 	if (dev->phydev)
2652 		phy_stop(dev->phydev);
2653 
2654 	del_timer_sync(&priv->txtimer);
2655 	stmmac_hw_teardown(dev);
2656 init_error:
2657 	free_dma_desc_resources(priv);
2658 dma_desc_error:
2659 	if (dev->phydev)
2660 		phy_disconnect(dev->phydev);
2661 
2662 	return ret;
2663 }
2664 
2665 /**
2666  *  stmmac_release - close entry point of the driver
2667  *  @dev : device pointer.
2668  *  Description:
2669  *  This is the stop entry point of the driver.
2670  */
2671 static int stmmac_release(struct net_device *dev)
2672 {
2673 	struct stmmac_priv *priv = netdev_priv(dev);
2674 
2675 	if (priv->eee_enabled)
2676 		del_timer_sync(&priv->eee_ctrl_timer);
2677 
2678 	/* Stop and disconnect the PHY */
2679 	if (dev->phydev) {
2680 		phy_stop(dev->phydev);
2681 		phy_disconnect(dev->phydev);
2682 	}
2683 
2684 	stmmac_stop_all_queues(priv);
2685 
2686 	stmmac_disable_all_queues(priv);
2687 
2688 	del_timer_sync(&priv->txtimer);
2689 
2690 	/* Free the IRQ lines */
2691 	free_irq(dev->irq, dev);
2692 	if (priv->wol_irq != dev->irq)
2693 		free_irq(priv->wol_irq, dev);
2694 	if (priv->lpi_irq > 0)
2695 		free_irq(priv->lpi_irq, dev);
2696 
2697 	/* Stop TX/RX DMA and clear the descriptors */
2698 	stmmac_stop_all_dma(priv);
2699 
2700 	/* Release and free the Rx/Tx resources */
2701 	free_dma_desc_resources(priv);
2702 
2703 	/* Disable the MAC Rx/Tx */
2704 	stmmac_mac_set(priv, priv->ioaddr, false);
2705 
2706 	netif_carrier_off(dev);
2707 
2708 #ifdef CONFIG_DEBUG_FS
2709 	stmmac_exit_fs(dev);
2710 #endif
2711 
2712 	stmmac_release_ptp(priv);
2713 
2714 	return 0;
2715 }
2716 
2717 /**
2718  *  stmmac_tso_allocator - close entry point of the driver
2719  *  @priv: driver private structure
2720  *  @des: buffer start address
2721  *  @total_len: total length to fill in descriptors
2722  *  @last_segmant: condition for the last descriptor
2723  *  @queue: TX queue index
2724  *  Description:
2725  *  This function fills descriptor and request new descriptors according to
2726  *  buffer length to fill
2727  */
2728 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2729 				 int total_len, bool last_segment, u32 queue)
2730 {
2731 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2732 	struct dma_desc *desc;
2733 	u32 buff_size;
2734 	int tmp_len;
2735 
2736 	tmp_len = total_len;
2737 
2738 	while (tmp_len > 0) {
2739 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2740 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2741 		desc = tx_q->dma_tx + tx_q->cur_tx;
2742 
2743 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2744 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2745 			    TSO_MAX_BUFF_SIZE : tmp_len;
2746 
2747 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2748 				0, 1,
2749 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2750 				0, 0);
2751 
2752 		tmp_len -= TSO_MAX_BUFF_SIZE;
2753 	}
2754 }
2755 
2756 /**
2757  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2758  *  @skb : the socket buffer
2759  *  @dev : device pointer
2760  *  Description: this is the transmit function that is called on TSO frames
2761  *  (support available on GMAC4 and newer chips).
2762  *  Diagram below show the ring programming in case of TSO frames:
2763  *
2764  *  First Descriptor
2765  *   --------
2766  *   | DES0 |---> buffer1 = L2/L3/L4 header
2767  *   | DES1 |---> TCP Payload (can continue on next descr...)
2768  *   | DES2 |---> buffer 1 and 2 len
2769  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2770  *   --------
2771  *	|
2772  *     ...
2773  *	|
2774  *   --------
2775  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2776  *   | DES1 | --|
2777  *   | DES2 | --> buffer 1 and 2 len
2778  *   | DES3 |
2779  *   --------
2780  *
2781  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
2782  */
2783 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2784 {
2785 	struct dma_desc *desc, *first, *mss_desc = NULL;
2786 	struct stmmac_priv *priv = netdev_priv(dev);
2787 	int nfrags = skb_shinfo(skb)->nr_frags;
2788 	u32 queue = skb_get_queue_mapping(skb);
2789 	unsigned int first_entry, des;
2790 	struct stmmac_tx_queue *tx_q;
2791 	int tmp_pay_len = 0;
2792 	u32 pay_len, mss;
2793 	u8 proto_hdr_len;
2794 	int i;
2795 
2796 	tx_q = &priv->tx_queue[queue];
2797 
2798 	/* Compute header lengths */
2799 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2800 
2801 	/* Desc availability based on threshold should be enough safe */
2802 	if (unlikely(stmmac_tx_avail(priv, queue) <
2803 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2804 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2805 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2806 								queue));
2807 			/* This is a hard error, log it. */
2808 			netdev_err(priv->dev,
2809 				   "%s: Tx Ring full when queue awake\n",
2810 				   __func__);
2811 		}
2812 		return NETDEV_TX_BUSY;
2813 	}
2814 
2815 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2816 
2817 	mss = skb_shinfo(skb)->gso_size;
2818 
2819 	/* set new MSS value if needed */
2820 	if (mss != tx_q->mss) {
2821 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2822 		stmmac_set_mss(priv, mss_desc, mss);
2823 		tx_q->mss = mss;
2824 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2825 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2826 	}
2827 
2828 	if (netif_msg_tx_queued(priv)) {
2829 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2830 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2831 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2832 			skb->data_len);
2833 	}
2834 
2835 	first_entry = tx_q->cur_tx;
2836 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2837 
2838 	desc = tx_q->dma_tx + first_entry;
2839 	first = desc;
2840 
2841 	/* first descriptor: fill Headers on Buf1 */
2842 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2843 			     DMA_TO_DEVICE);
2844 	if (dma_mapping_error(priv->device, des))
2845 		goto dma_map_err;
2846 
2847 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2848 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2849 
2850 	first->des0 = cpu_to_le32(des);
2851 
2852 	/* Fill start of payload in buff2 of first descriptor */
2853 	if (pay_len)
2854 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2855 
2856 	/* If needed take extra descriptors to fill the remaining payload */
2857 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2858 
2859 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2860 
2861 	/* Prepare fragments */
2862 	for (i = 0; i < nfrags; i++) {
2863 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2864 
2865 		des = skb_frag_dma_map(priv->device, frag, 0,
2866 				       skb_frag_size(frag),
2867 				       DMA_TO_DEVICE);
2868 		if (dma_mapping_error(priv->device, des))
2869 			goto dma_map_err;
2870 
2871 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2872 				     (i == nfrags - 1), queue);
2873 
2874 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2875 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2876 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2877 	}
2878 
2879 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2880 
2881 	/* Only the last descriptor gets to point to the skb. */
2882 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2883 
2884 	/* We've used all descriptors we need for this skb, however,
2885 	 * advance cur_tx so that it references a fresh descriptor.
2886 	 * ndo_start_xmit will fill this descriptor the next time it's
2887 	 * called and stmmac_tx_clean may clean up to this descriptor.
2888 	 */
2889 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2890 
2891 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2892 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2893 			  __func__);
2894 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2895 	}
2896 
2897 	dev->stats.tx_bytes += skb->len;
2898 	priv->xstats.tx_tso_frames++;
2899 	priv->xstats.tx_tso_nfrags += nfrags;
2900 
2901 	/* Manage tx mitigation */
2902 	priv->tx_count_frames += nfrags + 1;
2903 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2904 		mod_timer(&priv->txtimer,
2905 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2906 	} else {
2907 		priv->tx_count_frames = 0;
2908 		stmmac_set_tx_ic(priv, desc);
2909 		priv->xstats.tx_set_ic_bit++;
2910 	}
2911 
2912 	skb_tx_timestamp(skb);
2913 
2914 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2915 		     priv->hwts_tx_en)) {
2916 		/* declare that device is doing timestamping */
2917 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2918 		stmmac_enable_tx_timestamp(priv, first);
2919 	}
2920 
2921 	/* Complete the first descriptor before granting the DMA */
2922 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2923 			proto_hdr_len,
2924 			pay_len,
2925 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2926 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2927 
2928 	/* If context desc is used to change MSS */
2929 	if (mss_desc) {
2930 		/* Make sure that first descriptor has been completely
2931 		 * written, including its own bit. This is because MSS is
2932 		 * actually before first descriptor, so we need to make
2933 		 * sure that MSS's own bit is the last thing written.
2934 		 */
2935 		dma_wmb();
2936 		stmmac_set_tx_owner(priv, mss_desc);
2937 	}
2938 
2939 	/* The own bit must be the latest setting done when prepare the
2940 	 * descriptor and then barrier is needed to make sure that
2941 	 * all is coherent before granting the DMA engine.
2942 	 */
2943 	wmb();
2944 
2945 	if (netif_msg_pktdata(priv)) {
2946 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2947 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2948 			tx_q->cur_tx, first, nfrags);
2949 
2950 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2951 
2952 		pr_info(">>> frame to be transmitted: ");
2953 		print_pkt(skb->data, skb_headlen(skb));
2954 	}
2955 
2956 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2957 
2958 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2959 
2960 	return NETDEV_TX_OK;
2961 
2962 dma_map_err:
2963 	dev_err(priv->device, "Tx dma map failed\n");
2964 	dev_kfree_skb(skb);
2965 	priv->dev->stats.tx_dropped++;
2966 	return NETDEV_TX_OK;
2967 }
2968 
2969 /**
2970  *  stmmac_xmit - Tx entry point of the driver
2971  *  @skb : the socket buffer
2972  *  @dev : device pointer
2973  *  Description : this is the tx entry point of the driver.
2974  *  It programs the chain or the ring and supports oversized frames
2975  *  and SG feature.
2976  */
2977 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2978 {
2979 	struct stmmac_priv *priv = netdev_priv(dev);
2980 	unsigned int nopaged_len = skb_headlen(skb);
2981 	int i, csum_insertion = 0, is_jumbo = 0;
2982 	u32 queue = skb_get_queue_mapping(skb);
2983 	int nfrags = skb_shinfo(skb)->nr_frags;
2984 	int entry;
2985 	unsigned int first_entry;
2986 	struct dma_desc *desc, *first;
2987 	struct stmmac_tx_queue *tx_q;
2988 	unsigned int enh_desc;
2989 	unsigned int des;
2990 
2991 	tx_q = &priv->tx_queue[queue];
2992 
2993 	/* Manage oversized TCP frames for GMAC4 device */
2994 	if (skb_is_gso(skb) && priv->tso) {
2995 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
2996 			return stmmac_tso_xmit(skb, dev);
2997 	}
2998 
2999 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3000 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3001 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3002 								queue));
3003 			/* This is a hard error, log it. */
3004 			netdev_err(priv->dev,
3005 				   "%s: Tx Ring full when queue awake\n",
3006 				   __func__);
3007 		}
3008 		return NETDEV_TX_BUSY;
3009 	}
3010 
3011 	if (priv->tx_path_in_lpi_mode)
3012 		stmmac_disable_eee_mode(priv);
3013 
3014 	entry = tx_q->cur_tx;
3015 	first_entry = entry;
3016 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3017 
3018 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3019 
3020 	if (likely(priv->extend_desc))
3021 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3022 	else
3023 		desc = tx_q->dma_tx + entry;
3024 
3025 	first = desc;
3026 
3027 	enh_desc = priv->plat->enh_desc;
3028 	/* To program the descriptors according to the size of the frame */
3029 	if (enh_desc)
3030 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3031 
3032 	if (unlikely(is_jumbo)) {
3033 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3034 		if (unlikely(entry < 0) && (entry != -EINVAL))
3035 			goto dma_map_err;
3036 	}
3037 
3038 	for (i = 0; i < nfrags; i++) {
3039 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3040 		int len = skb_frag_size(frag);
3041 		bool last_segment = (i == (nfrags - 1));
3042 
3043 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3044 		WARN_ON(tx_q->tx_skbuff[entry]);
3045 
3046 		if (likely(priv->extend_desc))
3047 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3048 		else
3049 			desc = tx_q->dma_tx + entry;
3050 
3051 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3052 				       DMA_TO_DEVICE);
3053 		if (dma_mapping_error(priv->device, des))
3054 			goto dma_map_err; /* should reuse desc w/o issues */
3055 
3056 		tx_q->tx_skbuff_dma[entry].buf = des;
3057 
3058 		stmmac_set_desc_addr(priv, desc, des);
3059 
3060 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3061 		tx_q->tx_skbuff_dma[entry].len = len;
3062 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3063 
3064 		/* Prepare the descriptor and set the own bit too */
3065 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3066 				priv->mode, 1, last_segment, skb->len);
3067 	}
3068 
3069 	/* Only the last descriptor gets to point to the skb. */
3070 	tx_q->tx_skbuff[entry] = skb;
3071 
3072 	/* We've used all descriptors we need for this skb, however,
3073 	 * advance cur_tx so that it references a fresh descriptor.
3074 	 * ndo_start_xmit will fill this descriptor the next time it's
3075 	 * called and stmmac_tx_clean may clean up to this descriptor.
3076 	 */
3077 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3078 	tx_q->cur_tx = entry;
3079 
3080 	if (netif_msg_pktdata(priv)) {
3081 		void *tx_head;
3082 
3083 		netdev_dbg(priv->dev,
3084 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3085 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3086 			   entry, first, nfrags);
3087 
3088 		if (priv->extend_desc)
3089 			tx_head = (void *)tx_q->dma_etx;
3090 		else
3091 			tx_head = (void *)tx_q->dma_tx;
3092 
3093 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3094 
3095 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3096 		print_pkt(skb->data, skb->len);
3097 	}
3098 
3099 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3100 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3101 			  __func__);
3102 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3103 	}
3104 
3105 	dev->stats.tx_bytes += skb->len;
3106 
3107 	/* According to the coalesce parameter the IC bit for the latest
3108 	 * segment is reset and the timer re-started to clean the tx status.
3109 	 * This approach takes care about the fragments: desc is the first
3110 	 * element in case of no SG.
3111 	 */
3112 	priv->tx_count_frames += nfrags + 1;
3113 	if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
3114 	    !priv->tx_timer_armed) {
3115 		mod_timer(&priv->txtimer,
3116 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3117 		priv->tx_timer_armed = true;
3118 	} else {
3119 		priv->tx_count_frames = 0;
3120 		stmmac_set_tx_ic(priv, desc);
3121 		priv->xstats.tx_set_ic_bit++;
3122 		priv->tx_timer_armed = false;
3123 	}
3124 
3125 	skb_tx_timestamp(skb);
3126 
3127 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3128 	 * problems because all the descriptors are actually ready to be
3129 	 * passed to the DMA engine.
3130 	 */
3131 	if (likely(!is_jumbo)) {
3132 		bool last_segment = (nfrags == 0);
3133 
3134 		des = dma_map_single(priv->device, skb->data,
3135 				     nopaged_len, DMA_TO_DEVICE);
3136 		if (dma_mapping_error(priv->device, des))
3137 			goto dma_map_err;
3138 
3139 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3140 
3141 		stmmac_set_desc_addr(priv, first, des);
3142 
3143 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3144 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3145 
3146 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3147 			     priv->hwts_tx_en)) {
3148 			/* declare that device is doing timestamping */
3149 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3150 			stmmac_enable_tx_timestamp(priv, first);
3151 		}
3152 
3153 		/* Prepare the first descriptor setting the OWN bit too */
3154 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3155 				csum_insertion, priv->mode, 1, last_segment,
3156 				skb->len);
3157 
3158 		/* The own bit must be the latest setting done when prepare the
3159 		 * descriptor and then barrier is needed to make sure that
3160 		 * all is coherent before granting the DMA engine.
3161 		 */
3162 		wmb();
3163 	}
3164 
3165 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3166 
3167 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
3168 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3169 
3170 	return NETDEV_TX_OK;
3171 
3172 dma_map_err:
3173 	netdev_err(priv->dev, "Tx DMA map failed\n");
3174 	dev_kfree_skb(skb);
3175 	priv->dev->stats.tx_dropped++;
3176 	return NETDEV_TX_OK;
3177 }
3178 
3179 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3180 {
3181 	struct ethhdr *ehdr;
3182 	u16 vlanid;
3183 
3184 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3185 	    NETIF_F_HW_VLAN_CTAG_RX &&
3186 	    !__vlan_get_tag(skb, &vlanid)) {
3187 		/* pop the vlan tag */
3188 		ehdr = (struct ethhdr *)skb->data;
3189 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3190 		skb_pull(skb, VLAN_HLEN);
3191 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3192 	}
3193 }
3194 
3195 
3196 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3197 {
3198 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3199 		return 0;
3200 
3201 	return 1;
3202 }
3203 
3204 /**
3205  * stmmac_rx_refill - refill used skb preallocated buffers
3206  * @priv: driver private structure
3207  * @queue: RX queue index
3208  * Description : this is to reallocate the skb for the reception process
3209  * that is based on zero-copy.
3210  */
3211 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3212 {
3213 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3214 	int dirty = stmmac_rx_dirty(priv, queue);
3215 	unsigned int entry = rx_q->dirty_rx;
3216 
3217 	int bfsize = priv->dma_buf_sz;
3218 
3219 	while (dirty-- > 0) {
3220 		struct dma_desc *p;
3221 
3222 		if (priv->extend_desc)
3223 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3224 		else
3225 			p = rx_q->dma_rx + entry;
3226 
3227 		if (likely(!rx_q->rx_skbuff[entry])) {
3228 			struct sk_buff *skb;
3229 
3230 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3231 			if (unlikely(!skb)) {
3232 				/* so for a while no zero-copy! */
3233 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3234 				if (unlikely(net_ratelimit()))
3235 					dev_err(priv->device,
3236 						"fail to alloc skb entry %d\n",
3237 						entry);
3238 				break;
3239 			}
3240 
3241 			rx_q->rx_skbuff[entry] = skb;
3242 			rx_q->rx_skbuff_dma[entry] =
3243 			    dma_map_single(priv->device, skb->data, bfsize,
3244 					   DMA_FROM_DEVICE);
3245 			if (dma_mapping_error(priv->device,
3246 					      rx_q->rx_skbuff_dma[entry])) {
3247 				netdev_err(priv->dev, "Rx DMA map failed\n");
3248 				dev_kfree_skb(skb);
3249 				break;
3250 			}
3251 
3252 			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3253 			stmmac_refill_desc3(priv, rx_q, p);
3254 
3255 			if (rx_q->rx_zeroc_thresh > 0)
3256 				rx_q->rx_zeroc_thresh--;
3257 
3258 			netif_dbg(priv, rx_status, priv->dev,
3259 				  "refill entry #%d\n", entry);
3260 		}
3261 		dma_wmb();
3262 
3263 		stmmac_set_rx_owner(priv, p, priv->use_riwt);
3264 
3265 		dma_wmb();
3266 
3267 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3268 	}
3269 	rx_q->dirty_rx = entry;
3270 }
3271 
3272 /**
3273  * stmmac_rx - manage the receive process
3274  * @priv: driver private structure
3275  * @limit: napi bugget
3276  * @queue: RX queue index.
3277  * Description :  this the function called by the napi poll method.
3278  * It gets all the frames inside the ring.
3279  */
3280 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3281 {
3282 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3283 	unsigned int entry = rx_q->cur_rx;
3284 	int coe = priv->hw->rx_csum;
3285 	unsigned int next_entry;
3286 	unsigned int count = 0;
3287 
3288 	if (netif_msg_rx_status(priv)) {
3289 		void *rx_head;
3290 
3291 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3292 		if (priv->extend_desc)
3293 			rx_head = (void *)rx_q->dma_erx;
3294 		else
3295 			rx_head = (void *)rx_q->dma_rx;
3296 
3297 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3298 	}
3299 	while (count < limit) {
3300 		int status;
3301 		struct dma_desc *p;
3302 		struct dma_desc *np;
3303 
3304 		if (priv->extend_desc)
3305 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3306 		else
3307 			p = rx_q->dma_rx + entry;
3308 
3309 		/* read the status of the incoming frame */
3310 		status = stmmac_rx_status(priv, &priv->dev->stats,
3311 				&priv->xstats, p);
3312 		/* check if managed by the DMA otherwise go ahead */
3313 		if (unlikely(status & dma_own))
3314 			break;
3315 
3316 		count++;
3317 
3318 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3319 		next_entry = rx_q->cur_rx;
3320 
3321 		if (priv->extend_desc)
3322 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3323 		else
3324 			np = rx_q->dma_rx + next_entry;
3325 
3326 		prefetch(np);
3327 
3328 		if (priv->extend_desc)
3329 			stmmac_rx_extended_status(priv, &priv->dev->stats,
3330 					&priv->xstats, rx_q->dma_erx + entry);
3331 		if (unlikely(status == discard_frame)) {
3332 			priv->dev->stats.rx_errors++;
3333 			if (priv->hwts_rx_en && !priv->extend_desc) {
3334 				/* DESC2 & DESC3 will be overwritten by device
3335 				 * with timestamp value, hence reinitialize
3336 				 * them in stmmac_rx_refill() function so that
3337 				 * device can reuse it.
3338 				 */
3339 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3340 				rx_q->rx_skbuff[entry] = NULL;
3341 				dma_unmap_single(priv->device,
3342 						 rx_q->rx_skbuff_dma[entry],
3343 						 priv->dma_buf_sz,
3344 						 DMA_FROM_DEVICE);
3345 			}
3346 		} else {
3347 			struct sk_buff *skb;
3348 			int frame_len;
3349 			unsigned int des;
3350 
3351 			stmmac_get_desc_addr(priv, p, &des);
3352 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3353 
3354 			/*  If frame length is greater than skb buffer size
3355 			 *  (preallocated during init) then the packet is
3356 			 *  ignored
3357 			 */
3358 			if (frame_len > priv->dma_buf_sz) {
3359 				netdev_err(priv->dev,
3360 					   "len %d larger than size (%d)\n",
3361 					   frame_len, priv->dma_buf_sz);
3362 				priv->dev->stats.rx_length_errors++;
3363 				break;
3364 			}
3365 
3366 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3367 			 * Type frames (LLC/LLC-SNAP)
3368 			 *
3369 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3370 			 * feature is always disabled and packets need to be
3371 			 * stripped manually.
3372 			 */
3373 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3374 			    unlikely(status != llc_snap))
3375 				frame_len -= ETH_FCS_LEN;
3376 
3377 			if (netif_msg_rx_status(priv)) {
3378 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3379 					   p, entry, des);
3380 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3381 					   frame_len, status);
3382 			}
3383 
3384 			/* The zero-copy is always used for all the sizes
3385 			 * in case of GMAC4 because it needs
3386 			 * to refill the used descriptors, always.
3387 			 */
3388 			if (unlikely(!priv->plat->has_gmac4 &&
3389 				     ((frame_len < priv->rx_copybreak) ||
3390 				     stmmac_rx_threshold_count(rx_q)))) {
3391 				skb = netdev_alloc_skb_ip_align(priv->dev,
3392 								frame_len);
3393 				if (unlikely(!skb)) {
3394 					if (net_ratelimit())
3395 						dev_warn(priv->device,
3396 							 "packet dropped\n");
3397 					priv->dev->stats.rx_dropped++;
3398 					break;
3399 				}
3400 
3401 				dma_sync_single_for_cpu(priv->device,
3402 							rx_q->rx_skbuff_dma
3403 							[entry], frame_len,
3404 							DMA_FROM_DEVICE);
3405 				skb_copy_to_linear_data(skb,
3406 							rx_q->
3407 							rx_skbuff[entry]->data,
3408 							frame_len);
3409 
3410 				skb_put(skb, frame_len);
3411 				dma_sync_single_for_device(priv->device,
3412 							   rx_q->rx_skbuff_dma
3413 							   [entry], frame_len,
3414 							   DMA_FROM_DEVICE);
3415 			} else {
3416 				skb = rx_q->rx_skbuff[entry];
3417 				if (unlikely(!skb)) {
3418 					netdev_err(priv->dev,
3419 						   "%s: Inconsistent Rx chain\n",
3420 						   priv->dev->name);
3421 					priv->dev->stats.rx_dropped++;
3422 					break;
3423 				}
3424 				prefetch(skb->data - NET_IP_ALIGN);
3425 				rx_q->rx_skbuff[entry] = NULL;
3426 				rx_q->rx_zeroc_thresh++;
3427 
3428 				skb_put(skb, frame_len);
3429 				dma_unmap_single(priv->device,
3430 						 rx_q->rx_skbuff_dma[entry],
3431 						 priv->dma_buf_sz,
3432 						 DMA_FROM_DEVICE);
3433 			}
3434 
3435 			if (netif_msg_pktdata(priv)) {
3436 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3437 					   frame_len);
3438 				print_pkt(skb->data, frame_len);
3439 			}
3440 
3441 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3442 
3443 			stmmac_rx_vlan(priv->dev, skb);
3444 
3445 			skb->protocol = eth_type_trans(skb, priv->dev);
3446 
3447 			if (unlikely(!coe))
3448 				skb_checksum_none_assert(skb);
3449 			else
3450 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3451 
3452 			napi_gro_receive(&rx_q->napi, skb);
3453 
3454 			priv->dev->stats.rx_packets++;
3455 			priv->dev->stats.rx_bytes += frame_len;
3456 		}
3457 		entry = next_entry;
3458 	}
3459 
3460 	stmmac_rx_refill(priv, queue);
3461 
3462 	priv->xstats.rx_pkt_n += count;
3463 
3464 	return count;
3465 }
3466 
3467 /**
3468  *  stmmac_poll - stmmac poll method (NAPI)
3469  *  @napi : pointer to the napi structure.
3470  *  @budget : maximum number of packets that the current CPU can receive from
3471  *	      all interfaces.
3472  *  Description :
3473  *  To look at the incoming frames and clear the tx resources.
3474  */
3475 static int stmmac_poll(struct napi_struct *napi, int budget)
3476 {
3477 	struct stmmac_rx_queue *rx_q =
3478 		container_of(napi, struct stmmac_rx_queue, napi);
3479 	struct stmmac_priv *priv = rx_q->priv_data;
3480 	u32 tx_count = priv->plat->tx_queues_to_use;
3481 	u32 chan = rx_q->queue_index;
3482 	int work_done = 0;
3483 	u32 queue;
3484 
3485 	priv->xstats.napi_poll++;
3486 
3487 	/* check all the queues */
3488 	for (queue = 0; queue < tx_count; queue++)
3489 		stmmac_tx_clean(priv, queue);
3490 
3491 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3492 	if (work_done < budget) {
3493 		napi_complete_done(napi, work_done);
3494 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3495 	}
3496 	return work_done;
3497 }
3498 
3499 /**
3500  *  stmmac_tx_timeout
3501  *  @dev : Pointer to net device structure
3502  *  Description: this function is called when a packet transmission fails to
3503  *   complete within a reasonable time. The driver will mark the error in the
3504  *   netdev structure and arrange for the device to be reset to a sane state
3505  *   in order to transmit a new packet.
3506  */
3507 static void stmmac_tx_timeout(struct net_device *dev)
3508 {
3509 	struct stmmac_priv *priv = netdev_priv(dev);
3510 
3511 	stmmac_global_err(priv);
3512 }
3513 
3514 /**
3515  *  stmmac_set_rx_mode - entry point for multicast addressing
3516  *  @dev : pointer to the device structure
3517  *  Description:
3518  *  This function is a driver entry point which gets called by the kernel
3519  *  whenever multicast addresses must be enabled/disabled.
3520  *  Return value:
3521  *  void.
3522  */
3523 static void stmmac_set_rx_mode(struct net_device *dev)
3524 {
3525 	struct stmmac_priv *priv = netdev_priv(dev);
3526 
3527 	stmmac_set_filter(priv, priv->hw, dev);
3528 }
3529 
3530 /**
3531  *  stmmac_change_mtu - entry point to change MTU size for the device.
3532  *  @dev : device pointer.
3533  *  @new_mtu : the new MTU size for the device.
3534  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
3535  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3536  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3537  *  Return value:
3538  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3539  *  file on failure.
3540  */
3541 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3542 {
3543 	struct stmmac_priv *priv = netdev_priv(dev);
3544 
3545 	if (netif_running(dev)) {
3546 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3547 		return -EBUSY;
3548 	}
3549 
3550 	dev->mtu = new_mtu;
3551 
3552 	netdev_update_features(dev);
3553 
3554 	return 0;
3555 }
3556 
3557 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3558 					     netdev_features_t features)
3559 {
3560 	struct stmmac_priv *priv = netdev_priv(dev);
3561 
3562 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3563 		features &= ~NETIF_F_RXCSUM;
3564 
3565 	if (!priv->plat->tx_coe)
3566 		features &= ~NETIF_F_CSUM_MASK;
3567 
3568 	/* Some GMAC devices have a bugged Jumbo frame support that
3569 	 * needs to have the Tx COE disabled for oversized frames
3570 	 * (due to limited buffer sizes). In this case we disable
3571 	 * the TX csum insertion in the TDES and not use SF.
3572 	 */
3573 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3574 		features &= ~NETIF_F_CSUM_MASK;
3575 
3576 	/* Disable tso if asked by ethtool */
3577 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3578 		if (features & NETIF_F_TSO)
3579 			priv->tso = true;
3580 		else
3581 			priv->tso = false;
3582 	}
3583 
3584 	return features;
3585 }
3586 
3587 static int stmmac_set_features(struct net_device *netdev,
3588 			       netdev_features_t features)
3589 {
3590 	struct stmmac_priv *priv = netdev_priv(netdev);
3591 
3592 	/* Keep the COE Type in case of csum is supporting */
3593 	if (features & NETIF_F_RXCSUM)
3594 		priv->hw->rx_csum = priv->plat->rx_coe;
3595 	else
3596 		priv->hw->rx_csum = 0;
3597 	/* No check needed because rx_coe has been set before and it will be
3598 	 * fixed in case of issue.
3599 	 */
3600 	stmmac_rx_ipc(priv, priv->hw);
3601 
3602 	return 0;
3603 }
3604 
3605 /**
3606  *  stmmac_interrupt - main ISR
3607  *  @irq: interrupt number.
3608  *  @dev_id: to pass the net device pointer.
3609  *  Description: this is the main driver interrupt service routine.
3610  *  It can call:
3611  *  o DMA service routine (to manage incoming frame reception and transmission
3612  *    status)
3613  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3614  *    interrupts.
3615  */
3616 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3617 {
3618 	struct net_device *dev = (struct net_device *)dev_id;
3619 	struct stmmac_priv *priv = netdev_priv(dev);
3620 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3621 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3622 	u32 queues_count;
3623 	u32 queue;
3624 
3625 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3626 
3627 	if (priv->irq_wake)
3628 		pm_wakeup_event(priv->device, 0);
3629 
3630 	if (unlikely(!dev)) {
3631 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3632 		return IRQ_NONE;
3633 	}
3634 
3635 	/* Check if adapter is up */
3636 	if (test_bit(STMMAC_DOWN, &priv->state))
3637 		return IRQ_HANDLED;
3638 	/* Check if a fatal error happened */
3639 	if (stmmac_safety_feat_interrupt(priv))
3640 		return IRQ_HANDLED;
3641 
3642 	/* To handle GMAC own interrupts */
3643 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3644 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3645 		int mtl_status;
3646 
3647 		if (unlikely(status)) {
3648 			/* For LPI we need to save the tx status */
3649 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3650 				priv->tx_path_in_lpi_mode = true;
3651 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3652 				priv->tx_path_in_lpi_mode = false;
3653 		}
3654 
3655 		for (queue = 0; queue < queues_count; queue++) {
3656 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3657 
3658 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3659 								queue);
3660 			if (mtl_status != -EINVAL)
3661 				status |= mtl_status;
3662 
3663 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3664 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3665 						       rx_q->rx_tail_addr,
3666 						       queue);
3667 		}
3668 
3669 		/* PCS link status */
3670 		if (priv->hw->pcs) {
3671 			if (priv->xstats.pcs_link)
3672 				netif_carrier_on(dev);
3673 			else
3674 				netif_carrier_off(dev);
3675 		}
3676 	}
3677 
3678 	/* To handle DMA interrupts */
3679 	stmmac_dma_interrupt(priv);
3680 
3681 	return IRQ_HANDLED;
3682 }
3683 
3684 #ifdef CONFIG_NET_POLL_CONTROLLER
3685 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3686  * to allow network I/O with interrupts disabled.
3687  */
3688 static void stmmac_poll_controller(struct net_device *dev)
3689 {
3690 	disable_irq(dev->irq);
3691 	stmmac_interrupt(dev->irq, dev);
3692 	enable_irq(dev->irq);
3693 }
3694 #endif
3695 
3696 /**
3697  *  stmmac_ioctl - Entry point for the Ioctl
3698  *  @dev: Device pointer.
3699  *  @rq: An IOCTL specefic structure, that can contain a pointer to
3700  *  a proprietary structure used to pass information to the driver.
3701  *  @cmd: IOCTL command
3702  *  Description:
3703  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3704  */
3705 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3706 {
3707 	int ret = -EOPNOTSUPP;
3708 
3709 	if (!netif_running(dev))
3710 		return -EINVAL;
3711 
3712 	switch (cmd) {
3713 	case SIOCGMIIPHY:
3714 	case SIOCGMIIREG:
3715 	case SIOCSMIIREG:
3716 		if (!dev->phydev)
3717 			return -EINVAL;
3718 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3719 		break;
3720 	case SIOCSHWTSTAMP:
3721 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3722 		break;
3723 	default:
3724 		break;
3725 	}
3726 
3727 	return ret;
3728 }
3729 
3730 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3731 				    void *cb_priv)
3732 {
3733 	struct stmmac_priv *priv = cb_priv;
3734 	int ret = -EOPNOTSUPP;
3735 
3736 	stmmac_disable_all_queues(priv);
3737 
3738 	switch (type) {
3739 	case TC_SETUP_CLSU32:
3740 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3741 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3742 		break;
3743 	default:
3744 		break;
3745 	}
3746 
3747 	stmmac_enable_all_queues(priv);
3748 	return ret;
3749 }
3750 
3751 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3752 				 struct tc_block_offload *f)
3753 {
3754 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3755 		return -EOPNOTSUPP;
3756 
3757 	switch (f->command) {
3758 	case TC_BLOCK_BIND:
3759 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3760 				priv, priv);
3761 	case TC_BLOCK_UNBIND:
3762 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3763 		return 0;
3764 	default:
3765 		return -EOPNOTSUPP;
3766 	}
3767 }
3768 
3769 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3770 			   void *type_data)
3771 {
3772 	struct stmmac_priv *priv = netdev_priv(ndev);
3773 
3774 	switch (type) {
3775 	case TC_SETUP_BLOCK:
3776 		return stmmac_setup_tc_block(priv, type_data);
3777 	default:
3778 		return -EOPNOTSUPP;
3779 	}
3780 }
3781 
3782 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3783 {
3784 	struct stmmac_priv *priv = netdev_priv(ndev);
3785 	int ret = 0;
3786 
3787 	ret = eth_mac_addr(ndev, addr);
3788 	if (ret)
3789 		return ret;
3790 
3791 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3792 
3793 	return ret;
3794 }
3795 
3796 #ifdef CONFIG_DEBUG_FS
3797 static struct dentry *stmmac_fs_dir;
3798 
3799 static void sysfs_display_ring(void *head, int size, int extend_desc,
3800 			       struct seq_file *seq)
3801 {
3802 	int i;
3803 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3804 	struct dma_desc *p = (struct dma_desc *)head;
3805 
3806 	for (i = 0; i < size; i++) {
3807 		if (extend_desc) {
3808 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3809 				   i, (unsigned int)virt_to_phys(ep),
3810 				   le32_to_cpu(ep->basic.des0),
3811 				   le32_to_cpu(ep->basic.des1),
3812 				   le32_to_cpu(ep->basic.des2),
3813 				   le32_to_cpu(ep->basic.des3));
3814 			ep++;
3815 		} else {
3816 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3817 				   i, (unsigned int)virt_to_phys(p),
3818 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3819 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3820 			p++;
3821 		}
3822 		seq_printf(seq, "\n");
3823 	}
3824 }
3825 
3826 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3827 {
3828 	struct net_device *dev = seq->private;
3829 	struct stmmac_priv *priv = netdev_priv(dev);
3830 	u32 rx_count = priv->plat->rx_queues_to_use;
3831 	u32 tx_count = priv->plat->tx_queues_to_use;
3832 	u32 queue;
3833 
3834 	for (queue = 0; queue < rx_count; queue++) {
3835 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3836 
3837 		seq_printf(seq, "RX Queue %d:\n", queue);
3838 
3839 		if (priv->extend_desc) {
3840 			seq_printf(seq, "Extended descriptor ring:\n");
3841 			sysfs_display_ring((void *)rx_q->dma_erx,
3842 					   DMA_RX_SIZE, 1, seq);
3843 		} else {
3844 			seq_printf(seq, "Descriptor ring:\n");
3845 			sysfs_display_ring((void *)rx_q->dma_rx,
3846 					   DMA_RX_SIZE, 0, seq);
3847 		}
3848 	}
3849 
3850 	for (queue = 0; queue < tx_count; queue++) {
3851 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3852 
3853 		seq_printf(seq, "TX Queue %d:\n", queue);
3854 
3855 		if (priv->extend_desc) {
3856 			seq_printf(seq, "Extended descriptor ring:\n");
3857 			sysfs_display_ring((void *)tx_q->dma_etx,
3858 					   DMA_TX_SIZE, 1, seq);
3859 		} else {
3860 			seq_printf(seq, "Descriptor ring:\n");
3861 			sysfs_display_ring((void *)tx_q->dma_tx,
3862 					   DMA_TX_SIZE, 0, seq);
3863 		}
3864 	}
3865 
3866 	return 0;
3867 }
3868 
3869 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3870 {
3871 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3872 }
3873 
3874 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3875 
3876 static const struct file_operations stmmac_rings_status_fops = {
3877 	.owner = THIS_MODULE,
3878 	.open = stmmac_sysfs_ring_open,
3879 	.read = seq_read,
3880 	.llseek = seq_lseek,
3881 	.release = single_release,
3882 };
3883 
3884 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3885 {
3886 	struct net_device *dev = seq->private;
3887 	struct stmmac_priv *priv = netdev_priv(dev);
3888 
3889 	if (!priv->hw_cap_support) {
3890 		seq_printf(seq, "DMA HW features not supported\n");
3891 		return 0;
3892 	}
3893 
3894 	seq_printf(seq, "==============================\n");
3895 	seq_printf(seq, "\tDMA HW features\n");
3896 	seq_printf(seq, "==============================\n");
3897 
3898 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3899 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3900 	seq_printf(seq, "\t1000 Mbps: %s\n",
3901 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3902 	seq_printf(seq, "\tHalf duplex: %s\n",
3903 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3904 	seq_printf(seq, "\tHash Filter: %s\n",
3905 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3906 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3907 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3908 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3909 		   (priv->dma_cap.pcs) ? "Y" : "N");
3910 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3911 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3912 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3913 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3914 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3915 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3916 	seq_printf(seq, "\tRMON module: %s\n",
3917 		   (priv->dma_cap.rmon) ? "Y" : "N");
3918 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3919 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3920 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3921 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3922 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3923 		   (priv->dma_cap.eee) ? "Y" : "N");
3924 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3925 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3926 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3927 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3928 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3929 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3930 	} else {
3931 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3932 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3933 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3934 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3935 	}
3936 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3937 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3938 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3939 		   priv->dma_cap.number_rx_channel);
3940 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3941 		   priv->dma_cap.number_tx_channel);
3942 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3943 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3944 
3945 	return 0;
3946 }
3947 
3948 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3949 {
3950 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3951 }
3952 
3953 static const struct file_operations stmmac_dma_cap_fops = {
3954 	.owner = THIS_MODULE,
3955 	.open = stmmac_sysfs_dma_cap_open,
3956 	.read = seq_read,
3957 	.llseek = seq_lseek,
3958 	.release = single_release,
3959 };
3960 
3961 static int stmmac_init_fs(struct net_device *dev)
3962 {
3963 	struct stmmac_priv *priv = netdev_priv(dev);
3964 
3965 	/* Create per netdev entries */
3966 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3967 
3968 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3969 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3970 
3971 		return -ENOMEM;
3972 	}
3973 
3974 	/* Entry to report DMA RX/TX rings */
3975 	priv->dbgfs_rings_status =
3976 		debugfs_create_file("descriptors_status", 0444,
3977 				    priv->dbgfs_dir, dev,
3978 				    &stmmac_rings_status_fops);
3979 
3980 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3981 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3982 		debugfs_remove_recursive(priv->dbgfs_dir);
3983 
3984 		return -ENOMEM;
3985 	}
3986 
3987 	/* Entry to report the DMA HW features */
3988 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
3989 						  priv->dbgfs_dir,
3990 						  dev, &stmmac_dma_cap_fops);
3991 
3992 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3993 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3994 		debugfs_remove_recursive(priv->dbgfs_dir);
3995 
3996 		return -ENOMEM;
3997 	}
3998 
3999 	return 0;
4000 }
4001 
4002 static void stmmac_exit_fs(struct net_device *dev)
4003 {
4004 	struct stmmac_priv *priv = netdev_priv(dev);
4005 
4006 	debugfs_remove_recursive(priv->dbgfs_dir);
4007 }
4008 #endif /* CONFIG_DEBUG_FS */
4009 
4010 static const struct net_device_ops stmmac_netdev_ops = {
4011 	.ndo_open = stmmac_open,
4012 	.ndo_start_xmit = stmmac_xmit,
4013 	.ndo_stop = stmmac_release,
4014 	.ndo_change_mtu = stmmac_change_mtu,
4015 	.ndo_fix_features = stmmac_fix_features,
4016 	.ndo_set_features = stmmac_set_features,
4017 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4018 	.ndo_tx_timeout = stmmac_tx_timeout,
4019 	.ndo_do_ioctl = stmmac_ioctl,
4020 	.ndo_setup_tc = stmmac_setup_tc,
4021 #ifdef CONFIG_NET_POLL_CONTROLLER
4022 	.ndo_poll_controller = stmmac_poll_controller,
4023 #endif
4024 	.ndo_set_mac_address = stmmac_set_mac_address,
4025 };
4026 
4027 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4028 {
4029 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4030 		return;
4031 	if (test_bit(STMMAC_DOWN, &priv->state))
4032 		return;
4033 
4034 	netdev_err(priv->dev, "Reset adapter.\n");
4035 
4036 	rtnl_lock();
4037 	netif_trans_update(priv->dev);
4038 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4039 		usleep_range(1000, 2000);
4040 
4041 	set_bit(STMMAC_DOWN, &priv->state);
4042 	dev_close(priv->dev);
4043 	dev_open(priv->dev);
4044 	clear_bit(STMMAC_DOWN, &priv->state);
4045 	clear_bit(STMMAC_RESETING, &priv->state);
4046 	rtnl_unlock();
4047 }
4048 
4049 static void stmmac_service_task(struct work_struct *work)
4050 {
4051 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4052 			service_task);
4053 
4054 	stmmac_reset_subtask(priv);
4055 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4056 }
4057 
4058 /**
4059  *  stmmac_hw_init - Init the MAC device
4060  *  @priv: driver private structure
4061  *  Description: this function is to configure the MAC device according to
4062  *  some platform parameters or the HW capability register. It prepares the
4063  *  driver to use either ring or chain modes and to setup either enhanced or
4064  *  normal descriptors.
4065  */
4066 static int stmmac_hw_init(struct stmmac_priv *priv)
4067 {
4068 	int ret;
4069 
4070 	/* dwmac-sun8i only work in chain mode */
4071 	if (priv->plat->has_sun8i)
4072 		chain_mode = 1;
4073 	priv->chain_mode = chain_mode;
4074 
4075 	/* Initialize HW Interface */
4076 	ret = stmmac_hwif_init(priv);
4077 	if (ret)
4078 		return ret;
4079 
4080 	/* Get the HW capability (new GMAC newer than 3.50a) */
4081 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4082 	if (priv->hw_cap_support) {
4083 		dev_info(priv->device, "DMA HW capability register supported\n");
4084 
4085 		/* We can override some gmac/dma configuration fields: e.g.
4086 		 * enh_desc, tx_coe (e.g. that are passed through the
4087 		 * platform) with the values from the HW capability
4088 		 * register (if supported).
4089 		 */
4090 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4091 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4092 		priv->hw->pmt = priv->plat->pmt;
4093 
4094 		/* TXCOE doesn't work in thresh DMA mode */
4095 		if (priv->plat->force_thresh_dma_mode)
4096 			priv->plat->tx_coe = 0;
4097 		else
4098 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4099 
4100 		/* In case of GMAC4 rx_coe is from HW cap register. */
4101 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4102 
4103 		if (priv->dma_cap.rx_coe_type2)
4104 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4105 		else if (priv->dma_cap.rx_coe_type1)
4106 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4107 
4108 	} else {
4109 		dev_info(priv->device, "No HW DMA feature register supported\n");
4110 	}
4111 
4112 	if (priv->plat->rx_coe) {
4113 		priv->hw->rx_csum = priv->plat->rx_coe;
4114 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4115 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4116 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4117 	}
4118 	if (priv->plat->tx_coe)
4119 		dev_info(priv->device, "TX Checksum insertion supported\n");
4120 
4121 	if (priv->plat->pmt) {
4122 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4123 		device_set_wakeup_capable(priv->device, 1);
4124 	}
4125 
4126 	if (priv->dma_cap.tsoen)
4127 		dev_info(priv->device, "TSO supported\n");
4128 
4129 	return 0;
4130 }
4131 
4132 /**
4133  * stmmac_dvr_probe
4134  * @device: device pointer
4135  * @plat_dat: platform data pointer
4136  * @res: stmmac resource pointer
4137  * Description: this is the main probe function used to
4138  * call the alloc_etherdev, allocate the priv structure.
4139  * Return:
4140  * returns 0 on success, otherwise errno.
4141  */
4142 int stmmac_dvr_probe(struct device *device,
4143 		     struct plat_stmmacenet_data *plat_dat,
4144 		     struct stmmac_resources *res)
4145 {
4146 	struct net_device *ndev = NULL;
4147 	struct stmmac_priv *priv;
4148 	int ret = 0;
4149 	u32 queue;
4150 
4151 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4152 				  MTL_MAX_TX_QUEUES,
4153 				  MTL_MAX_RX_QUEUES);
4154 	if (!ndev)
4155 		return -ENOMEM;
4156 
4157 	SET_NETDEV_DEV(ndev, device);
4158 
4159 	priv = netdev_priv(ndev);
4160 	priv->device = device;
4161 	priv->dev = ndev;
4162 
4163 	stmmac_set_ethtool_ops(ndev);
4164 	priv->pause = pause;
4165 	priv->plat = plat_dat;
4166 	priv->ioaddr = res->addr;
4167 	priv->dev->base_addr = (unsigned long)res->addr;
4168 
4169 	priv->dev->irq = res->irq;
4170 	priv->wol_irq = res->wol_irq;
4171 	priv->lpi_irq = res->lpi_irq;
4172 
4173 	if (res->mac)
4174 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4175 
4176 	dev_set_drvdata(device, priv->dev);
4177 
4178 	/* Verify driver arguments */
4179 	stmmac_verify_args();
4180 
4181 	/* Allocate workqueue */
4182 	priv->wq = create_singlethread_workqueue("stmmac_wq");
4183 	if (!priv->wq) {
4184 		dev_err(priv->device, "failed to create workqueue\n");
4185 		goto error_wq;
4186 	}
4187 
4188 	INIT_WORK(&priv->service_task, stmmac_service_task);
4189 
4190 	/* Override with kernel parameters if supplied XXX CRS XXX
4191 	 * this needs to have multiple instances
4192 	 */
4193 	if ((phyaddr >= 0) && (phyaddr <= 31))
4194 		priv->plat->phy_addr = phyaddr;
4195 
4196 	if (priv->plat->stmmac_rst) {
4197 		ret = reset_control_assert(priv->plat->stmmac_rst);
4198 		reset_control_deassert(priv->plat->stmmac_rst);
4199 		/* Some reset controllers have only reset callback instead of
4200 		 * assert + deassert callbacks pair.
4201 		 */
4202 		if (ret == -ENOTSUPP)
4203 			reset_control_reset(priv->plat->stmmac_rst);
4204 	}
4205 
4206 	/* Init MAC and get the capabilities */
4207 	ret = stmmac_hw_init(priv);
4208 	if (ret)
4209 		goto error_hw_init;
4210 
4211 	/* Configure real RX and TX queues */
4212 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4213 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4214 
4215 	ndev->netdev_ops = &stmmac_netdev_ops;
4216 
4217 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4218 			    NETIF_F_RXCSUM;
4219 
4220 	ret = stmmac_tc_init(priv, priv);
4221 	if (!ret) {
4222 		ndev->hw_features |= NETIF_F_HW_TC;
4223 	}
4224 
4225 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4226 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4227 		priv->tso = true;
4228 		dev_info(priv->device, "TSO feature enabled\n");
4229 	}
4230 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4231 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4232 #ifdef STMMAC_VLAN_TAG_USED
4233 	/* Both mac100 and gmac support receive VLAN tag detection */
4234 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4235 #endif
4236 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4237 
4238 	/* MTU range: 46 - hw-specific max */
4239 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4240 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4241 		ndev->max_mtu = JUMBO_LEN;
4242 	else
4243 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4244 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4245 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
4246 	 */
4247 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4248 	    (priv->plat->maxmtu >= ndev->min_mtu))
4249 		ndev->max_mtu = priv->plat->maxmtu;
4250 	else if (priv->plat->maxmtu < ndev->min_mtu)
4251 		dev_warn(priv->device,
4252 			 "%s: warning: maxmtu having invalid value (%d)\n",
4253 			 __func__, priv->plat->maxmtu);
4254 
4255 	if (flow_ctrl)
4256 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4257 
4258 	/* Rx Watchdog is available in the COREs newer than the 3.40.
4259 	 * In some case, for example on bugged HW this feature
4260 	 * has to be disable and this can be done by passing the
4261 	 * riwt_off field from the platform.
4262 	 */
4263 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4264 		priv->use_riwt = 1;
4265 		dev_info(priv->device,
4266 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4267 	}
4268 
4269 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4270 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4271 
4272 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4273 			       (8 * priv->plat->rx_queues_to_use));
4274 	}
4275 
4276 	mutex_init(&priv->lock);
4277 
4278 	/* If a specific clk_csr value is passed from the platform
4279 	 * this means that the CSR Clock Range selection cannot be
4280 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
4281 	 * set the MDC clock dynamically according to the csr actual
4282 	 * clock input.
4283 	 */
4284 	if (!priv->plat->clk_csr)
4285 		stmmac_clk_csr_set(priv);
4286 	else
4287 		priv->clk_csr = priv->plat->clk_csr;
4288 
4289 	stmmac_check_pcs_mode(priv);
4290 
4291 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4292 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4293 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4294 		/* MDIO bus Registration */
4295 		ret = stmmac_mdio_register(ndev);
4296 		if (ret < 0) {
4297 			dev_err(priv->device,
4298 				"%s: MDIO bus (id: %d) registration failed",
4299 				__func__, priv->plat->bus_id);
4300 			goto error_mdio_register;
4301 		}
4302 	}
4303 
4304 	ret = register_netdev(ndev);
4305 	if (ret) {
4306 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4307 			__func__, ret);
4308 		goto error_netdev_register;
4309 	}
4310 
4311 	return ret;
4312 
4313 error_netdev_register:
4314 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4315 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4316 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4317 		stmmac_mdio_unregister(ndev);
4318 error_mdio_register:
4319 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4320 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4321 
4322 		netif_napi_del(&rx_q->napi);
4323 	}
4324 error_hw_init:
4325 	destroy_workqueue(priv->wq);
4326 error_wq:
4327 	free_netdev(ndev);
4328 
4329 	return ret;
4330 }
4331 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4332 
4333 /**
4334  * stmmac_dvr_remove
4335  * @dev: device pointer
4336  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
4337  * changes the link status, releases the DMA descriptor rings.
4338  */
4339 int stmmac_dvr_remove(struct device *dev)
4340 {
4341 	struct net_device *ndev = dev_get_drvdata(dev);
4342 	struct stmmac_priv *priv = netdev_priv(ndev);
4343 
4344 	netdev_info(priv->dev, "%s: removing driver", __func__);
4345 
4346 	stmmac_stop_all_dma(priv);
4347 
4348 	stmmac_mac_set(priv, priv->ioaddr, false);
4349 	netif_carrier_off(ndev);
4350 	unregister_netdev(ndev);
4351 	if (priv->plat->stmmac_rst)
4352 		reset_control_assert(priv->plat->stmmac_rst);
4353 	clk_disable_unprepare(priv->plat->pclk);
4354 	clk_disable_unprepare(priv->plat->stmmac_clk);
4355 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4356 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4357 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4358 		stmmac_mdio_unregister(ndev);
4359 	destroy_workqueue(priv->wq);
4360 	mutex_destroy(&priv->lock);
4361 	free_netdev(ndev);
4362 
4363 	return 0;
4364 }
4365 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4366 
4367 /**
4368  * stmmac_suspend - suspend callback
4369  * @dev: device pointer
4370  * Description: this is the function to suspend the device and it is called
4371  * by the platform driver to stop the network queue, release the resources,
4372  * program the PMT register (for WoL), clean and release driver resources.
4373  */
4374 int stmmac_suspend(struct device *dev)
4375 {
4376 	struct net_device *ndev = dev_get_drvdata(dev);
4377 	struct stmmac_priv *priv = netdev_priv(ndev);
4378 
4379 	if (!ndev || !netif_running(ndev))
4380 		return 0;
4381 
4382 	if (ndev->phydev)
4383 		phy_stop(ndev->phydev);
4384 
4385 	mutex_lock(&priv->lock);
4386 
4387 	netif_device_detach(ndev);
4388 	stmmac_stop_all_queues(priv);
4389 
4390 	stmmac_disable_all_queues(priv);
4391 
4392 	/* Stop TX/RX DMA */
4393 	stmmac_stop_all_dma(priv);
4394 
4395 	/* Enable Power down mode by programming the PMT regs */
4396 	if (device_may_wakeup(priv->device)) {
4397 		stmmac_pmt(priv, priv->hw, priv->wolopts);
4398 		priv->irq_wake = 1;
4399 	} else {
4400 		stmmac_mac_set(priv, priv->ioaddr, false);
4401 		pinctrl_pm_select_sleep_state(priv->device);
4402 		/* Disable clock in case of PWM is off */
4403 		clk_disable(priv->plat->pclk);
4404 		clk_disable(priv->plat->stmmac_clk);
4405 	}
4406 	mutex_unlock(&priv->lock);
4407 
4408 	priv->oldlink = false;
4409 	priv->speed = SPEED_UNKNOWN;
4410 	priv->oldduplex = DUPLEX_UNKNOWN;
4411 	return 0;
4412 }
4413 EXPORT_SYMBOL_GPL(stmmac_suspend);
4414 
4415 /**
4416  * stmmac_reset_queues_param - reset queue parameters
4417  * @dev: device pointer
4418  */
4419 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4420 {
4421 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4422 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4423 	u32 queue;
4424 
4425 	for (queue = 0; queue < rx_cnt; queue++) {
4426 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4427 
4428 		rx_q->cur_rx = 0;
4429 		rx_q->dirty_rx = 0;
4430 	}
4431 
4432 	for (queue = 0; queue < tx_cnt; queue++) {
4433 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4434 
4435 		tx_q->cur_tx = 0;
4436 		tx_q->dirty_tx = 0;
4437 		tx_q->mss = 0;
4438 	}
4439 }
4440 
4441 /**
4442  * stmmac_resume - resume callback
4443  * @dev: device pointer
4444  * Description: when resume this function is invoked to setup the DMA and CORE
4445  * in a usable state.
4446  */
4447 int stmmac_resume(struct device *dev)
4448 {
4449 	struct net_device *ndev = dev_get_drvdata(dev);
4450 	struct stmmac_priv *priv = netdev_priv(ndev);
4451 
4452 	if (!netif_running(ndev))
4453 		return 0;
4454 
4455 	/* Power Down bit, into the PM register, is cleared
4456 	 * automatically as soon as a magic packet or a Wake-up frame
4457 	 * is received. Anyway, it's better to manually clear
4458 	 * this bit because it can generate problems while resuming
4459 	 * from another devices (e.g. serial console).
4460 	 */
4461 	if (device_may_wakeup(priv->device)) {
4462 		mutex_lock(&priv->lock);
4463 		stmmac_pmt(priv, priv->hw, 0);
4464 		mutex_unlock(&priv->lock);
4465 		priv->irq_wake = 0;
4466 	} else {
4467 		pinctrl_pm_select_default_state(priv->device);
4468 		/* enable the clk previously disabled */
4469 		clk_enable(priv->plat->stmmac_clk);
4470 		clk_enable(priv->plat->pclk);
4471 		/* reset the phy so that it's ready */
4472 		if (priv->mii)
4473 			stmmac_mdio_reset(priv->mii);
4474 	}
4475 
4476 	netif_device_attach(ndev);
4477 
4478 	mutex_lock(&priv->lock);
4479 
4480 	stmmac_reset_queues_param(priv);
4481 
4482 	stmmac_clear_descriptors(priv);
4483 
4484 	stmmac_hw_setup(ndev, false);
4485 	stmmac_init_tx_coalesce(priv);
4486 	stmmac_set_rx_mode(ndev);
4487 
4488 	stmmac_enable_all_queues(priv);
4489 
4490 	stmmac_start_all_queues(priv);
4491 
4492 	mutex_unlock(&priv->lock);
4493 
4494 	if (ndev->phydev)
4495 		phy_start(ndev->phydev);
4496 
4497 	return 0;
4498 }
4499 EXPORT_SYMBOL_GPL(stmmac_resume);
4500 
4501 #ifndef MODULE
4502 static int __init stmmac_cmdline_opt(char *str)
4503 {
4504 	char *opt;
4505 
4506 	if (!str || !*str)
4507 		return -EINVAL;
4508 	while ((opt = strsep(&str, ",")) != NULL) {
4509 		if (!strncmp(opt, "debug:", 6)) {
4510 			if (kstrtoint(opt + 6, 0, &debug))
4511 				goto err;
4512 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4513 			if (kstrtoint(opt + 8, 0, &phyaddr))
4514 				goto err;
4515 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4516 			if (kstrtoint(opt + 7, 0, &buf_sz))
4517 				goto err;
4518 		} else if (!strncmp(opt, "tc:", 3)) {
4519 			if (kstrtoint(opt + 3, 0, &tc))
4520 				goto err;
4521 		} else if (!strncmp(opt, "watchdog:", 9)) {
4522 			if (kstrtoint(opt + 9, 0, &watchdog))
4523 				goto err;
4524 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4525 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4526 				goto err;
4527 		} else if (!strncmp(opt, "pause:", 6)) {
4528 			if (kstrtoint(opt + 6, 0, &pause))
4529 				goto err;
4530 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4531 			if (kstrtoint(opt + 10, 0, &eee_timer))
4532 				goto err;
4533 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4534 			if (kstrtoint(opt + 11, 0, &chain_mode))
4535 				goto err;
4536 		}
4537 	}
4538 	return 0;
4539 
4540 err:
4541 	pr_err("%s: ERROR broken module parameter conversion", __func__);
4542 	return -EINVAL;
4543 }
4544 
4545 __setup("stmmaceth=", stmmac_cmdline_opt);
4546 #endif /* MODULE */
4547 
4548 static int __init stmmac_init(void)
4549 {
4550 #ifdef CONFIG_DEBUG_FS
4551 	/* Create debugfs main directory if it doesn't exist yet */
4552 	if (!stmmac_fs_dir) {
4553 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4554 
4555 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4556 			pr_err("ERROR %s, debugfs create directory failed\n",
4557 			       STMMAC_RESOURCE_NAME);
4558 
4559 			return -ENOMEM;
4560 		}
4561 	}
4562 #endif
4563 
4564 	return 0;
4565 }
4566 
4567 static void __exit stmmac_exit(void)
4568 {
4569 #ifdef CONFIG_DEBUG_FS
4570 	debugfs_remove_recursive(stmmac_fs_dir);
4571 #endif
4572 }
4573 
4574 module_init(stmmac_init)
4575 module_exit(stmmac_exit)
4576 
4577 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4578 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4579 MODULE_LICENSE("GPL");
4580