xref: /openbmc/linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 6396bb221514d2876fd6dc0aa2a1f240d99b37bb)
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "hwif.h"
55 
56 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
57 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
58 
59 /* Module parameters */
60 #define TX_TIMEO	5000
61 static int watchdog = TX_TIMEO;
62 module_param(watchdog, int, 0644);
63 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
64 
65 static int debug = -1;
66 module_param(debug, int, 0644);
67 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
68 
69 static int phyaddr = -1;
70 module_param(phyaddr, int, 0444);
71 MODULE_PARM_DESC(phyaddr, "Physical device address");
72 
73 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
74 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
75 
76 static int flow_ctrl = FLOW_OFF;
77 module_param(flow_ctrl, int, 0644);
78 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
79 
80 static int pause = PAUSE_TIME;
81 module_param(pause, int, 0644);
82 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
83 
84 #define TC_DEFAULT 64
85 static int tc = TC_DEFAULT;
86 module_param(tc, int, 0644);
87 MODULE_PARM_DESC(tc, "DMA threshold control value");
88 
89 #define	DEFAULT_BUFSIZE	1536
90 static int buf_sz = DEFAULT_BUFSIZE;
91 module_param(buf_sz, int, 0644);
92 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
93 
94 #define	STMMAC_RX_COPYBREAK	256
95 
96 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
97 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
98 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
99 
100 #define STMMAC_DEFAULT_LPI_TIMER	1000
101 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
102 module_param(eee_timer, int, 0644);
103 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
104 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
105 
106 /* By default the driver will use the ring mode to manage tx and rx descriptors,
107  * but allows the user to force the use of chain mode instead of the ring.
108  */
109 static unsigned int chain_mode;
110 module_param(chain_mode, int, 0444);
111 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
112 
113 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
114 
115 #ifdef CONFIG_DEBUG_FS
116 static int stmmac_init_fs(struct net_device *dev);
117 static void stmmac_exit_fs(struct net_device *dev);
118 #endif
119 
120 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
121 
122 /**
123  * stmmac_verify_args - verify the driver parameters.
124  * Description: it checks the driver parameters and sets a default in case of
125  * errors.
126  */
127 static void stmmac_verify_args(void)
128 {
129 	if (unlikely(watchdog < 0))
130 		watchdog = TX_TIMEO;
131 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
132 		buf_sz = DEFAULT_BUFSIZE;
133 	if (unlikely(flow_ctrl > 1))
134 		flow_ctrl = FLOW_AUTO;
135 	else if (likely(flow_ctrl < 0))
136 		flow_ctrl = FLOW_OFF;
137 	if (unlikely((pause < 0) || (pause > 0xffff)))
138 		pause = PAUSE_TIME;
139 	if (eee_timer < 0)
140 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
141 }
142 
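/* Illustration (editorial, not from the original source): the module
 * parameters above can be overridden at load time, e.g.:
 *
 *	modprobe stmmac buf_sz=4096 tc=128 eee_timer=2000
 *
 * Out-of-range values are sanity-checked and coerced back to the defaults
 * by stmmac_verify_args() before use.
 */
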
143 /**
144  * stmmac_disable_all_queues - Disable all queues
145  * @priv: driver private structure
146  */
147 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
148 {
149 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
150 	u32 queue;
151 
152 	for (queue = 0; queue < rx_queues_cnt; queue++) {
153 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
154 
155 		napi_disable(&rx_q->napi);
156 	}
157 }
158 
159 /**
160  * stmmac_enable_all_queues - Enable all queues
161  * @priv: driver private structure
162  */
163 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
164 {
165 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
166 	u32 queue;
167 
168 	for (queue = 0; queue < rx_queues_cnt; queue++) {
169 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
170 
171 		napi_enable(&rx_q->napi);
172 	}
173 }
174 
175 /**
176  * stmmac_stop_all_queues - Stop all queues
177  * @priv: driver private structure
178  */
179 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
180 {
181 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
182 	u32 queue;
183 
184 	for (queue = 0; queue < tx_queues_cnt; queue++)
185 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
186 }
187 
188 /**
189  * stmmac_start_all_queues - Start all queues
190  * @priv: driver private structure
191  */
192 static void stmmac_start_all_queues(struct stmmac_priv *priv)
193 {
194 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
195 	u32 queue;
196 
197 	for (queue = 0; queue < tx_queues_cnt; queue++)
198 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
199 }
200 
201 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
202 {
203 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
204 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
205 		queue_work(priv->wq, &priv->service_task);
206 }
207 
208 static void stmmac_global_err(struct stmmac_priv *priv)
209 {
210 	netif_carrier_off(priv->dev);
211 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
212 	stmmac_service_event_schedule(priv);
213 }
214 
215 /**
216  * stmmac_clk_csr_set - dynamically set the MDC clock
217  * @priv: driver private structure
218  * Description: this is to dynamically set the MDC clock according to the csr
219  * clock input.
220  * Note:
221  *	If a specific clk_csr value is passed from the platform
222  *	this means that the CSR Clock Range selection cannot be
223  *	changed at run-time and it is fixed (as reported in the driver
224  * documentation). Otherwise, the driver will try to set the MDC
225  *	clock dynamically according to the actual clock input.
226  */
227 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
228 {
229 	u32 clk_rate;
230 
231 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
232 
233 	/* The platform-provided default clk_csr is assumed valid in
234 	 * all cases except the ones handled below. For clock rates
235 	 * above the highest IEEE 802.3 specified frequency we cannot
236 	 * estimate the proper divider, as the frequency of clk_csr_i
237 	 * is not known; in that case the default divider is left
238 	 * unchanged.
239 	 */
240 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
241 		if (clk_rate < CSR_F_35M)
242 			priv->clk_csr = STMMAC_CSR_20_35M;
243 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
244 			priv->clk_csr = STMMAC_CSR_35_60M;
245 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
246 			priv->clk_csr = STMMAC_CSR_60_100M;
247 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
248 			priv->clk_csr = STMMAC_CSR_100_150M;
249 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
250 			priv->clk_csr = STMMAC_CSR_150_250M;
251 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
252 			priv->clk_csr = STMMAC_CSR_250_300M;
253 	}
254 
255 	if (priv->plat->has_sun8i) {
256 		if (clk_rate > 160000000)
257 			priv->clk_csr = 0x03;
258 		else if (clk_rate > 80000000)
259 			priv->clk_csr = 0x02;
260 		else if (clk_rate > 40000000)
261 			priv->clk_csr = 0x01;
262 		else
263 			priv->clk_csr = 0;
264 	}
265 }
266 
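/* Worked example (editorial): with a 75 MHz csr clock and no fixed clk_csr
 * from the platform, the ladder above selects STMMAC_CSR_60_100M, i.e. the
 * MDC divider for the 60-100 MHz range, keeping MDC within the 2.5 MHz
 * maximum that IEEE 802.3 specifies.
 */
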
267 static void print_pkt(unsigned char *buf, int len)
268 {
269 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
270 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
271 }
272 
273 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
274 {
275 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
276 	u32 avail;
277 
278 	if (tx_q->dirty_tx > tx_q->cur_tx)
279 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
280 	else
281 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
282 
283 	return avail;
284 }
285 
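/* Example of the ring arithmetic above (editorial): assuming
 * DMA_TX_SIZE = 512, cur_tx = 10 and dirty_tx = 5, the second branch gives
 * avail = 512 - 10 + 5 - 1 = 506. One slot is always kept unused so that
 * cur_tx never wraps onto dirty_tx.
 */
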
286 /**
287  * stmmac_rx_dirty - Get RX queue dirty
288  * @priv: driver private structure
289  * @queue: RX queue index
290  */
291 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
292 {
293 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
294 	u32 dirty;
295 
296 	if (rx_q->dirty_rx <= rx_q->cur_rx)
297 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
298 	else
299 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
300 
301 	return dirty;
302 }
303 
304 /**
305  * stmmac_hw_fix_mac_speed - callback for speed selection
306  * @priv: driver private structure
307  * Description: on some platforms (e.g. ST), some HW system configuration
308  * registers have to be set according to the link speed negotiated.
309  */
310 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
311 {
312 	struct net_device *ndev = priv->dev;
313 	struct phy_device *phydev = ndev->phydev;
314 
315 	if (likely(priv->plat->fix_mac_speed))
316 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
317 }
318 
319 /**
320  * stmmac_enable_eee_mode - check and enter LPI mode
321  * @priv: driver private structure
322  * Description: this function checks that all TX queues are idle and, if
323  * so, enters LPI mode when EEE is enabled.
324  */
325 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
326 {
327 	u32 tx_cnt = priv->plat->tx_queues_to_use;
328 	u32 queue;
329 
330 	/* check if all TX queues have the work finished */
331 	for (queue = 0; queue < tx_cnt; queue++) {
332 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
333 
334 		if (tx_q->dirty_tx != tx_q->cur_tx)
335 			return; /* still unfinished work */
336 	}
337 
338 	/* Check and enter in LPI mode */
339 	if (!priv->tx_path_in_lpi_mode)
340 		stmmac_set_eee_mode(priv, priv->hw,
341 				priv->plat->en_tx_lpi_clockgating);
342 }
343 
344 /**
345  * stmmac_disable_eee_mode - disable and exit from LPI mode
346  * @priv: driver private structure
347  * Description: this function exits LPI mode and disables EEE when the
348  * LPI state is active. It is called from the xmit path.
349  */
350 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
351 {
352 	stmmac_reset_eee_mode(priv, priv->hw);
353 	del_timer_sync(&priv->eee_ctrl_timer);
354 	priv->tx_path_in_lpi_mode = false;
355 }
356 
357 /**
358  * stmmac_eee_ctrl_timer - EEE TX SW timer.
359  * @t: timer_list structure containing the EEE control timer
360  * Description:
361  *  if there is no data transfer and we are not already in LPI state,
362  *  then the MAC transmitter can be moved to LPI state.
363  */
364 static void stmmac_eee_ctrl_timer(struct timer_list *t)
365 {
366 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
367 
368 	stmmac_enable_eee_mode(priv);
369 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
370 }
371 
372 /**
373  * stmmac_eee_init - init EEE
374  * @priv: driver private structure
375  * Description:
376  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
377  *  can also manage EEE, this function enables the LPI state and starts the
378  *  related timer.
379  */
380 bool stmmac_eee_init(struct stmmac_priv *priv)
381 {
382 	struct net_device *ndev = priv->dev;
383 	int interface = priv->plat->interface;
384 	bool ret = false;
385 
386 	if ((interface != PHY_INTERFACE_MODE_MII) &&
387 	    (interface != PHY_INTERFACE_MODE_GMII) &&
388 	    !phy_interface_mode_is_rgmii(interface))
389 		goto out;
390 
391 	/* When using PCS we cannot deal with the PHY registers at this stage,
392 	 * so extra features like EEE are not supported.
393 	 */
394 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
395 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
396 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
397 		goto out;
398 
399 	/* MAC core supports the EEE feature. */
400 	if (priv->dma_cap.eee) {
401 		int tx_lpi_timer = priv->tx_lpi_timer;
402 
403 		/* Check if the PHY supports EEE */
404 		if (phy_init_eee(ndev->phydev, 1)) {
405 			/* Handle the case where EEE can no longer be
406 			 * supported at run-time (for example because the
407 			 * link partner capabilities have changed).
408 			 * In that case the driver disables its own timers.
409 			 */
410 			mutex_lock(&priv->lock);
411 			if (priv->eee_active) {
412 				netdev_dbg(priv->dev, "disable EEE\n");
413 				del_timer_sync(&priv->eee_ctrl_timer);
414 				stmmac_set_eee_timer(priv, priv->hw, 0,
415 						tx_lpi_timer);
416 			}
417 			priv->eee_active = 0;
418 			mutex_unlock(&priv->lock);
419 			goto out;
420 		}
421 		/* Activate the EEE and start timers */
422 		mutex_lock(&priv->lock);
423 		if (!priv->eee_active) {
424 			priv->eee_active = 1;
425 			timer_setup(&priv->eee_ctrl_timer,
426 				    stmmac_eee_ctrl_timer, 0);
427 			mod_timer(&priv->eee_ctrl_timer,
428 				  STMMAC_LPI_T(eee_timer));
429 
430 			stmmac_set_eee_timer(priv, priv->hw,
431 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
432 		}
433 		/* Set HW EEE according to the speed */
434 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
435 
436 		ret = true;
437 		mutex_unlock(&priv->lock);
438 
439 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
440 	}
441 out:
442 	return ret;
443 }
444 
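/* Note (editorial): the EEE/LPI state negotiated here can typically be
 * inspected from user space with "ethtool --show-eee <iface>".
 */
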
445 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
446  * @priv: driver private structure
447  * @p : descriptor pointer
448  * @skb : the socket buffer
449  * Description :
450  * This function reads the timestamp from the descriptor, performs some
451  * sanity checks and then passes it to the stack.
452  */
453 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
454 				   struct dma_desc *p, struct sk_buff *skb)
455 {
456 	struct skb_shared_hwtstamps shhwtstamp;
457 	u64 ns;
458 
459 	if (!priv->hwts_tx_en)
460 		return;
461 
462 	/* exit if skb doesn't support hw tstamp */
463 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
464 		return;
465 
466 	/* check tx tstamp status */
467 	if (stmmac_get_tx_timestamp_status(priv, p)) {
468 		/* get the valid tstamp */
469 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
470 
471 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
472 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
473 
474 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
475 		/* pass tstamp to stack */
476 		skb_tstamp_tx(skb, &shhwtstamp);
477 	}
480 }
481 
482 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
483  * @priv: driver private structure
484  * @p : descriptor pointer
485  * @np : next descriptor pointer
486  * @skb : the socket buffer
487  * Description :
488  * This function reads the received packet's timestamp from the descriptor
489  * and passes it to the stack. It also performs some sanity checks.
490  */
491 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
492 				   struct dma_desc *np, struct sk_buff *skb)
493 {
494 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
495 	struct dma_desc *desc = p;
496 	u64 ns;
497 
498 	if (!priv->hwts_rx_en)
499 		return;
500 	/* For GMAC4, the valid timestamp is from CTX next desc. */
501 	if (priv->plat->has_gmac4)
502 		desc = np;
503 
504 	/* Check if timestamp is available */
505 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
506 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
507 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
508 		shhwtstamp = skb_hwtstamps(skb);
509 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
510 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
511 	} else  {
512 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
513 	}
514 }
515 
516 /**
517  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
518  *  @dev: device pointer.
519  *  @ifr: An IOCTL specific structure, that can contain a pointer to
520  *  a proprietary structure used to pass information to the driver.
521  *  Description:
522  *  This function configures the MAC to enable/disable both outgoing(TX)
523  *  and incoming(RX) packets time stamping based on user input.
524  *  Return Value:
525  *  0 on success and an appropriate negative errno value on failure.
526  */
527 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
528 {
529 	struct stmmac_priv *priv = netdev_priv(dev);
530 	struct hwtstamp_config config;
531 	struct timespec64 now;
532 	u64 temp = 0;
533 	u32 ptp_v2 = 0;
534 	u32 tstamp_all = 0;
535 	u32 ptp_over_ipv4_udp = 0;
536 	u32 ptp_over_ipv6_udp = 0;
537 	u32 ptp_over_ethernet = 0;
538 	u32 snap_type_sel = 0;
539 	u32 ts_master_en = 0;
540 	u32 ts_event_en = 0;
541 	u32 value = 0;
542 	u32 sec_inc;
543 
544 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
545 		netdev_alert(priv->dev, "No support for HW time stamping\n");
546 		priv->hwts_tx_en = 0;
547 		priv->hwts_rx_en = 0;
548 
549 		return -EOPNOTSUPP;
550 	}
551 
552 	if (copy_from_user(&config, ifr->ifr_data,
553 			   sizeof(struct hwtstamp_config)))
554 		return -EFAULT;
555 
556 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
557 		   __func__, config.flags, config.tx_type, config.rx_filter);
558 
559 	/* reserved for future extensions */
560 	if (config.flags)
561 		return -EINVAL;
562 
563 	if (config.tx_type != HWTSTAMP_TX_OFF &&
564 	    config.tx_type != HWTSTAMP_TX_ON)
565 		return -ERANGE;
566 
567 	if (priv->adv_ts) {
568 		switch (config.rx_filter) {
569 		case HWTSTAMP_FILTER_NONE:
570 			/* time stamp no incoming packet at all */
571 			config.rx_filter = HWTSTAMP_FILTER_NONE;
572 			break;
573 
574 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
575 			/* PTP v1, UDP, any kind of event packet */
576 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
577 			/* take time stamp for all event messages */
578 			if (priv->plat->has_gmac4)
579 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
580 			else
581 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
582 
583 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
584 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
585 			break;
586 
587 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
588 			/* PTP v1, UDP, Sync packet */
589 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
590 			/* take time stamp for SYNC messages only */
591 			ts_event_en = PTP_TCR_TSEVNTENA;
592 
593 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
594 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
595 			break;
596 
597 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
598 			/* PTP v1, UDP, Delay_req packet */
599 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
600 			/* take time stamp for Delay_Req messages only */
601 			ts_master_en = PTP_TCR_TSMSTRENA;
602 			ts_event_en = PTP_TCR_TSEVNTENA;
603 
604 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
605 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
606 			break;
607 
608 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
609 			/* PTP v2, UDP, any kind of event packet */
610 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
611 			ptp_v2 = PTP_TCR_TSVER2ENA;
612 			/* take time stamp for all event messages */
613 			if (priv->plat->has_gmac4)
614 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
615 			else
616 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
617 
618 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
619 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
620 			break;
621 
622 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
623 			/* PTP v2, UDP, Sync packet */
624 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
625 			ptp_v2 = PTP_TCR_TSVER2ENA;
626 			/* take time stamp for SYNC messages only */
627 			ts_event_en = PTP_TCR_TSEVNTENA;
628 
629 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
630 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
631 			break;
632 
633 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
634 			/* PTP v2, UDP, Delay_req packet */
635 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
636 			ptp_v2 = PTP_TCR_TSVER2ENA;
637 			/* take time stamp for Delay_Req messages only */
638 			ts_master_en = PTP_TCR_TSMSTRENA;
639 			ts_event_en = PTP_TCR_TSEVNTENA;
640 
641 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643 			break;
644 
645 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
646 			/* PTP v2/802.1AS, any layer, any kind of event packet */
647 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
648 			ptp_v2 = PTP_TCR_TSVER2ENA;
649 			/* take time stamp for all event messages */
650 			if (priv->plat->has_gmac4)
651 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
652 			else
653 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
654 
655 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
656 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
657 			ptp_over_ethernet = PTP_TCR_TSIPENA;
658 			break;
659 
660 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
661 			/* PTP v2/802.1AS, any layer, Sync packet */
662 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
663 			ptp_v2 = PTP_TCR_TSVER2ENA;
664 			/* take time stamp for SYNC messages only */
665 			ts_event_en = PTP_TCR_TSEVNTENA;
666 
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			ptp_over_ethernet = PTP_TCR_TSIPENA;
670 			break;
671 
672 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
673 			/* PTP v2/802.1AS, any layer, Delay_req packet */
674 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
675 			ptp_v2 = PTP_TCR_TSVER2ENA;
676 			/* take time stamp for Delay_Req messages only */
677 			ts_master_en = PTP_TCR_TSMSTRENA;
678 			ts_event_en = PTP_TCR_TSEVNTENA;
679 
680 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
681 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
682 			ptp_over_ethernet = PTP_TCR_TSIPENA;
683 			break;
684 
685 		case HWTSTAMP_FILTER_NTP_ALL:
686 		case HWTSTAMP_FILTER_ALL:
687 			/* time stamp any incoming packet */
688 			config.rx_filter = HWTSTAMP_FILTER_ALL;
689 			tstamp_all = PTP_TCR_TSENALL;
690 			break;
691 
692 		default:
693 			return -ERANGE;
694 		}
695 	} else {
696 		switch (config.rx_filter) {
697 		case HWTSTAMP_FILTER_NONE:
698 			config.rx_filter = HWTSTAMP_FILTER_NONE;
699 			break;
700 		default:
701 			/* PTP v1, UDP, any kind of event packet */
702 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
703 			break;
704 		}
705 	}
706 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
707 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
708 
709 	if (!priv->hwts_tx_en && !priv->hwts_rx_en) {
710 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
711 	} else {
712 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
713 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
714 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
715 			 ts_master_en | snap_type_sel);
716 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
717 
718 		/* program Sub Second Increment reg */
719 		stmmac_config_sub_second_increment(priv,
720 				priv->ptpaddr, priv->plat->clk_ptp_rate,
721 				priv->plat->has_gmac4, &sec_inc);
722 		temp = div_u64(1000000000ULL, sec_inc);
723 
724 		/* Store sub second increment and flags for later use */
725 		priv->sub_second_inc = sec_inc;
726 		priv->systime_flags = value;
727 
728 		/* Calculate the default addend value.
729 		 * The formula is:
730 		 * addend = (2^32) / freq_div_ratio;
731 		 * where freq_div_ratio = 1e9 ns / sec_inc
732 		 */
733 		temp = (u64)(temp << 32);
734 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
735 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
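		/* Worked example (editorial, hypothetical numbers): with
		 * clk_ptp_rate = 50 MHz and sec_inc = 40 ns,
		 * freq_div_ratio = 1e9 / 40 = 25e6, so
		 * addend = 2^32 * 25e6 / 50e6 = 2^31 = 0x80000000,
		 * i.e. mid-scale, leaving headroom for later frequency
		 * adjustments in either direction.
		 */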
736 
737 		/* initialize system time */
738 		ktime_get_real_ts64(&now);
739 
740 		/* lower 32 bits of tv_sec are safe until y2106 */
741 		stmmac_init_systime(priv, priv->ptpaddr,
742 				(u32)now.tv_sec, now.tv_nsec);
743 	}
744 
745 	return copy_to_user(ifr->ifr_data, &config,
746 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
747 }
748 
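/* Illustrative user-space counterpart (editorial, standard SIOCSHWTSTAMP
 * API, not part of this driver):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * The driver may rewrite cfg.rx_filter (e.g. falling back to
 * HWTSTAMP_FILTER_PTP_V1_L4_EVENT when adv_ts is not available), so user
 * space should re-read the structure after the call.
 */
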
749 /**
750  * stmmac_init_ptp - init PTP
751  * @priv: driver private structure
752  * Description: this verifies whether the HW supports PTPv1 or PTPv2.
753  * This is done by looking at the HW capability register.
754  * This function also registers the ptp driver.
755  */
756 static int stmmac_init_ptp(struct stmmac_priv *priv)
757 {
758 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
759 		return -EOPNOTSUPP;
760 
761 	priv->adv_ts = 0;
762 	/* Check if adv_ts can be enabled for dwmac 4.x core */
763 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
764 		priv->adv_ts = 1;
765 	/* Dwmac 3.x core with extend_desc can support adv_ts */
766 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
767 		priv->adv_ts = 1;
768 
769 	if (priv->dma_cap.time_stamp)
770 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
771 
772 	if (priv->adv_ts)
773 		netdev_info(priv->dev,
774 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
775 
776 	priv->hwts_tx_en = 0;
777 	priv->hwts_rx_en = 0;
778 
779 	stmmac_ptp_register(priv);
780 
781 	return 0;
782 }
783 
784 static void stmmac_release_ptp(struct stmmac_priv *priv)
785 {
786 	if (priv->plat->clk_ptp_ref)
787 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
788 	stmmac_ptp_unregister(priv);
789 }
790 
791 /**
792  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
793  *  @priv: driver private structure
 *  @duplex: duplex mode negotiated by the PHY
794  *  Description: It is used for configuring the flow control in all queues
795  */
796 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
797 {
798 	u32 tx_cnt = priv->plat->tx_queues_to_use;
799 
800 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
801 			priv->pause, tx_cnt);
802 }
803 
804 /**
805  * stmmac_adjust_link - adjusts the link parameters
806  * @dev: net device structure
807  * Description: this is the helper called by the physical abstraction layer
808  * drivers to communicate the PHY link status. According to the speed and
809  * duplex, this driver can invoke registered glue-logic as well.
810  * It also invokes the EEE initialization because the device may switch
811  * between different networks (that are EEE capable).
812  */
813 static void stmmac_adjust_link(struct net_device *dev)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct phy_device *phydev = dev->phydev;
817 	bool new_state = false;
818 
819 	if (!phydev)
820 		return;
821 
822 	mutex_lock(&priv->lock);
823 
824 	if (phydev->link) {
825 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
826 
827 		/* Now we make sure that we can be in full duplex mode.
828 		 * If not, we operate in half-duplex mode.
		 */
829 		if (phydev->duplex != priv->oldduplex) {
830 			new_state = true;
831 			if (!phydev->duplex)
832 				ctrl &= ~priv->hw->link.duplex;
833 			else
834 				ctrl |= priv->hw->link.duplex;
835 			priv->oldduplex = phydev->duplex;
836 		}
837 		/* Flow Control operation */
838 		if (phydev->pause)
839 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
840 
841 		if (phydev->speed != priv->speed) {
842 			new_state = true;
843 			ctrl &= ~priv->hw->link.speed_mask;
844 			switch (phydev->speed) {
845 			case SPEED_1000:
846 				ctrl |= priv->hw->link.speed1000;
847 				break;
848 			case SPEED_100:
849 				ctrl |= priv->hw->link.speed100;
850 				break;
851 			case SPEED_10:
852 				ctrl |= priv->hw->link.speed10;
853 				break;
854 			default:
855 				netif_warn(priv, link, priv->dev,
856 					   "broken speed: %d\n", phydev->speed);
857 				phydev->speed = SPEED_UNKNOWN;
858 				break;
859 			}
860 			if (phydev->speed != SPEED_UNKNOWN)
861 				stmmac_hw_fix_mac_speed(priv);
862 			priv->speed = phydev->speed;
863 		}
864 
865 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
866 
867 		if (!priv->oldlink) {
868 			new_state = true;
869 			priv->oldlink = true;
870 		}
871 	} else if (priv->oldlink) {
872 		new_state = true;
873 		priv->oldlink = false;
874 		priv->speed = SPEED_UNKNOWN;
875 		priv->oldduplex = DUPLEX_UNKNOWN;
876 	}
877 
878 	if (new_state && netif_msg_link(priv))
879 		phy_print_status(phydev);
880 
881 	mutex_unlock(&priv->lock);
882 
883 	if (phydev->is_pseudo_fixed_link)
884 		/* Stop the PHY layer from calling the adjust_link hook in
885 		 * case a switch is attached to the stmmac driver.
886 		 */
887 		phydev->irq = PHY_IGNORE_INTERRUPT;
888 	else
889 		/* At this stage, init the EEE if supported.
890 		 * Never called in case of fixed_link.
891 		 */
892 		priv->eee_enabled = stmmac_eee_init(priv);
893 }
894 
895 /**
896  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
897  * @priv: driver private structure
898  * Description: this is to verify if the HW supports the PCS.
899  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
900  * configured for the TBI, RTBI, or SGMII PHY interface.
901  */
902 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
903 {
904 	int interface = priv->plat->interface;
905 
906 	if (priv->dma_cap.pcs) {
907 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
908 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
909 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
910 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
911 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
912 			priv->hw->pcs = STMMAC_PCS_RGMII;
913 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
914 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
915 			priv->hw->pcs = STMMAC_PCS_SGMII;
916 		}
917 	}
918 }
919 
920 /**
921  * stmmac_init_phy - PHY initialization
922  * @dev: net device structure
923  * Description: it initializes the driver's PHY state, and attaches the PHY
924  * to the MAC driver.
925  *  Return value:
926  *  0 on success
927  */
928 static int stmmac_init_phy(struct net_device *dev)
929 {
930 	struct stmmac_priv *priv = netdev_priv(dev);
931 	struct phy_device *phydev;
932 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
933 	char bus_id[MII_BUS_ID_SIZE];
934 	int interface = priv->plat->interface;
935 	int max_speed = priv->plat->max_speed;

936 	priv->oldlink = false;
937 	priv->speed = SPEED_UNKNOWN;
938 	priv->oldduplex = DUPLEX_UNKNOWN;
939 
940 	if (priv->plat->phy_node) {
941 		phydev = of_phy_connect(dev, priv->plat->phy_node,
942 					&stmmac_adjust_link, 0, interface);
943 	} else {
944 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
945 			 priv->plat->bus_id);
946 
947 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
948 			 priv->plat->phy_addr);
949 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
950 			   phy_id_fmt);
951 
952 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
953 				     interface);
954 	}
955 
956 	if (IS_ERR_OR_NULL(phydev)) {
957 		netdev_err(priv->dev, "Could not attach to PHY\n");
958 		if (!phydev)
959 			return -ENODEV;
960 
961 		return PTR_ERR(phydev);
962 	}
963 
964 	/* Stop advertising 1000BASE-T capability if interface is not GMII */
965 	if ((interface == PHY_INTERFACE_MODE_MII) ||
966 	    (interface == PHY_INTERFACE_MODE_RMII) ||
967 	    (max_speed < 1000 && max_speed > 0))
968 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
969 					 SUPPORTED_1000baseT_Full);
970 
971 	/*
972 	 * Broken HW is sometimes missing the pull-up resistor on the
973 	 * MDIO line, which results in reads to non-existent devices returning
974 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
975 	 * device as well.
976 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
977 	 */
978 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
979 		phy_disconnect(phydev);
980 		return -ENODEV;
981 	}
982 
983 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
984 	 * subsequent PHY polling, make sure we force a link transition if
985 	 * subsequent PHY polling; make sure we force a link transition if
986 	 */
987 	if (phydev->is_pseudo_fixed_link)
988 		phydev->irq = PHY_POLL;
989 
990 	phy_attached_info(phydev);
991 	return 0;
992 }
993 
994 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
995 {
996 	u32 rx_cnt = priv->plat->rx_queues_to_use;
997 	void *head_rx;
998 	u32 queue;
999 
1000 	/* Display RX rings */
1001 	for (queue = 0; queue < rx_cnt; queue++) {
1002 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1003 
1004 		pr_info("\tRX Queue %u rings\n", queue);
1005 
1006 		if (priv->extend_desc)
1007 			head_rx = (void *)rx_q->dma_erx;
1008 		else
1009 			head_rx = (void *)rx_q->dma_rx;
1010 
1011 		/* Display RX ring */
1012 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1013 	}
1014 }
1015 
1016 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1017 {
1018 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1019 	void *head_tx;
1020 	u32 queue;
1021 
1022 	/* Display TX rings */
1023 	for (queue = 0; queue < tx_cnt; queue++) {
1024 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1025 
1026 		pr_info("\tTX Queue %u rings\n", queue);
1027 
1028 		if (priv->extend_desc)
1029 			head_tx = (void *)tx_q->dma_etx;
1030 		else
1031 			head_tx = (void *)tx_q->dma_tx;
1032 
1033 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1034 	}
1035 }
1036 
1037 static void stmmac_display_rings(struct stmmac_priv *priv)
1038 {
1039 	/* Display RX ring */
1040 	stmmac_display_rx_rings(priv);
1041 
1042 	/* Display TX ring */
1043 	stmmac_display_tx_rings(priv);
1044 }
1045 
1046 static int stmmac_set_bfsize(int mtu, int bufsize)
1047 {
1048 	int ret = bufsize;
1049 
1050 	if (mtu >= BUF_SIZE_4KiB)
1051 		ret = BUF_SIZE_8KiB;
1052 	else if (mtu >= BUF_SIZE_2KiB)
1053 		ret = BUF_SIZE_4KiB;
1054 	else if (mtu > DEFAULT_BUFSIZE)
1055 		ret = BUF_SIZE_2KiB;
1056 	else
1057 		ret = DEFAULT_BUFSIZE;
1058 
1059 	return ret;
1060 }
1061 
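/* Example mapping (editorial): a standard 1500-byte MTU keeps the
 * DEFAULT_BUFSIZE of 1536, a 3000-byte MTU selects BUF_SIZE_4KiB, and any
 * MTU of 4096 bytes or more selects BUF_SIZE_8KiB.
 */
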
1062 /**
1063  * stmmac_clear_rx_descriptors - clear RX descriptors
1064  * @priv: driver private structure
1065  * @queue: RX queue index
1066  * Description: this function is called to clear the RX descriptors
1067  * whether basic or extended descriptors are in use.
1068  */
1069 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1070 {
1071 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1072 	int i;
1073 
1074 	/* Clear the RX descriptors */
1075 	for (i = 0; i < DMA_RX_SIZE; i++)
1076 		if (priv->extend_desc)
1077 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1078 					priv->use_riwt, priv->mode,
1079 					(i == DMA_RX_SIZE - 1));
1080 		else
1081 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1082 					priv->use_riwt, priv->mode,
1083 					(i == DMA_RX_SIZE - 1));
1084 }
1085 
1086 /**
1087  * stmmac_clear_tx_descriptors - clear tx descriptors
1088  * @priv: driver private structure
1089  * @queue: TX queue index.
1090  * Description: this function is called to clear the TX descriptors
1091  * whether basic or extended descriptors are in use.
1092  */
1093 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1094 {
1095 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1096 	int i;
1097 
1098 	/* Clear the TX descriptors */
1099 	for (i = 0; i < DMA_TX_SIZE; i++)
1100 		if (priv->extend_desc)
1101 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1102 					priv->mode, (i == DMA_TX_SIZE - 1));
1103 		else
1104 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1105 					priv->mode, (i == DMA_TX_SIZE - 1));
1106 }
1107 
1108 /**
1109  * stmmac_clear_descriptors - clear descriptors
1110  * @priv: driver private structure
1111  * Description: this function is called to clear the TX and RX descriptors
1112  * whether basic or extended descriptors are in use.
1113  */
1114 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1115 {
1116 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1117 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1118 	u32 queue;
1119 
1120 	/* Clear the RX descriptors */
1121 	for (queue = 0; queue < rx_queue_cnt; queue++)
1122 		stmmac_clear_rx_descriptors(priv, queue);
1123 
1124 	/* Clear the TX descriptors */
1125 	for (queue = 0; queue < tx_queue_cnt; queue++)
1126 		stmmac_clear_tx_descriptors(priv, queue);
1127 }
1128 
1129 /**
1130  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1131  * @priv: driver private structure
1132  * @p: descriptor pointer
1133  * @i: descriptor index
1134  * @flags: gfp flag
1135  * @queue: RX queue index
1136  * Description: this function is called to allocate a receive buffer, perform
1137  * the DMA mapping and init the descriptor.
1138  */
1139 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1140 				  int i, gfp_t flags, u32 queue)
1141 {
1142 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1143 	struct sk_buff *skb;
1144 
1145 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1146 	if (!skb) {
1147 		netdev_err(priv->dev,
1148 			   "%s: Rx init fails; skb is NULL\n", __func__);
1149 		return -ENOMEM;
1150 	}
1151 	rx_q->rx_skbuff[i] = skb;
1152 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1153 						priv->dma_buf_sz,
1154 						DMA_FROM_DEVICE);
1155 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1156 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1157 		dev_kfree_skb_any(skb);
1158 		return -EINVAL;
1159 	}
1160 
1161 	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1162 
1163 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1164 		stmmac_init_desc3(priv, p);
1165 
1166 	return 0;
1167 }
1168 
1169 /**
1170  * stmmac_free_rx_buffer - free RX dma buffers
1171  * @priv: private structure
1172  * @queue: RX queue index
1173  * @i: buffer index.
1174  */
1175 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1176 {
1177 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1178 
1179 	if (rx_q->rx_skbuff[i]) {
1180 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1181 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1182 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1183 	}
1184 	rx_q->rx_skbuff[i] = NULL;
1185 }
1186 
1187 /**
1188  * stmmac_free_tx_buffer - free TX dma buffers
1189  * @priv: private structure
1190  * @queue: TX queue index
1191  * @i: buffer index.
1192  */
1193 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1194 {
1195 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1196 
1197 	if (tx_q->tx_skbuff_dma[i].buf) {
1198 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1199 			dma_unmap_page(priv->device,
1200 				       tx_q->tx_skbuff_dma[i].buf,
1201 				       tx_q->tx_skbuff_dma[i].len,
1202 				       DMA_TO_DEVICE);
1203 		else
1204 			dma_unmap_single(priv->device,
1205 					 tx_q->tx_skbuff_dma[i].buf,
1206 					 tx_q->tx_skbuff_dma[i].len,
1207 					 DMA_TO_DEVICE);
1208 	}
1209 
1210 	if (tx_q->tx_skbuff[i]) {
1211 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1212 		tx_q->tx_skbuff[i] = NULL;
1213 		tx_q->tx_skbuff_dma[i].buf = 0;
1214 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1215 	}
1216 }
1217 
1218 /**
1219  * init_dma_rx_desc_rings - init the RX descriptor rings
1220  * @dev: net device structure
1221  * @flags: gfp flag.
1222  * Description: this function initializes the DMA RX descriptors
1223  * and allocates the socket buffers. It supports the chained and ring
1224  * modes.
1225  */
1226 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1227 {
1228 	struct stmmac_priv *priv = netdev_priv(dev);
1229 	u32 rx_count = priv->plat->rx_queues_to_use;
1230 	int ret = -ENOMEM;
1231 	int bfsize = 0;
1232 	int queue;
1233 	int i;
1234 
1235 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1236 	if (bfsize < 0)
1237 		bfsize = 0;
1238 
1239 	if (bfsize < BUF_SIZE_16KiB)
1240 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1241 
1242 	priv->dma_buf_sz = bfsize;
1243 
1244 	/* RX INITIALIZATION */
1245 	netif_dbg(priv, probe, priv->dev,
1246 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1247 
1248 	for (queue = 0; queue < rx_count; queue++) {
1249 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1250 
1251 		netif_dbg(priv, probe, priv->dev,
1252 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1253 			  (u32)rx_q->dma_rx_phy);
1254 
1255 		for (i = 0; i < DMA_RX_SIZE; i++) {
1256 			struct dma_desc *p;
1257 
1258 			if (priv->extend_desc)
1259 				p = &((rx_q->dma_erx + i)->basic);
1260 			else
1261 				p = rx_q->dma_rx + i;
1262 
1263 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1264 						     queue);
1265 			if (ret)
1266 				goto err_init_rx_buffers;
1267 
1268 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1269 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1270 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1271 		}
1272 
1273 		rx_q->cur_rx = 0;
1274 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1275 
1276 		stmmac_clear_rx_descriptors(priv, queue);
1277 
1278 		/* Setup the chained descriptor addresses */
1279 		if (priv->mode == STMMAC_CHAIN_MODE) {
1280 			if (priv->extend_desc)
1281 				stmmac_mode_init(priv, rx_q->dma_erx,
1282 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1283 			else
1284 				stmmac_mode_init(priv, rx_q->dma_rx,
1285 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1286 		}
1287 	}
1288 
1289 	buf_sz = bfsize;
1290 
1291 	return 0;
1292 
1293 err_init_rx_buffers:
1294 	while (queue >= 0) {
1295 		while (--i >= 0)
1296 			stmmac_free_rx_buffer(priv, queue, i);
1297 
1298 		if (queue == 0)
1299 			break;
1300 
1301 		i = DMA_RX_SIZE;
1302 		queue--;
1303 	}
1304 
1305 	return ret;
1306 }
1307 
1308 /**
1309  * init_dma_tx_desc_rings - init the TX descriptor rings
1310  * @dev: net device structure.
1311  * Description: this function initializes the DMA TX descriptors
1312  * and clears the TX skbuff bookkeeping. It supports the chained and
1313  * ring modes.
1314  */
1315 static int init_dma_tx_desc_rings(struct net_device *dev)
1316 {
1317 	struct stmmac_priv *priv = netdev_priv(dev);
1318 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1319 	u32 queue;
1320 	int i;
1321 
1322 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1323 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1324 
1325 		netif_dbg(priv, probe, priv->dev,
1326 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1327 			 (u32)tx_q->dma_tx_phy);
1328 
1329 		/* Setup the chained descriptor addresses */
1330 		if (priv->mode == STMMAC_CHAIN_MODE) {
1331 			if (priv->extend_desc)
1332 				stmmac_mode_init(priv, tx_q->dma_etx,
1333 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1334 			else
1335 				stmmac_mode_init(priv, tx_q->dma_tx,
1336 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1337 		}
1338 
1339 		for (i = 0; i < DMA_TX_SIZE; i++) {
1340 			struct dma_desc *p;
1341 			if (priv->extend_desc)
1342 				p = &((tx_q->dma_etx + i)->basic);
1343 			else
1344 				p = tx_q->dma_tx + i;
1345 
1346 			stmmac_clear_desc(priv, p);
1347 
1348 			tx_q->tx_skbuff_dma[i].buf = 0;
1349 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1350 			tx_q->tx_skbuff_dma[i].len = 0;
1351 			tx_q->tx_skbuff_dma[i].last_segment = false;
1352 			tx_q->tx_skbuff[i] = NULL;
1353 		}
1354 
1355 		tx_q->dirty_tx = 0;
1356 		tx_q->cur_tx = 0;
1357 		tx_q->mss = 0;
1358 
1359 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1360 	}
1361 
1362 	return 0;
1363 }
1364 
1365 /**
1366  * init_dma_desc_rings - init the RX/TX descriptor rings
1367  * @dev: net device structure
1368  * @flags: gfp flag.
1369  * Description: this function initializes the DMA RX/TX descriptors
1370  * and allocates the socket buffers. It supports the chained and ring
1371  * modes.
1372  */
1373 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1374 {
1375 	struct stmmac_priv *priv = netdev_priv(dev);
1376 	int ret;
1377 
1378 	ret = init_dma_rx_desc_rings(dev, flags);
1379 	if (ret)
1380 		return ret;
1381 
1382 	ret = init_dma_tx_desc_rings(dev);
1383 
1384 	stmmac_clear_descriptors(priv);
1385 
1386 	if (netif_msg_hw(priv))
1387 		stmmac_display_rings(priv);
1388 
1389 	return ret;
1390 }
1391 
1392 /**
1393  * dma_free_rx_skbufs - free RX dma buffers
1394  * @priv: private structure
1395  * @queue: RX queue index
1396  */
1397 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1398 {
1399 	int i;
1400 
1401 	for (i = 0; i < DMA_RX_SIZE; i++)
1402 		stmmac_free_rx_buffer(priv, queue, i);
1403 }
1404 
1405 /**
1406  * dma_free_tx_skbufs - free TX dma buffers
1407  * @priv: private structure
1408  * @queue: TX queue index
1409  */
1410 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1411 {
1412 	int i;
1413 
1414 	for (i = 0; i < DMA_TX_SIZE; i++)
1415 		stmmac_free_tx_buffer(priv, queue, i);
1416 }
1417 
1418 /**
1419  * free_dma_rx_desc_resources - free RX dma desc resources
1420  * @priv: private structure
1421  */
1422 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1423 {
1424 	u32 rx_count = priv->plat->rx_queues_to_use;
1425 	u32 queue;
1426 
1427 	/* Free RX queue resources */
1428 	for (queue = 0; queue < rx_count; queue++) {
1429 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1430 
1431 		/* Release the DMA RX socket buffers */
1432 		dma_free_rx_skbufs(priv, queue);
1433 
1434 		/* Free DMA regions of consistent memory previously allocated */
1435 		if (!priv->extend_desc)
1436 			dma_free_coherent(priv->device,
1437 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1438 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1439 		else
1440 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1441 					  sizeof(struct dma_extended_desc),
1442 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1443 
1444 		kfree(rx_q->rx_skbuff_dma);
1445 		kfree(rx_q->rx_skbuff);
1446 	}
1447 }
1448 
1449 /**
1450  * free_dma_tx_desc_resources - free TX dma desc resources
1451  * @priv: private structure
1452  */
1453 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1454 {
1455 	u32 tx_count = priv->plat->tx_queues_to_use;
1456 	u32 queue;
1457 
1458 	/* Free TX queue resources */
1459 	for (queue = 0; queue < tx_count; queue++) {
1460 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1461 
1462 		/* Release the DMA TX socket buffers */
1463 		dma_free_tx_skbufs(priv, queue);
1464 
1465 		/* Free DMA regions of consistent memory previously allocated */
1466 		if (!priv->extend_desc)
1467 			dma_free_coherent(priv->device,
1468 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1469 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1470 		else
1471 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1472 					  sizeof(struct dma_extended_desc),
1473 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1474 
1475 		kfree(tx_q->tx_skbuff_dma);
1476 		kfree(tx_q->tx_skbuff);
1477 	}
1478 }
1479 
1480 /**
1481  * alloc_dma_rx_desc_resources - alloc RX resources.
1482  * @priv: private structure
1483  * Description: according to which descriptor can be used (extended or basic)
1484  * this function allocates the resources for the RX path. It
1485  * pre-allocates the RX socket buffers in order to allow a
1486  * zero-copy mechanism.
1487  */
1488 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1489 {
1490 	u32 rx_count = priv->plat->rx_queues_to_use;
1491 	int ret = -ENOMEM;
1492 	u32 queue;
1493 
1494 	/* RX queues buffers and DMA */
1495 	for (queue = 0; queue < rx_count; queue++) {
1496 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1497 
1498 		rx_q->queue_index = queue;
1499 		rx_q->priv_data = priv;
1500 
1501 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1502 						    sizeof(dma_addr_t),
1503 						    GFP_KERNEL);
1504 		if (!rx_q->rx_skbuff_dma)
1505 			goto err_dma;
1506 
1507 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1508 						sizeof(struct sk_buff *),
1509 						GFP_KERNEL);
1510 		if (!rx_q->rx_skbuff)
1511 			goto err_dma;
1512 
1513 		if (priv->extend_desc) {
1514 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1515 							    DMA_RX_SIZE *
1516 							    sizeof(struct
1517 							    dma_extended_desc),
1518 							    &rx_q->dma_rx_phy,
1519 							    GFP_KERNEL);
1520 			if (!rx_q->dma_erx)
1521 				goto err_dma;
1522 
1523 		} else {
1524 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1525 							   DMA_RX_SIZE *
1526 							   sizeof(struct
1527 							   dma_desc),
1528 							   &rx_q->dma_rx_phy,
1529 							   GFP_KERNEL);
1530 			if (!rx_q->dma_rx)
1531 				goto err_dma;
1532 		}
1533 	}
1534 
1535 	return 0;
1536 
1537 err_dma:
1538 	free_dma_rx_desc_resources(priv);
1539 
1540 	return ret;
1541 }
1542 
1543 /**
1544  * alloc_dma_tx_desc_resources - alloc TX resources.
1545  * @priv: private structure
1546  * Description: according to which descriptor can be used (extended or basic)
1547  * this function allocates the resources for the TX path: the per-queue
1548  * skbuff bookkeeping arrays and the coherent DMA memory backing the
1549  * descriptor rings.
1550  */
1551 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1552 {
1553 	u32 tx_count = priv->plat->tx_queues_to_use;
1554 	int ret = -ENOMEM;
1555 	u32 queue;
1556 
1557 	/* TX queues buffers and DMA */
1558 	for (queue = 0; queue < tx_count; queue++) {
1559 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1560 
1561 		tx_q->queue_index = queue;
1562 		tx_q->priv_data = priv;
1563 
1564 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1565 						    sizeof(*tx_q->tx_skbuff_dma),
1566 						    GFP_KERNEL);
1567 		if (!tx_q->tx_skbuff_dma)
1568 			goto err_dma;
1569 
1570 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1571 						sizeof(struct sk_buff *),
1572 						GFP_KERNEL);
1573 		if (!tx_q->tx_skbuff)
1574 			goto err_dma;
1575 
1576 		if (priv->extend_desc) {
1577 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1578 							    DMA_TX_SIZE *
1579 							    sizeof(struct
1580 							    dma_extended_desc),
1581 							    &tx_q->dma_tx_phy,
1582 							    GFP_KERNEL);
1583 			if (!tx_q->dma_etx)
1584 				goto err_dma;
1585 		} else {
1586 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1587 							   DMA_TX_SIZE *
1588 							   sizeof(struct
1589 								  dma_desc),
1590 							   &tx_q->dma_tx_phy,
1591 							   GFP_KERNEL);
1592 			if (!tx_q->dma_tx)
1593 				goto err_dma;
1594 		}
1595 	}
1596 
1597 	return 0;
1598 
1599 err_dma:
1600 	free_dma_tx_desc_resources(priv);
1601 
1602 	return ret;
1603 }
1604 
1605 /**
1606  * alloc_dma_desc_resources - alloc TX/RX resources.
1607  * @priv: private structure
1608  * Description: according to which descriptor can be used (extended or basic)
1609  * this function allocates the resources for the TX and RX paths. In case of
1610  * reception, for example, it pre-allocates the RX socket buffers in order to
1611  * allow a zero-copy mechanism.
1612  */
1613 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1614 {
1615 	/* RX Allocation */
1616 	int ret = alloc_dma_rx_desc_resources(priv);
1617 
1618 	if (ret)
1619 		return ret;
1620 
1621 	ret = alloc_dma_tx_desc_resources(priv);
1622 
1623 	return ret;
1624 }
1625 
1626 /**
1627  * free_dma_desc_resources - free dma desc resources
1628  * @priv: private structure
1629  */
1630 static void free_dma_desc_resources(struct stmmac_priv *priv)
1631 {
1632 	/* Release the DMA RX socket buffers */
1633 	free_dma_rx_desc_resources(priv);
1634 
1635 	/* Release the DMA TX socket buffers */
1636 	free_dma_tx_desc_resources(priv);
1637 }
1638 
1639 /**
1640  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1641  *  @priv: driver private structure
1642  *  Description: It is used for enabling the rx queues in the MAC
1643  */
1644 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1645 {
1646 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1647 	int queue;
1648 	u8 mode;
1649 
1650 	for (queue = 0; queue < rx_queues_count; queue++) {
1651 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1652 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1653 	}
1654 }
1655 
1656 /**
1657  * stmmac_start_rx_dma - start RX DMA channel
1658  * @priv: driver private structure
1659  * @chan: RX channel index
1660  * Description:
1661  * This starts an RX DMA channel
1662  */
1663 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1664 {
1665 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1666 	stmmac_start_rx(priv, priv->ioaddr, chan);
1667 }
1668 
1669 /**
1670  * stmmac_start_tx_dma - start TX DMA channel
1671  * @priv: driver private structure
1672  * @chan: TX channel index
1673  * Description:
1674  * This starts a TX DMA channel
1675  */
1676 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1677 {
1678 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1679 	stmmac_start_tx(priv, priv->ioaddr, chan);
1680 }
1681 
1682 /**
1683  * stmmac_stop_rx_dma - stop RX DMA channel
1684  * @priv: driver private structure
1685  * @chan: RX channel index
1686  * Description:
1687  * This stops an RX DMA channel
1688  */
1689 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1690 {
1691 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1692 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1693 }
1694 
1695 /**
1696  * stmmac_stop_tx_dma - stop TX DMA channel
1697  * @priv: driver private structure
1698  * @chan: TX channel index
1699  * Description:
1700  * This stops a TX DMA channel
1701  */
1702 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1703 {
1704 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1705 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1706 }
1707 
1708 /**
1709  * stmmac_start_all_dma - start all RX and TX DMA channels
1710  * @priv: driver private structure
1711  * Description:
1712  * This starts all the RX and TX DMA channels
1713  */
1714 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1715 {
1716 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1717 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1718 	u32 chan = 0;
1719 
1720 	for (chan = 0; chan < rx_channels_count; chan++)
1721 		stmmac_start_rx_dma(priv, chan);
1722 
1723 	for (chan = 0; chan < tx_channels_count; chan++)
1724 		stmmac_start_tx_dma(priv, chan);
1725 }
1726 
1727 /**
1728  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1729  * @priv: driver private structure
1730  * Description:
1731  * This stops the RX and TX DMA channels
1732  */
1733 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1734 {
1735 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1736 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1737 	u32 chan = 0;
1738 
1739 	for (chan = 0; chan < rx_channels_count; chan++)
1740 		stmmac_stop_rx_dma(priv, chan);
1741 
1742 	for (chan = 0; chan < tx_channels_count; chan++)
1743 		stmmac_stop_tx_dma(priv, chan);
1744 }
1745 
1746 /**
1747  *  stmmac_dma_operation_mode - HW DMA operation mode
1748  *  @priv: driver private structure
1749  *  Description: it is used for configuring the DMA operation mode register in
1750  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1751  */
1752 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1753 {
1754 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1755 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1756 	int rxfifosz = priv->plat->rx_fifo_size;
1757 	int txfifosz = priv->plat->tx_fifo_size;
1758 	u32 txmode = 0;
1759 	u32 rxmode = 0;
1760 	u32 chan = 0;
1761 	u8 qmode = 0;
1762 
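	/* Fall back to the FIFO sizes reported in the HW capability
	 * register when the platform does not provide them.
	 */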
1763 	if (rxfifosz == 0)
1764 		rxfifosz = priv->dma_cap.rx_fifo_size;
1765 	if (txfifosz == 0)
1766 		txfifosz = priv->dma_cap.tx_fifo_size;
1767 
1768 	/* Adjust for real per queue fifo size */
1769 	rxfifosz /= rx_channels_count;
1770 	txfifosz /= tx_channels_count;
1771 
1772 	if (priv->plat->force_thresh_dma_mode) {
1773 		txmode = tc;
1774 		rxmode = tc;
1775 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE being actually supported
		 * 2) there being no bugged Jumbo frame support
		 *    that requires skipping the csum insertion
		 *    in the TDES.
		 */
1783 		txmode = SF_DMA_MODE;
1784 		rxmode = SF_DMA_MODE;
1785 		priv->xstats.threshold = SF_DMA_MODE;
1786 	} else {
1787 		txmode = tc;
1788 		rxmode = SF_DMA_MODE;
1789 	}
1790 
1791 	/* configure all channels */
1792 	for (chan = 0; chan < rx_channels_count; chan++) {
1793 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1794 
1795 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1796 				rxfifosz, qmode);
1797 	}
1798 
1799 	for (chan = 0; chan < tx_channels_count; chan++) {
1800 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1801 
1802 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1803 				txfifosz, qmode);
1804 	}
1805 }
1806 
1807 /**
1808  * stmmac_tx_clean - to manage the transmission completion
1809  * @priv: driver private structure
1810  * @queue: TX queue index
1811  * Description: it reclaims the transmit resources after transmission completes.
1812  */
1813 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1814 {
1815 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1816 	unsigned int bytes_compl = 0, pkts_compl = 0;
1817 	unsigned int entry;
1818 
1819 	netif_tx_lock(priv->dev);
1820 
1821 	priv->xstats.tx_clean++;
1822 
1823 	entry = tx_q->dirty_tx;
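	/* Walk the ring from the last cleaned descriptor (dirty_tx) up
	 * to the next one to be used by the xmit path (cur_tx).
	 */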
1824 	while (entry != tx_q->cur_tx) {
1825 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1826 		struct dma_desc *p;
1827 		int status;
1828 
1829 		if (priv->extend_desc)
1830 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1831 		else
1832 			p = tx_q->dma_tx + entry;
1833 
1834 		status = stmmac_tx_status(priv, &priv->dev->stats,
1835 				&priv->xstats, p, priv->ioaddr);
1836 		/* Check if the descriptor is owned by the DMA */
1837 		if (unlikely(status & tx_dma_own))
1838 			break;
1839 
1840 		/* Make sure descriptor fields are read after reading
1841 		 * the own bit.
1842 		 */
1843 		dma_rmb();
1844 
1845 		/* Just consider the last segment and ...*/
1846 		if (likely(!(status & tx_not_ls))) {
1847 			/* ... verify the status error condition */
1848 			if (unlikely(status & tx_err)) {
1849 				priv->dev->stats.tx_errors++;
1850 			} else {
1851 				priv->dev->stats.tx_packets++;
1852 				priv->xstats.tx_pkt_n++;
1853 			}
1854 			stmmac_get_tx_hwtstamp(priv, p, skb);
1855 		}
1856 
1857 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1858 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1859 				dma_unmap_page(priv->device,
1860 					       tx_q->tx_skbuff_dma[entry].buf,
1861 					       tx_q->tx_skbuff_dma[entry].len,
1862 					       DMA_TO_DEVICE);
1863 			else
1864 				dma_unmap_single(priv->device,
1865 						 tx_q->tx_skbuff_dma[entry].buf,
1866 						 tx_q->tx_skbuff_dma[entry].len,
1867 						 DMA_TO_DEVICE);
1868 			tx_q->tx_skbuff_dma[entry].buf = 0;
1869 			tx_q->tx_skbuff_dma[entry].len = 0;
1870 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1871 		}
1872 
1873 		stmmac_clean_desc3(priv, tx_q, p);
1874 
1875 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1876 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1877 
1878 		if (likely(skb != NULL)) {
1879 			pkts_compl++;
1880 			bytes_compl += skb->len;
1881 			dev_consume_skb_any(skb);
1882 			tx_q->tx_skbuff[entry] = NULL;
1883 		}
1884 
1885 		stmmac_release_tx_desc(priv, p, priv->mode);
1886 
1887 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1888 	}
1889 	tx_q->dirty_tx = entry;
1890 
1891 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1892 				  pkts_compl, bytes_compl);
1893 
1894 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1895 								queue))) &&
1896 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1897 
1898 		netif_dbg(priv, tx_done, priv->dev,
1899 			  "%s: restart transmit\n", __func__);
1900 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1901 	}
1902 
1903 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1904 		stmmac_enable_eee_mode(priv);
1905 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1906 	}
1907 	netif_tx_unlock(priv->dev);
1908 }
1909 
1910 /**
1911  * stmmac_tx_err - to manage the tx error
1912  * @priv: driver private structure
1913  * @chan: channel index
1914  * Description: it cleans the descriptors and restarts the transmission
1915  * in case of transmission errors.
1916  */
1917 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1918 {
1919 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1920 	int i;
1921 
1922 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1923 
1924 	stmmac_stop_tx_dma(priv, chan);
1925 	dma_free_tx_skbufs(priv, chan);
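	/* Reinitialize every descriptor so the ring restarts from a
	 * clean state.
	 */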
	for (i = 0; i < DMA_TX_SIZE; i++) {
		if (priv->extend_desc)
			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
					priv->mode, (i == DMA_TX_SIZE - 1));
		else
			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
					priv->mode, (i == DMA_TX_SIZE - 1));
	}
1933 	tx_q->dirty_tx = 0;
1934 	tx_q->cur_tx = 0;
1935 	tx_q->mss = 0;
1936 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1937 	stmmac_start_tx_dma(priv, chan);
1938 
1939 	priv->dev->stats.tx_errors++;
1940 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1941 }
1942 
1943 /**
1944  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1945  *  @priv: driver private structure
1946  *  @txmode: TX operating mode
1947  *  @rxmode: RX operating mode
1948  *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1951  *  mode.
1952  */
1953 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1954 					  u32 rxmode, u32 chan)
1955 {
1956 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1957 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1958 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1959 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1960 	int rxfifosz = priv->plat->rx_fifo_size;
1961 	int txfifosz = priv->plat->tx_fifo_size;
1962 
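	/* Fall back to the FIFO sizes reported in the HW capability
	 * register when the platform does not provide them.
	 */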
1963 	if (rxfifosz == 0)
1964 		rxfifosz = priv->dma_cap.rx_fifo_size;
1965 	if (txfifosz == 0)
1966 		txfifosz = priv->dma_cap.tx_fifo_size;
1967 
1968 	/* Adjust for real per queue fifo size */
1969 	rxfifosz /= rx_channels_count;
1970 	txfifosz /= tx_channels_count;
1971 
1972 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
1973 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
1974 }
1975 
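/**
 * stmmac_safety_feat_interrupt - check the safety feature irq status
 * @priv: driver private structure
 * Description: it reads the safety feature irq status and, on any error
 * other than -EINVAL, flags a global error and returns true.
 */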
1976 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
1977 {
1978 	int ret;
1979 
1980 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
1981 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
1982 	if (ret && (ret != -EINVAL)) {
1983 		stmmac_global_err(priv);
1984 		return true;
1985 	}
1986 
1987 	return false;
1988 }
1989 
1990 /**
1991  * stmmac_dma_interrupt - DMA ISR
1992  * @priv: driver private structure
1993  * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
1996  */
1997 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1998 {
1999 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2000 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2001 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2002 				tx_channel_count : rx_channel_count;
2003 	u32 chan;
2004 	bool poll_scheduled = false;
2005 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2006 
2007 	/* Make sure we never check beyond our status buffer. */
2008 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2009 		channels_to_check = ARRAY_SIZE(status);
2010 
2011 	/* Each DMA channel can be used for rx and tx simultaneously, yet
2012 	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2013 	 * stmmac_channel struct.
2014 	 * Because of this, stmmac_poll currently checks (and possibly wakes)
2015 	 * all tx queues rather than just a single tx queue.
2016 	 */
2017 	for (chan = 0; chan < channels_to_check; chan++)
2018 		status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2019 				&priv->xstats, chan);
2020 
2021 	for (chan = 0; chan < rx_channel_count; chan++) {
2022 		if (likely(status[chan] & handle_rx)) {
2023 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2024 
2025 			if (likely(napi_schedule_prep(&rx_q->napi))) {
2026 				stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2027 				__napi_schedule(&rx_q->napi);
2028 				poll_scheduled = true;
2029 			}
2030 		}
2031 	}
2032 
2033 	/* If we scheduled poll, we already know that tx queues will be checked.
2034 	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2035 	 * completed transmission, if so, call stmmac_poll (once).
2036 	 */
2037 	if (!poll_scheduled) {
2038 		for (chan = 0; chan < tx_channel_count; chan++) {
2039 			if (status[chan] & handle_tx) {
2040 				/* It doesn't matter what rx queue we choose
2041 				 * here. We use 0 since it always exists.
2042 				 */
2043 				struct stmmac_rx_queue *rx_q =
2044 					&priv->rx_queue[0];
2045 
2046 				if (likely(napi_schedule_prep(&rx_q->napi))) {
2047 					stmmac_disable_dma_irq(priv,
2048 							priv->ioaddr, chan);
2049 					__napi_schedule(&rx_q->napi);
2050 				}
2051 				break;
2052 			}
2053 		}
2054 	}
2055 
2056 	for (chan = 0; chan < tx_channel_count; chan++) {
2057 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2058 			/* Try to bump up the dma threshold on this failure */
2059 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2060 			    (tc <= 256)) {
2061 				tc += 64;
2062 				if (priv->plat->force_thresh_dma_mode)
2063 					stmmac_set_dma_operation_mode(priv,
2064 								      tc,
2065 								      tc,
2066 								      chan);
2067 				else
2068 					stmmac_set_dma_operation_mode(priv,
2069 								    tc,
2070 								    SF_DMA_MODE,
2071 								    chan);
2072 				priv->xstats.threshold = tc;
2073 			}
2074 		} else if (unlikely(status[chan] == tx_hard_error)) {
2075 			stmmac_tx_err(priv, chan);
2076 		}
2077 	}
2078 }
2079 
2080 /**
 * stmmac_mmc_setup: setup the MAC Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq because the counters are managed in SW.
2084  */
2085 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2086 {
2087 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2088 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2089 
2090 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2091 
2092 	if (priv->dma_cap.rmon) {
2093 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2094 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else {
		netdev_info(priv->dev, "No MAC Management Counters available\n");
	}
2097 }
2098 
2099 /**
2100  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2101  * @priv: driver private structure
2102  * Description:
 *  newer GMAC chip generations have a register that indicates the
 *  presence of the optional features/functions.
 *  It can also be used to override the values passed through the
 *  platform, which is necessary for the old MAC10/100 and GMAC chips.
2107  */
2108 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2109 {
2110 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2111 }
2112 
2113 /**
2114  * stmmac_check_ether_addr - check if the MAC addr is valid
2115  * @priv: driver private structure
2116  * Description:
 * it verifies that the MAC address is valid; if it is not, it
 * generates a random MAC address
2119  */
2120 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2121 {
2122 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2123 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2124 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2125 			eth_hw_addr_random(priv->dev);
2126 		netdev_info(priv->dev, "device MAC address %pM\n",
2127 			    priv->dev->dev_addr);
2128 	}
2129 }
2130 
2131 /**
2132  * stmmac_init_dma_engine - DMA init.
2133  * @priv: driver private structure
2134  * Description:
 * It inits the DMA by invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is used for the MAC or GMAC.
2138  */
2139 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2140 {
2141 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2142 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2143 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2144 	struct stmmac_rx_queue *rx_q;
2145 	struct stmmac_tx_queue *tx_q;
2146 	u32 chan = 0;
2147 	int atds = 0;
2148 	int ret = 0;
2149 
2150 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2151 		dev_err(priv->device, "Invalid DMA configuration\n");
2152 		return -EINVAL;
2153 	}
2154 
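	/* atds (alternate descriptor size) is only set when the extended
	 * descriptors are used in ring mode.
	 */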
2155 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2156 		atds = 1;
2157 
2158 	ret = stmmac_reset(priv, priv->ioaddr);
2159 	if (ret) {
2160 		dev_err(priv->device, "Failed to reset the dma\n");
2161 		return ret;
2162 	}
2163 
2164 	/* DMA RX Channel Configuration */
2165 	for (chan = 0; chan < rx_channels_count; chan++) {
2166 		rx_q = &priv->rx_queue[chan];
2167 
2168 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2169 				    rx_q->dma_rx_phy, chan);
2170 
2171 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2172 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2173 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2174 				       rx_q->rx_tail_addr, chan);
2175 	}
2176 
2177 	/* DMA TX Channel Configuration */
2178 	for (chan = 0; chan < tx_channels_count; chan++) {
2179 		tx_q = &priv->tx_queue[chan];
2180 
2181 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2182 				    tx_q->dma_tx_phy, chan);
2183 
2184 		tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2185 			    (DMA_TX_SIZE * sizeof(struct dma_desc));
2186 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2187 				       tx_q->tx_tail_addr, chan);
2188 	}
2189 
2190 	/* DMA CSR Channel configuration */
2191 	for (chan = 0; chan < dma_csr_ch; chan++)
2192 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2193 
2194 	/* DMA Configuration */
2195 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2196 
2197 	if (priv->plat->axi)
2198 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2199 
2200 	return ret;
2201 }
2202 
2203 /**
2204  * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: timer_list pointer
2206  * Description:
2207  * This is the timer handler to directly invoke the stmmac_tx_clean.
2208  */
2209 static void stmmac_tx_timer(struct timer_list *t)
2210 {
2211 	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2212 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2213 	u32 queue;
2214 
2215 	/* let's scan all the tx queues */
2216 	for (queue = 0; queue < tx_queues_count; queue++)
2217 		stmmac_tx_clean(priv, queue);
2218 }
2219 
2220 /**
2221  * stmmac_init_tx_coalesce - init tx mitigation options.
2222  * @priv: driver private structure
2223  * Description:
2224  * This inits the transmit coalesce parameters: i.e. timer rate,
2225  * timer handler and default threshold used for enabling the
2226  * interrupt on completion bit.
2227  */
2228 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2229 {
2230 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2231 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2232 	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2233 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2234 	add_timer(&priv->txtimer);
2235 }
2236 
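/**
 * stmmac_set_rings_length - set the TX/RX ring lengths
 * @priv: driver private structure
 * Description: it programs the DMA ring length for each TX and RX channel.
 */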
2237 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2238 {
2239 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2240 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2241 	u32 chan;
2242 
2243 	/* set TX ring length */
2244 	for (chan = 0; chan < tx_channels_count; chan++)
2245 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2246 				(DMA_TX_SIZE - 1), chan);
2247 
2248 	/* set RX ring length */
2249 	for (chan = 0; chan < rx_channels_count; chan++)
2250 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2251 				(DMA_RX_SIZE - 1), chan);
2252 }
2253 
2254 /**
2255  *  stmmac_set_tx_queue_weight - Set TX queue weight
2256  *  @priv: driver private structure
 *  Description: It is used for setting the TX queue weights
2258  */
2259 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2260 {
2261 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2262 	u32 weight;
2263 	u32 queue;
2264 
2265 	for (queue = 0; queue < tx_queues_count; queue++) {
2266 		weight = priv->plat->tx_queues_cfg[queue].weight;
2267 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2268 	}
2269 }
2270 
2271 /**
2272  *  stmmac_configure_cbs - Configure CBS in TX queue
2273  *  @priv: driver private structure
2274  *  Description: It is used for configuring CBS in AVB TX queues
2275  */
2276 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2277 {
2278 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2279 	u32 mode_to_use;
2280 	u32 queue;
2281 
2282 	/* queue 0 is reserved for legacy traffic */
2283 	for (queue = 1; queue < tx_queues_count; queue++) {
2284 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2285 		if (mode_to_use == MTL_QUEUE_DCB)
2286 			continue;
2287 
2288 		stmmac_config_cbs(priv, priv->hw,
2289 				priv->plat->tx_queues_cfg[queue].send_slope,
2290 				priv->plat->tx_queues_cfg[queue].idle_slope,
2291 				priv->plat->tx_queues_cfg[queue].high_credit,
2292 				priv->plat->tx_queues_cfg[queue].low_credit,
2293 				queue);
2294 	}
2295 }
2296 
2297 /**
2298  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2299  *  @priv: driver private structure
2300  *  Description: It is used for mapping RX queues to RX dma channels
2301  */
2302 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2303 {
2304 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2305 	u32 queue;
2306 	u32 chan;
2307 
2308 	for (queue = 0; queue < rx_queues_count; queue++) {
2309 		chan = priv->plat->rx_queues_cfg[queue].chan;
2310 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2311 	}
2312 }
2313 
2314 /**
2315  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2316  *  @priv: driver private structure
2317  *  Description: It is used for configuring the RX Queue Priority
2318  */
2319 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2320 {
2321 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2322 	u32 queue;
2323 	u32 prio;
2324 
2325 	for (queue = 0; queue < rx_queues_count; queue++) {
2326 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2327 			continue;
2328 
2329 		prio = priv->plat->rx_queues_cfg[queue].prio;
2330 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2331 	}
2332 }
2333 
2334 /**
2335  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2336  *  @priv: driver private structure
2337  *  Description: It is used for configuring the TX Queue Priority
2338  */
2339 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2340 {
2341 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2342 	u32 queue;
2343 	u32 prio;
2344 
2345 	for (queue = 0; queue < tx_queues_count; queue++) {
2346 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2347 			continue;
2348 
2349 		prio = priv->plat->tx_queues_cfg[queue].prio;
2350 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2351 	}
2352 }
2353 
2354 /**
2355  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2356  *  @priv: driver private structure
2357  *  Description: It is used for configuring the RX queue routing
2358  */
2359 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2360 {
2361 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2362 	u32 queue;
2363 	u8 packet;
2364 
2365 	for (queue = 0; queue < rx_queues_count; queue++) {
2366 		/* no specific packet type routing specified for the queue */
2367 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2368 			continue;
2369 
2370 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2371 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2372 	}
2373 }
2374 
2375 /**
2376  *  stmmac_mtl_configuration - Configure MTL
2377  *  @priv: driver private structure
 *  Description: It is used for configuring the MTL
2379  */
2380 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2381 {
2382 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2383 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2384 
2385 	if (tx_queues_count > 1)
2386 		stmmac_set_tx_queue_weight(priv);
2387 
2388 	/* Configure MTL RX algorithms */
2389 	if (rx_queues_count > 1)
2390 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2391 				priv->plat->rx_sched_algorithm);
2392 
2393 	/* Configure MTL TX algorithms */
2394 	if (tx_queues_count > 1)
2395 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2396 				priv->plat->tx_sched_algorithm);
2397 
2398 	/* Configure CBS in AVB TX queues */
2399 	if (tx_queues_count > 1)
2400 		stmmac_configure_cbs(priv);
2401 
2402 	/* Map RX MTL to DMA channels */
2403 	stmmac_rx_queue_dma_chan_map(priv);
2404 
2405 	/* Enable MAC RX Queues */
2406 	stmmac_mac_enable_rx_queues(priv);
2407 
2408 	/* Set RX priorities */
2409 	if (rx_queues_count > 1)
2410 		stmmac_mac_config_rx_queues_prio(priv);
2411 
2412 	/* Set TX priorities */
2413 	if (tx_queues_count > 1)
2414 		stmmac_mac_config_tx_queues_prio(priv);
2415 
2416 	/* Set RX routing */
2417 	if (rx_queues_count > 1)
2418 		stmmac_mac_config_rx_queues_routing(priv);
2419 }
2420 
2421 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2422 {
2423 	if (priv->dma_cap.asp) {
2424 		netdev_info(priv->dev, "Enabling Safety Features\n");
2425 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2426 	} else {
2427 		netdev_info(priv->dev, "No Safety Features support found\n");
2428 	}
2429 }
2430 
2431 /**
2432  * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  @init_ptp: initialize PTP if set
 *  Description:
 *  this is the main function to setup the HW in a usable state: the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers) and the DMA is made ready to start
 *  receiving and transmitting.
2439  *  Return value:
2440  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2441  *  file on failure.
2442  */
2443 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2444 {
2445 	struct stmmac_priv *priv = netdev_priv(dev);
2446 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2447 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2448 	u32 chan;
2449 	int ret;
2450 
2451 	/* DMA initialization and SW reset */
2452 	ret = stmmac_init_dma_engine(priv);
2453 	if (ret < 0) {
2454 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2455 			   __func__);
2456 		return ret;
2457 	}
2458 
2459 	/* Copy the MAC addr into the HW  */
2460 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2461 
2462 	/* PS and related bits will be programmed according to the speed */
2463 	if (priv->hw->pcs) {
2464 		int speed = priv->plat->mac_port_sel_speed;
2465 
2466 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2467 		    (speed == SPEED_1000)) {
2468 			priv->hw->ps = speed;
2469 		} else {
2470 			dev_warn(priv->device, "invalid port speed\n");
2471 			priv->hw->ps = 0;
2472 		}
2473 	}
2474 
2475 	/* Initialize the MAC Core */
2476 	stmmac_core_init(priv, priv->hw, dev);
2477 
2478 	/* Initialize MTL*/
2479 	stmmac_mtl_configuration(priv);
2480 
2481 	/* Initialize Safety Features */
2482 	stmmac_safety_feat_configuration(priv);
2483 
2484 	ret = stmmac_rx_ipc(priv, priv->hw);
2485 	if (!ret) {
2486 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2487 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2488 		priv->hw->rx_csum = 0;
2489 	}
2490 
2491 	/* Enable the MAC Rx/Tx */
2492 	stmmac_mac_set(priv, priv->ioaddr, true);
2493 
2494 	/* Set the HW DMA mode and the COE */
2495 	stmmac_dma_operation_mode(priv);
2496 
2497 	stmmac_mmc_setup(priv);
2498 
2499 	if (init_ptp) {
2500 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2501 		if (ret < 0)
2502 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2503 
2504 		ret = stmmac_init_ptp(priv);
2505 		if (ret == -EOPNOTSUPP)
2506 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2507 		else if (ret)
2508 			netdev_warn(priv->dev, "PTP init failed\n");
2509 	}
2510 
2511 #ifdef CONFIG_DEBUG_FS
2512 	ret = stmmac_init_fs(dev);
2513 	if (ret < 0)
2514 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2515 			    __func__);
2516 #endif
2517 	/* Start the ball rolling... */
2518 	stmmac_start_all_dma(priv);
2519 
2520 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2521 
2522 	if (priv->use_riwt) {
2523 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2524 		if (!ret)
2525 			priv->rx_riwt = MAX_DMA_RIWT;
2526 	}
2527 
2528 	if (priv->hw->pcs)
2529 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2530 
2531 	/* set TX and RX rings length */
2532 	stmmac_set_rings_length(priv);
2533 
2534 	/* Enable TSO */
2535 	if (priv->tso) {
2536 		for (chan = 0; chan < tx_cnt; chan++)
2537 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2538 	}
2539 
2540 	return 0;
2541 }
2542 
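/**
 * stmmac_hw_teardown - disable what stmmac_hw_setup enabled
 * @dev: device pointer
 * Description: it disables the PTP reference clock that was enabled
 * during stmmac_hw_setup.
 */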
2543 static void stmmac_hw_teardown(struct net_device *dev)
2544 {
2545 	struct stmmac_priv *priv = netdev_priv(dev);
2546 
2547 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2548 }
2549 
2550 /**
2551  *  stmmac_open - open entry point of the driver
2552  *  @dev : pointer to the device structure.
2553  *  Description:
2554  *  This function is the open entry point of the driver.
2555  *  Return value:
2556  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2557  *  file on failure.
2558  */
2559 static int stmmac_open(struct net_device *dev)
2560 {
2561 	struct stmmac_priv *priv = netdev_priv(dev);
2562 	int ret;
2563 
2564 	stmmac_check_ether_addr(priv);
2565 
2566 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2567 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2568 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2569 		ret = stmmac_init_phy(dev);
2570 		if (ret) {
2571 			netdev_err(priv->dev,
2572 				   "%s: Cannot attach to PHY (error: %d)\n",
2573 				   __func__, ret);
2574 			return ret;
2575 		}
2576 	}
2577 
2578 	/* Extra statistics */
2579 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2580 	priv->xstats.threshold = tc;
2581 
2582 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2583 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2584 
2585 	ret = alloc_dma_desc_resources(priv);
2586 	if (ret < 0) {
2587 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2588 			   __func__);
2589 		goto dma_desc_error;
2590 	}
2591 
2592 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2593 	if (ret < 0) {
2594 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2595 			   __func__);
2596 		goto init_error;
2597 	}
2598 
2599 	ret = stmmac_hw_setup(dev, true);
2600 	if (ret < 0) {
2601 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2602 		goto init_error;
2603 	}
2604 
2605 	stmmac_init_tx_coalesce(priv);
2606 
2607 	if (dev->phydev)
2608 		phy_start(dev->phydev);
2609 
2610 	/* Request the IRQ lines */
2611 	ret = request_irq(dev->irq, stmmac_interrupt,
2612 			  IRQF_SHARED, dev->name, dev);
2613 	if (unlikely(ret < 0)) {
2614 		netdev_err(priv->dev,
2615 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2616 			   __func__, dev->irq, ret);
2617 		goto irq_error;
2618 	}
2619 
	/* Request the Wake IRQ in case another line is used for WoL */
2621 	if (priv->wol_irq != dev->irq) {
2622 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2623 				  IRQF_SHARED, dev->name, dev);
2624 		if (unlikely(ret < 0)) {
2625 			netdev_err(priv->dev,
2626 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2627 				   __func__, priv->wol_irq, ret);
2628 			goto wolirq_error;
2629 		}
2630 	}
2631 
	/* Request the LPI IRQ in case a dedicated line is used */
2633 	if (priv->lpi_irq > 0) {
2634 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2635 				  dev->name, dev);
2636 		if (unlikely(ret < 0)) {
2637 			netdev_err(priv->dev,
2638 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2639 				   __func__, priv->lpi_irq, ret);
2640 			goto lpiirq_error;
2641 		}
2642 	}
2643 
2644 	stmmac_enable_all_queues(priv);
2645 	stmmac_start_all_queues(priv);
2646 
2647 	return 0;
2648 
2649 lpiirq_error:
2650 	if (priv->wol_irq != dev->irq)
2651 		free_irq(priv->wol_irq, dev);
2652 wolirq_error:
2653 	free_irq(dev->irq, dev);
2654 irq_error:
2655 	if (dev->phydev)
2656 		phy_stop(dev->phydev);
2657 
2658 	del_timer_sync(&priv->txtimer);
2659 	stmmac_hw_teardown(dev);
2660 init_error:
2661 	free_dma_desc_resources(priv);
2662 dma_desc_error:
2663 	if (dev->phydev)
2664 		phy_disconnect(dev->phydev);
2665 
2666 	return ret;
2667 }
2668 
2669 /**
2670  *  stmmac_release - close entry point of the driver
2671  *  @dev : device pointer.
2672  *  Description:
2673  *  This is the stop entry point of the driver.
2674  */
2675 static int stmmac_release(struct net_device *dev)
2676 {
2677 	struct stmmac_priv *priv = netdev_priv(dev);
2678 
2679 	if (priv->eee_enabled)
2680 		del_timer_sync(&priv->eee_ctrl_timer);
2681 
2682 	/* Stop and disconnect the PHY */
2683 	if (dev->phydev) {
2684 		phy_stop(dev->phydev);
2685 		phy_disconnect(dev->phydev);
2686 	}
2687 
2688 	stmmac_stop_all_queues(priv);
2689 
2690 	stmmac_disable_all_queues(priv);
2691 
2692 	del_timer_sync(&priv->txtimer);
2693 
2694 	/* Free the IRQ lines */
2695 	free_irq(dev->irq, dev);
2696 	if (priv->wol_irq != dev->irq)
2697 		free_irq(priv->wol_irq, dev);
2698 	if (priv->lpi_irq > 0)
2699 		free_irq(priv->lpi_irq, dev);
2700 
2701 	/* Stop TX/RX DMA and clear the descriptors */
2702 	stmmac_stop_all_dma(priv);
2703 
2704 	/* Release and free the Rx/Tx resources */
2705 	free_dma_desc_resources(priv);
2706 
2707 	/* Disable the MAC Rx/Tx */
2708 	stmmac_mac_set(priv, priv->ioaddr, false);
2709 
2710 	netif_carrier_off(dev);
2711 
2712 #ifdef CONFIG_DEBUG_FS
2713 	stmmac_exit_fs(dev);
2714 #endif
2715 
2716 	stmmac_release_ptp(priv);
2717 
2718 	return 0;
2719 }
2720 
2721 /**
 *  stmmac_tso_allocator - fill TSO descriptors for a given buffer
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills descriptors and requests new descriptors according
 *  to the buffer length to fill
2731  */
2732 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2733 				 int total_len, bool last_segment, u32 queue)
2734 {
2735 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2736 	struct dma_desc *desc;
2737 	u32 buff_size;
2738 	int tmp_len;
2739 
2740 	tmp_len = total_len;
2741 
2742 	while (tmp_len > 0) {
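		/* Take the next descriptor in the ring and point it at the
		 * next chunk of the payload, up to TSO_MAX_BUFF_SIZE bytes.
		 */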
2743 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2744 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2745 		desc = tx_q->dma_tx + tx_q->cur_tx;
2746 
2747 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2748 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2749 			    TSO_MAX_BUFF_SIZE : tmp_len;
2750 
2751 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2752 				0, 1,
2753 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2754 				0, 0);
2755 
2756 		tmp_len -= TSO_MAX_BUFF_SIZE;
2757 	}
2758 }
2759 
2760 /**
2761  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2762  *  @skb : the socket buffer
2763  *  @dev : device pointer
2764  *  Description: this is the transmit function that is called on TSO frames
2765  *  (support available on GMAC4 and newer chips).
 *  The diagram below shows the ring programming in case of TSO frames:
2767  *
2768  *  First Descriptor
2769  *   --------
2770  *   | DES0 |---> buffer1 = L2/L3/L4 header
2771  *   | DES1 |---> TCP Payload (can continue on next descr...)
2772  *   | DES2 |---> buffer 1 and 2 len
2773  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2774  *   --------
2775  *	|
2776  *     ...
2777  *	|
2778  *   --------
2779  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2780  *   | DES1 | --|
2781  *   | DES2 | --> buffer 1 and 2 len
2782  *   | DES3 |
2783  *   --------
2784  *
 * The MSS is fixed while TSO is enabled, so the TDES3 ctx field only needs
 * to be programmed when the MSS value changes.
2786  */
2787 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2788 {
2789 	struct dma_desc *desc, *first, *mss_desc = NULL;
2790 	struct stmmac_priv *priv = netdev_priv(dev);
2791 	int nfrags = skb_shinfo(skb)->nr_frags;
2792 	u32 queue = skb_get_queue_mapping(skb);
2793 	unsigned int first_entry, des;
2794 	struct stmmac_tx_queue *tx_q;
2795 	int tmp_pay_len = 0;
2796 	u32 pay_len, mss;
2797 	u8 proto_hdr_len;
2798 	int i;
2799 
2800 	tx_q = &priv->tx_queue[queue];
2801 
2802 	/* Compute header lengths */
2803 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2804 
	/* Desc availability based on the threshold should be safe enough */
2806 	if (unlikely(stmmac_tx_avail(priv, queue) <
2807 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2808 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2809 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2810 								queue));
2811 			/* This is a hard error, log it. */
2812 			netdev_err(priv->dev,
2813 				   "%s: Tx Ring full when queue awake\n",
2814 				   __func__);
2815 		}
2816 		return NETDEV_TX_BUSY;
2817 	}
2818 
2819 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2820 
2821 	mss = skb_shinfo(skb)->gso_size;
2822 
2823 	/* set new MSS value if needed */
2824 	if (mss != tx_q->mss) {
2825 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2826 		stmmac_set_mss(priv, mss_desc, mss);
2827 		tx_q->mss = mss;
2828 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2829 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2830 	}
2831 
2832 	if (netif_msg_tx_queued(priv)) {
2833 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2834 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2835 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2836 			skb->data_len);
2837 	}
2838 
2839 	first_entry = tx_q->cur_tx;
2840 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2841 
2842 	desc = tx_q->dma_tx + first_entry;
2843 	first = desc;
2844 
2845 	/* first descriptor: fill Headers on Buf1 */
2846 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2847 			     DMA_TO_DEVICE);
2848 	if (dma_mapping_error(priv->device, des))
2849 		goto dma_map_err;
2850 
2851 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2852 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2853 
2854 	first->des0 = cpu_to_le32(des);
2855 
2856 	/* Fill start of payload in buff2 of first descriptor */
2857 	if (pay_len)
2858 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2859 
2860 	/* If needed take extra descriptors to fill the remaining payload */
2861 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2862 
2863 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2864 
2865 	/* Prepare fragments */
2866 	for (i = 0; i < nfrags; i++) {
2867 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2868 
2869 		des = skb_frag_dma_map(priv->device, frag, 0,
2870 				       skb_frag_size(frag),
2871 				       DMA_TO_DEVICE);
2872 		if (dma_mapping_error(priv->device, des))
2873 			goto dma_map_err;
2874 
2875 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2876 				     (i == nfrags - 1), queue);
2877 
2878 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2879 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2880 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2881 	}
2882 
2883 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2884 
2885 	/* Only the last descriptor gets to point to the skb. */
2886 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2887 
2888 	/* We've used all descriptors we need for this skb, however,
2889 	 * advance cur_tx so that it references a fresh descriptor.
2890 	 * ndo_start_xmit will fill this descriptor the next time it's
2891 	 * called and stmmac_tx_clean may clean up to this descriptor.
2892 	 */
2893 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2894 
2895 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2896 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2897 			  __func__);
2898 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2899 	}
2900 
2901 	dev->stats.tx_bytes += skb->len;
2902 	priv->xstats.tx_tso_frames++;
2903 	priv->xstats.tx_tso_nfrags += nfrags;
2904 
2905 	/* Manage tx mitigation */
2906 	priv->tx_count_frames += nfrags + 1;
2907 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2908 		mod_timer(&priv->txtimer,
2909 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2910 	} else {
2911 		priv->tx_count_frames = 0;
2912 		stmmac_set_tx_ic(priv, desc);
2913 		priv->xstats.tx_set_ic_bit++;
2914 	}
2915 
2916 	skb_tx_timestamp(skb);
2917 
2918 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2919 		     priv->hwts_tx_en)) {
2920 		/* declare that device is doing timestamping */
2921 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2922 		stmmac_enable_tx_timestamp(priv, first);
2923 	}
2924 
2925 	/* Complete the first descriptor before granting the DMA */
2926 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2927 			proto_hdr_len,
2928 			pay_len,
2929 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2930 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2931 
2932 	/* If context desc is used to change MSS */
2933 	if (mss_desc) {
2934 		/* Make sure that first descriptor has been completely
2935 		 * written, including its own bit. This is because MSS is
2936 		 * actually before first descriptor, so we need to make
2937 		 * sure that MSS's own bit is the last thing written.
2938 		 */
2939 		dma_wmb();
2940 		stmmac_set_tx_owner(priv, mss_desc);
2941 	}
2942 
	/* The own bit must be the latest setting done when preparing the
	 * descriptor and then a barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
2946 	 */
2947 	wmb();
2948 
2949 	if (netif_msg_pktdata(priv)) {
2950 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2951 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2952 			tx_q->cur_tx, first, nfrags);
2953 
2954 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2955 
2956 		pr_info(">>> frame to be transmitted: ");
2957 		print_pkt(skb->data, skb_headlen(skb));
2958 	}
2959 
2960 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2961 
2962 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2963 
2964 	return NETDEV_TX_OK;
2965 
2966 dma_map_err:
2967 	dev_err(priv->device, "Tx dma map failed\n");
2968 	dev_kfree_skb(skb);
2969 	priv->dev->stats.tx_dropped++;
2970 	return NETDEV_TX_OK;
2971 }
2972 
2973 /**
2974  *  stmmac_xmit - Tx entry point of the driver
2975  *  @skb : the socket buffer
2976  *  @dev : device pointer
2977  *  Description : this is the tx entry point of the driver.
2978  *  It programs the chain or the ring and supports oversized frames
2979  *  and SG feature.
2980  */
2981 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2982 {
2983 	struct stmmac_priv *priv = netdev_priv(dev);
2984 	unsigned int nopaged_len = skb_headlen(skb);
2985 	int i, csum_insertion = 0, is_jumbo = 0;
2986 	u32 queue = skb_get_queue_mapping(skb);
2987 	int nfrags = skb_shinfo(skb)->nr_frags;
2988 	int entry;
2989 	unsigned int first_entry;
2990 	struct dma_desc *desc, *first;
2991 	struct stmmac_tx_queue *tx_q;
2992 	unsigned int enh_desc;
2993 	unsigned int des;
2994 
2995 	tx_q = &priv->tx_queue[queue];
2996 
2997 	/* Manage oversized TCP frames for GMAC4 device */
2998 	if (skb_is_gso(skb) && priv->tso) {
2999 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3000 			return stmmac_tso_xmit(skb, dev);
3001 	}
3002 
3003 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3004 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3005 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3006 								queue));
3007 			/* This is a hard error, log it. */
3008 			netdev_err(priv->dev,
3009 				   "%s: Tx Ring full when queue awake\n",
3010 				   __func__);
3011 		}
3012 		return NETDEV_TX_BUSY;
3013 	}
3014 
3015 	if (priv->tx_path_in_lpi_mode)
3016 		stmmac_disable_eee_mode(priv);
3017 
3018 	entry = tx_q->cur_tx;
3019 	first_entry = entry;
3020 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3021 
3022 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3023 
3024 	if (likely(priv->extend_desc))
3025 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3026 	else
3027 		desc = tx_q->dma_tx + entry;
3028 
3029 	first = desc;
3030 
3031 	enh_desc = priv->plat->enh_desc;
3032 	/* To program the descriptors according to the size of the frame */
3033 	if (enh_desc)
3034 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3035 
3036 	if (unlikely(is_jumbo)) {
3037 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3038 		if (unlikely(entry < 0) && (entry != -EINVAL))
3039 			goto dma_map_err;
3040 	}
3041 
3042 	for (i = 0; i < nfrags; i++) {
3043 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3044 		int len = skb_frag_size(frag);
3045 		bool last_segment = (i == (nfrags - 1));
3046 
3047 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3048 		WARN_ON(tx_q->tx_skbuff[entry]);
3049 
3050 		if (likely(priv->extend_desc))
3051 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3052 		else
3053 			desc = tx_q->dma_tx + entry;
3054 
3055 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3056 				       DMA_TO_DEVICE);
3057 		if (dma_mapping_error(priv->device, des))
3058 			goto dma_map_err; /* should reuse desc w/o issues */
3059 
3060 		tx_q->tx_skbuff_dma[entry].buf = des;
3061 
3062 		stmmac_set_desc_addr(priv, desc, des);
3063 
3064 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3065 		tx_q->tx_skbuff_dma[entry].len = len;
3066 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3067 
3068 		/* Prepare the descriptor and set the own bit too */
3069 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3070 				priv->mode, 1, last_segment, skb->len);
3071 	}
3072 
3073 	/* Only the last descriptor gets to point to the skb. */
3074 	tx_q->tx_skbuff[entry] = skb;
3075 
3076 	/* We've used all descriptors we need for this skb, however,
3077 	 * advance cur_tx so that it references a fresh descriptor.
3078 	 * ndo_start_xmit will fill this descriptor the next time it's
3079 	 * called and stmmac_tx_clean may clean up to this descriptor.
3080 	 */
3081 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3082 	tx_q->cur_tx = entry;
3083 
3084 	if (netif_msg_pktdata(priv)) {
3085 		void *tx_head;
3086 
3087 		netdev_dbg(priv->dev,
3088 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3089 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3090 			   entry, first, nfrags);
3091 
3092 		if (priv->extend_desc)
3093 			tx_head = (void *)tx_q->dma_etx;
3094 		else
3095 			tx_head = (void *)tx_q->dma_tx;
3096 
3097 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3098 
3099 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3100 		print_pkt(skb->data, skb->len);
3101 	}
3102 
3103 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3104 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3105 			  __func__);
3106 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3107 	}
3108 
3109 	dev->stats.tx_bytes += skb->len;
3110 
3111 	/* According to the coalesce parameter the IC bit for the latest
3112 	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
3114 	 * element in case of no SG.
3115 	 */
3116 	priv->tx_count_frames += nfrags + 1;
3117 	if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
3118 	    !priv->tx_timer_armed) {
3119 		mod_timer(&priv->txtimer,
3120 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3121 		priv->tx_timer_armed = true;
3122 	} else {
3123 		priv->tx_count_frames = 0;
3124 		stmmac_set_tx_ic(priv, desc);
3125 		priv->xstats.tx_set_ic_bit++;
3126 		priv->tx_timer_armed = false;
3127 	}
3128 
3129 	skb_tx_timestamp(skb);
3130 
3131 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3132 	 * problems because all the descriptors are actually ready to be
3133 	 * passed to the DMA engine.
3134 	 */
3135 	if (likely(!is_jumbo)) {
3136 		bool last_segment = (nfrags == 0);
3137 
3138 		des = dma_map_single(priv->device, skb->data,
3139 				     nopaged_len, DMA_TO_DEVICE);
3140 		if (dma_mapping_error(priv->device, des))
3141 			goto dma_map_err;
3142 
3143 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3144 
3145 		stmmac_set_desc_addr(priv, first, des);
3146 
3147 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3148 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3149 
3150 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3151 			     priv->hwts_tx_en)) {
3152 			/* declare that device is doing timestamping */
3153 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3154 			stmmac_enable_tx_timestamp(priv, first);
3155 		}
3156 
3157 		/* Prepare the first descriptor setting the OWN bit too */
3158 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3159 				csum_insertion, priv->mode, 1, last_segment,
3160 				skb->len);
3161 
		/* The own bit must be the latest setting done when preparing the
		 * descriptor and then a barrier is needed to make sure that
		 * all is coherent before granting the DMA engine.
3165 		 */
3166 		wmb();
3167 	}
3168 
3169 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3170 
3171 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
3172 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3173 
3174 	return NETDEV_TX_OK;
3175 
3176 dma_map_err:
3177 	netdev_err(priv->dev, "Tx DMA map failed\n");
3178 	dev_kfree_skb(skb);
3179 	priv->dev->stats.tx_dropped++;
3180 	return NETDEV_TX_OK;
3181 }
3182 
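/**
 * stmmac_rx_vlan - strip the VLAN tag from a received frame
 * @dev: device pointer
 * @skb: the received socket buffer
 * Description: when VLAN RX offloading is advertised and the frame
 * carries a VLAN tag, pop the tag from the packet data and record it
 * in the skb.
 */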
3183 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3184 {
3185 	struct ethhdr *ehdr;
3186 	u16 vlanid;
3187 
3188 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3189 	    NETIF_F_HW_VLAN_CTAG_RX &&
3190 	    !__vlan_get_tag(skb, &vlanid)) {
3191 		/* pop the vlan tag */
3192 		ehdr = (struct ethhdr *)skb->data;
3193 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3194 		skb_pull(skb, VLAN_HLEN);
3195 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3196 	}
3197 }
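/* Return 1 when the zero-copy threshold has been reached; the rx path
 * then copies the frame instead of handing off the preallocated buffer.
 */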
3200 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3201 {
3202 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3203 		return 0;
3204 
3205 	return 1;
3206 }
3207 
3208 /**
3209  * stmmac_rx_refill - refill used skb preallocated buffers
3210  * @priv: driver private structure
3211  * @queue: RX queue index
 * Description: this reallocates the skbs for the reception process that
 * is based on zero-copy.
3214  */
3215 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3216 {
3217 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3218 	int dirty = stmmac_rx_dirty(priv, queue);
3219 	unsigned int entry = rx_q->dirty_rx;
3220 
	int bfsize = priv->dma_buf_sz;

3223 	while (dirty-- > 0) {
3224 		struct dma_desc *p;
3225 
3226 		if (priv->extend_desc)
3227 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3228 		else
3229 			p = rx_q->dma_rx + entry;
3230 
3231 		if (likely(!rx_q->rx_skbuff[entry])) {
3232 			struct sk_buff *skb;
3233 
3234 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3235 			if (unlikely(!skb)) {
3236 				/* so for a while no zero-copy! */
3237 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3238 				if (unlikely(net_ratelimit()))
3239 					dev_err(priv->device,
3240 						"fail to alloc skb entry %d\n",
3241 						entry);
3242 				break;
3243 			}
3244 
3245 			rx_q->rx_skbuff[entry] = skb;
3246 			rx_q->rx_skbuff_dma[entry] =
3247 			    dma_map_single(priv->device, skb->data, bfsize,
3248 					   DMA_FROM_DEVICE);
3249 			if (dma_mapping_error(priv->device,
3250 					      rx_q->rx_skbuff_dma[entry])) {
3251 				netdev_err(priv->dev, "Rx DMA map failed\n");
3252 				dev_kfree_skb(skb);
3253 				break;
3254 			}
3255 
3256 			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3257 			stmmac_refill_desc3(priv, rx_q, p);
3258 
3259 			if (rx_q->rx_zeroc_thresh > 0)
3260 				rx_q->rx_zeroc_thresh--;
3261 
3262 			netif_dbg(priv, rx_status, priv->dev,
3263 				  "refill entry #%d\n", entry);
3264 		}
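		/* Ensure all descriptor fields are written before the own
		 * bit is given back to the DMA.
		 */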
3265 		dma_wmb();
3266 
3267 		stmmac_set_rx_owner(priv, p, priv->use_riwt);
3268 
3269 		dma_wmb();
3270 
3271 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3272 	}
3273 	rx_q->dirty_rx = entry;
3274 }
3275 
3276 /**
3277  * stmmac_rx - manage the receive process
3278  * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description: this is the function called by the napi poll method.
3282  * It gets all the frames inside the ring.
3283  */
3284 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3285 {
3286 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3287 	unsigned int entry = rx_q->cur_rx;
3288 	int coe = priv->hw->rx_csum;
3289 	unsigned int next_entry;
3290 	unsigned int count = 0;
3291 
3292 	if (netif_msg_rx_status(priv)) {
3293 		void *rx_head;
3294 
3295 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3296 		if (priv->extend_desc)
3297 			rx_head = (void *)rx_q->dma_erx;
3298 		else
3299 			rx_head = (void *)rx_q->dma_rx;
3300 
3301 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3302 	}
3303 	while (count < limit) {
3304 		int status;
3305 		struct dma_desc *p;
3306 		struct dma_desc *np;
3307 
3308 		if (priv->extend_desc)
3309 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3310 		else
3311 			p = rx_q->dma_rx + entry;
3312 
3313 		/* read the status of the incoming frame */
3314 		status = stmmac_rx_status(priv, &priv->dev->stats,
3315 				&priv->xstats, p);
3316 		/* check if managed by the DMA otherwise go ahead */
3317 		if (unlikely(status & dma_own))
3318 			break;
3319 
3320 		count++;
3321 
3322 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3323 		next_entry = rx_q->cur_rx;
3324 
3325 		if (priv->extend_desc)
3326 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3327 		else
3328 			np = rx_q->dma_rx + next_entry;
3329 
3330 		prefetch(np);
3331 
3332 		if (priv->extend_desc)
3333 			stmmac_rx_extended_status(priv, &priv->dev->stats,
3334 					&priv->xstats, rx_q->dma_erx + entry);
3335 		if (unlikely(status == discard_frame)) {
3336 			priv->dev->stats.rx_errors++;
3337 			if (priv->hwts_rx_en && !priv->extend_desc) {
3338 				/* DESC2 & DESC3 will be overwritten by device
3339 				 * with timestamp value, hence reinitialize
3340 				 * them in stmmac_rx_refill() function so that
3341 				 * device can reuse it.
3342 				 */
3343 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3344 				rx_q->rx_skbuff[entry] = NULL;
3345 				dma_unmap_single(priv->device,
3346 						 rx_q->rx_skbuff_dma[entry],
3347 						 priv->dma_buf_sz,
3348 						 DMA_FROM_DEVICE);
3349 			}
3350 		} else {
3351 			struct sk_buff *skb;
3352 			int frame_len;
3353 			unsigned int des;
3354 
3355 			stmmac_get_desc_addr(priv, p, &des);
3356 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3357 
3358 			/*  If frame length is greater than skb buffer size
3359 			 *  (preallocated during init) then the packet is
3360 			 *  ignored
3361 			 */
3362 			if (frame_len > priv->dma_buf_sz) {
3363 				netdev_err(priv->dev,
3364 					   "len %d larger than size (%d)\n",
3365 					   frame_len, priv->dma_buf_sz);
3366 				priv->dev->stats.rx_length_errors++;
3367 				break;
3368 			}
3369 
3370 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3371 			 * Type frames (LLC/LLC-SNAP)
3372 			 *
3373 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3374 			 * feature is always disabled and packets need to be
3375 			 * stripped manually.
3376 			 */
3377 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3378 			    unlikely(status != llc_snap))
3379 				frame_len -= ETH_FCS_LEN;
3380 
3381 			if (netif_msg_rx_status(priv)) {
3382 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3383 					   p, entry, des);
3384 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3385 					   frame_len, status);
3386 			}
3387 
			/* Zero-copy is always used for all sizes in case of
			 * GMAC4 because the used descriptors always need to
			 * be refilled.
			 */
3392 			if (unlikely(!priv->plat->has_gmac4 &&
3393 				     ((frame_len < priv->rx_copybreak) ||
3394 				     stmmac_rx_threshold_count(rx_q)))) {
3395 				skb = netdev_alloc_skb_ip_align(priv->dev,
3396 								frame_len);
3397 				if (unlikely(!skb)) {
3398 					if (net_ratelimit())
3399 						dev_warn(priv->device,
3400 							 "packet dropped\n");
3401 					priv->dev->stats.rx_dropped++;
3402 					break;
3403 				}
3404 
3405 				dma_sync_single_for_cpu(priv->device,
3406 							rx_q->rx_skbuff_dma
3407 							[entry], frame_len,
3408 							DMA_FROM_DEVICE);
3409 				skb_copy_to_linear_data(skb,
3410 							rx_q->
3411 							rx_skbuff[entry]->data,
3412 							frame_len);
3413 
3414 				skb_put(skb, frame_len);
3415 				dma_sync_single_for_device(priv->device,
3416 							   rx_q->rx_skbuff_dma
3417 							   [entry], frame_len,
3418 							   DMA_FROM_DEVICE);
3419 			} else {
3420 				skb = rx_q->rx_skbuff[entry];
3421 				if (unlikely(!skb)) {
3422 					netdev_err(priv->dev,
3423 						   "%s: Inconsistent Rx chain\n",
3424 						   priv->dev->name);
3425 					priv->dev->stats.rx_dropped++;
3426 					break;
3427 				}
3428 				prefetch(skb->data - NET_IP_ALIGN);
3429 				rx_q->rx_skbuff[entry] = NULL;
3430 				rx_q->rx_zeroc_thresh++;
3431 
3432 				skb_put(skb, frame_len);
3433 				dma_unmap_single(priv->device,
3434 						 rx_q->rx_skbuff_dma[entry],
3435 						 priv->dma_buf_sz,
3436 						 DMA_FROM_DEVICE);
3437 			}
3438 
3439 			if (netif_msg_pktdata(priv)) {
3440 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3441 					   frame_len);
3442 				print_pkt(skb->data, frame_len);
3443 			}
3444 
3445 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3446 
3447 			stmmac_rx_vlan(priv->dev, skb);
3448 
3449 			skb->protocol = eth_type_trans(skb, priv->dev);
3450 
3451 			if (unlikely(!coe))
3452 				skb_checksum_none_assert(skb);
3453 			else
3454 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3455 
3456 			napi_gro_receive(&rx_q->napi, skb);
3457 
3458 			priv->dev->stats.rx_packets++;
3459 			priv->dev->stats.rx_bytes += frame_len;
3460 		}
3461 		entry = next_entry;
3462 	}
3463 
3464 	stmmac_rx_refill(priv, queue);
3465 
3466 	priv->xstats.rx_pkt_n += count;
3467 
3468 	return count;
3469 }
3470 
3471 /**
3472  *  stmmac_poll - stmmac poll method (NAPI)
3473  *  @napi : pointer to the napi structure.
3474  *  @budget : maximum number of packets that the current CPU can receive from
3475  *	      all interfaces.
3476  *  Description :
3477  *  To look at the incoming frames and clear the tx resources.
3478  */
3479 static int stmmac_poll(struct napi_struct *napi, int budget)
3480 {
3481 	struct stmmac_rx_queue *rx_q =
3482 		container_of(napi, struct stmmac_rx_queue, napi);
3483 	struct stmmac_priv *priv = rx_q->priv_data;
3484 	u32 tx_count = priv->plat->tx_queues_to_use;
3485 	u32 chan = rx_q->queue_index;
3486 	int work_done = 0;
3487 	u32 queue;
3488 
3489 	priv->xstats.napi_poll++;
3490 
3491 	/* check all the queues */
3492 	for (queue = 0; queue < tx_count; queue++)
3493 		stmmac_tx_clean(priv, queue);
3494 
3495 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3496 	if (work_done < budget) {
3497 		napi_complete_done(napi, work_done);
3498 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3499 	}
3500 	return work_done;
3501 }
3502 
3503 /**
3504  *  stmmac_tx_timeout
3505  *  @dev : Pointer to net device structure
3506  *  Description: this function is called when a packet transmission fails to
3507  *   complete within a reasonable time. The driver will mark the error in the
3508  *   netdev structure and arrange for the device to be reset to a sane state
3509  *   in order to transmit a new packet.
3510  */
3511 static void stmmac_tx_timeout(struct net_device *dev)
3512 {
3513 	struct stmmac_priv *priv = netdev_priv(dev);
3514 
3515 	stmmac_global_err(priv);
3516 }
3517 
3518 /**
3519  *  stmmac_set_rx_mode - entry point for multicast addressing
3520  *  @dev : pointer to the device structure
3521  *  Description:
3522  *  This function is a driver entry point which gets called by the kernel
3523  *  whenever multicast addresses must be enabled/disabled.
3524  *  Return value:
3525  *  void.
3526  */
3527 static void stmmac_set_rx_mode(struct net_device *dev)
3528 {
3529 	struct stmmac_priv *priv = netdev_priv(dev);
3530 
3531 	stmmac_set_filter(priv, priv->hw, dev);
3532 }
3533 
3534 /**
3535  *  stmmac_change_mtu - entry point to change MTU size for the device.
3536  *  @dev : device pointer.
3537  *  @new_mtu : the new MTU size for the device.
3538  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
3539  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3540  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3541  *  Return value:
3542  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3543  *  file on failure.
3544  */
3545 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3546 {
3547 	struct stmmac_priv *priv = netdev_priv(dev);
3548 
3549 	if (netif_running(dev)) {
3550 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3551 		return -EBUSY;
3552 	}
3553 
3554 	dev->mtu = new_mtu;
3555 
3556 	netdev_update_features(dev);
3557 
3558 	return 0;
3559 }
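
/*
 * Example (illustrative): since stmmac_change_mtu() returns -EBUSY while
 * the interface is running, an MTU change from user space must bring the
 * link down first (assuming the device is named eth0):
 *
 *	ip link set dev eth0 down
 *	ip link set dev eth0 mtu 9000
 *	ip link set dev eth0 up
 */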
3560 
3561 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3562 					     netdev_features_t features)
3563 {
3564 	struct stmmac_priv *priv = netdev_priv(dev);
3565 
3566 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3567 		features &= ~NETIF_F_RXCSUM;
3568 
3569 	if (!priv->plat->tx_coe)
3570 		features &= ~NETIF_F_CSUM_MASK;
3571 
3572 	/* Some GMAC devices have buggy Jumbo frame support that
3573 	 * requires the TX COE to be disabled for oversized frames
3574 	 * (due to limited buffer sizes). In this case we disable
3575 	 * TX csum insertion in the TDES and don't use Store-and-Forward.
3576 	 */
3577 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3578 		features &= ~NETIF_F_CSUM_MASK;
3579 
3580 	/* Disable TSO if requested via ethtool */
3581 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3582 		if (features & NETIF_F_TSO)
3583 			priv->tso = true;
3584 		else
3585 			priv->tso = false;
3586 	}
3587 
3588 	return features;
3589 }
3590 
3591 static int stmmac_set_features(struct net_device *netdev,
3592 			       netdev_features_t features)
3593 {
3594 	struct stmmac_priv *priv = netdev_priv(netdev);
3595 
3596 	/* Keep the COE Type if RX checksum offload is supported */
3597 	if (features & NETIF_F_RXCSUM)
3598 		priv->hw->rx_csum = priv->plat->rx_coe;
3599 	else
3600 		priv->hw->rx_csum = 0;
3601 	/* No return value check is needed: rx_coe was validated earlier
3602 	 * and will be fixed up if there is an issue.
3603 	 */
3604 	stmmac_rx_ipc(priv, priv->hw);
3605 
3606 	return 0;
3607 }
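
/*
 * Example (illustrative): this ndo is reached when RX checksum offload is
 * toggled from user space (assuming eth0):
 *
 *	ethtool -K eth0 rx off	# clears NETIF_F_RXCSUM -> hw->rx_csum = 0
 *	ethtool -K eth0 rx on	# restores priv->plat->rx_coe
 */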
3608 
3609 /**
3610  *  stmmac_interrupt - main ISR
3611  *  @irq: interrupt number.
3612  *  @dev_id: to pass the net device pointer.
3613  *  Description: this is the main driver interrupt service routine.
3614  *  It can call:
3615  *  o DMA service routine (to manage incoming frame reception and transmission
3616  *    status)
3617  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3618  *    interrupts.
3619  */
3620 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3621 {
3622 	struct net_device *dev = (struct net_device *)dev_id;
3623 	struct stmmac_priv *priv;
3624 	u32 rx_cnt, tx_cnt, queues_count, queue;
3625 
3626 	/* Validate dev before dereferencing its private data */
3627 	if (unlikely(!dev)) {
3628 		pr_err("%s: invalid dev pointer\n", __func__);
3629 		return IRQ_NONE;
3630 	}
3631 
3632 	priv = netdev_priv(dev);
3633 	rx_cnt = priv->plat->rx_queues_to_use;
3634 	tx_cnt = priv->plat->tx_queues_to_use;
3635 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3636 
3637 	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);
3638 
3639 	/* Check if adapter is up */
3640 	if (test_bit(STMMAC_DOWN, &priv->state))
3641 		return IRQ_HANDLED;
3642 	/* Check if a fatal error happened */
3643 	if (stmmac_safety_feat_interrupt(priv))
3644 		return IRQ_HANDLED;
3645 
3646 	/* To handle GMAC own interrupts */
3647 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3648 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3649 		int mtl_status;
3650 
3651 		if (unlikely(status)) {
3652 			/* For LPI we need to save the tx status */
3653 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3654 				priv->tx_path_in_lpi_mode = true;
3655 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3656 				priv->tx_path_in_lpi_mode = false;
3657 		}
3658 
3659 		for (queue = 0; queue < queues_count; queue++) {
3660 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3661 
3662 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3663 								queue);
3664 			if (mtl_status != -EINVAL)
3665 				status |= mtl_status;
3666 
3667 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3668 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3669 						       rx_q->rx_tail_addr,
3670 						       queue);
3671 		}
3672 
3673 		/* PCS link status */
3674 		if (priv->hw->pcs) {
3675 			if (priv->xstats.pcs_link)
3676 				netif_carrier_on(dev);
3677 			else
3678 				netif_carrier_off(dev);
3679 		}
3680 	}
3681 
3682 	/* To handle DMA interrupts */
3683 	stmmac_dma_interrupt(priv);
3684 
3685 	return IRQ_HANDLED;
3686 }
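
/*
 * Note (illustrative): this handler is registered in stmmac_open(), roughly
 * as request_irq(dev->irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev),
 * which is why dev_id carries the net_device pointer.
 */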
3687 
3688 #ifdef CONFIG_NET_POLL_CONTROLLER
3689 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3690  * to allow network I/O with interrupts disabled.
3691  */
3692 static void stmmac_poll_controller(struct net_device *dev)
3693 {
3694 	disable_irq(dev->irq);
3695 	stmmac_interrupt(dev->irq, dev);
3696 	enable_irq(dev->irq);
3697 }
3698 #endif
3699 
3700 /**
3701  *  stmmac_ioctl - Entry point for the Ioctl
3702  *  @dev: Device pointer.
3703  *  @rq: An IOCTL-specific structure that can contain a pointer to
3704  *  a proprietary structure used to pass information to the driver.
3705  *  @cmd: IOCTL command
3706  *  Description:
3707  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3708  */
3709 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3710 {
3711 	int ret = -EOPNOTSUPP;
3712 
3713 	if (!netif_running(dev))
3714 		return -EINVAL;
3715 
3716 	switch (cmd) {
3717 	case SIOCGMIIPHY:
3718 	case SIOCGMIIREG:
3719 	case SIOCSMIIREG:
3720 		if (!dev->phydev)
3721 			return -EINVAL;
3722 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3723 		break;
3724 	case SIOCSHWTSTAMP:
3725 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3726 		break;
3727 	default:
3728 		break;
3729 	}
3730 
3731 	return ret;
3732 }
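
/*
 * Example (illustrative): enabling HW timestamping goes through
 * SIOCSHWTSTAMP and ends up in stmmac_hwtstamp_ioctl(). A minimal,
 * hypothetical user-space sketch:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */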
3733 
3734 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3735 				    void *cb_priv)
3736 {
3737 	struct stmmac_priv *priv = cb_priv;
3738 	int ret = -EOPNOTSUPP;
3739 
3740 	stmmac_disable_all_queues(priv);
3741 
3742 	switch (type) {
3743 	case TC_SETUP_CLSU32:
3744 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3745 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3746 		break;
3747 	default:
3748 		break;
3749 	}
3750 
3751 	stmmac_enable_all_queues(priv);
3752 	return ret;
3753 }
3754 
3755 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3756 				 struct tc_block_offload *f)
3757 {
3758 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3759 		return -EOPNOTSUPP;
3760 
3761 	switch (f->command) {
3762 	case TC_BLOCK_BIND:
3763 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3764 				priv, priv);
3765 	case TC_BLOCK_UNBIND:
3766 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3767 		return 0;
3768 	default:
3769 		return -EOPNOTSUPP;
3770 	}
3771 }
3772 
3773 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3774 			   void *type_data)
3775 {
3776 	struct stmmac_priv *priv = netdev_priv(ndev);
3777 
3778 	switch (type) {
3779 	case TC_SETUP_BLOCK:
3780 		return stmmac_setup_tc_block(priv, type_data);
3781 	default:
3782 		return -EOPNOTSUPP;
3783 	}
3784 }
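
/*
 * Example (illustrative): the CLSU32 offload above is exercised by
 * attaching a u32 filter to the clsact ingress block (assuming eth0);
 * whether a particular rule is accepted depends on
 * stmmac_tc_setup_cls_u32():
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress u32 match ip dst 192.168.0.1 action drop
 */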
3785 
3786 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3787 {
3788 	struct stmmac_priv *priv = netdev_priv(ndev);
3789 	int ret = 0;
3790 
3791 	ret = eth_mac_addr(ndev, addr);
3792 	if (ret)
3793 		return ret;
3794 
3795 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3796 
3797 	return ret;
3798 }
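
/*
 * Example (illustrative): a MAC address change from user space, e.g.
 * "ip link set dev eth0 address 02:11:22:33:44:55", first updates
 * ndev->dev_addr via eth_mac_addr() and is then programmed into the
 * MAC by stmmac_set_umac_addr().
 */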
3799 
3800 #ifdef CONFIG_DEBUG_FS
3801 static struct dentry *stmmac_fs_dir;
3802 
3803 static void sysfs_display_ring(void *head, int size, int extend_desc,
3804 			       struct seq_file *seq)
3805 {
3806 	int i;
3807 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3808 	struct dma_desc *p = (struct dma_desc *)head;
3809 
3810 	for (i = 0; i < size; i++) {
3811 		if (extend_desc) {
3812 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3813 				   i, (unsigned int)virt_to_phys(ep),
3814 				   le32_to_cpu(ep->basic.des0),
3815 				   le32_to_cpu(ep->basic.des1),
3816 				   le32_to_cpu(ep->basic.des2),
3817 				   le32_to_cpu(ep->basic.des3));
3818 			ep++;
3819 		} else {
3820 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3821 				   i, (unsigned int)virt_to_phys(p),
3822 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3823 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3824 			p++;
3825 		}
3826 		seq_printf(seq, "\n");
3827 	}
3828 }
3829 
3830 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3831 {
3832 	struct net_device *dev = seq->private;
3833 	struct stmmac_priv *priv = netdev_priv(dev);
3834 	u32 rx_count = priv->plat->rx_queues_to_use;
3835 	u32 tx_count = priv->plat->tx_queues_to_use;
3836 	u32 queue;
3837 
3838 	for (queue = 0; queue < rx_count; queue++) {
3839 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3840 
3841 		seq_printf(seq, "RX Queue %d:\n", queue);
3842 
3843 		if (priv->extend_desc) {
3844 			seq_printf(seq, "Extended descriptor ring:\n");
3845 			sysfs_display_ring((void *)rx_q->dma_erx,
3846 					   DMA_RX_SIZE, 1, seq);
3847 		} else {
3848 			seq_printf(seq, "Descriptor ring:\n");
3849 			sysfs_display_ring((void *)rx_q->dma_rx,
3850 					   DMA_RX_SIZE, 0, seq);
3851 		}
3852 	}
3853 
3854 	for (queue = 0; queue < tx_count; queue++) {
3855 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3856 
3857 		seq_printf(seq, "TX Queue %d:\n", queue);
3858 
3859 		if (priv->extend_desc) {
3860 			seq_printf(seq, "Extended descriptor ring:\n");
3861 			sysfs_display_ring((void *)tx_q->dma_etx,
3862 					   DMA_TX_SIZE, 1, seq);
3863 		} else {
3864 			seq_printf(seq, "Descriptor ring:\n");
3865 			sysfs_display_ring((void *)tx_q->dma_tx,
3866 					   DMA_TX_SIZE, 0, seq);
3867 		}
3868 	}
3869 
3870 	return 0;
3871 }
3872 
3873 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3874 {
3875 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3876 }
3877 
3878 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3879 
3880 static const struct file_operations stmmac_rings_status_fops = {
3881 	.owner = THIS_MODULE,
3882 	.open = stmmac_sysfs_ring_open,
3883 	.read = seq_read,
3884 	.llseek = seq_lseek,
3885 	.release = single_release,
3886 };
3887 
3888 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3889 {
3890 	struct net_device *dev = seq->private;
3891 	struct stmmac_priv *priv = netdev_priv(dev);
3892 
3893 	if (!priv->hw_cap_support) {
3894 		seq_printf(seq, "DMA HW features not supported\n");
3895 		return 0;
3896 	}
3897 
3898 	seq_printf(seq, "==============================\n");
3899 	seq_printf(seq, "\tDMA HW features\n");
3900 	seq_printf(seq, "==============================\n");
3901 
3902 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3903 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3904 	seq_printf(seq, "\t1000 Mbps: %s\n",
3905 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3906 	seq_printf(seq, "\tHalf duplex: %s\n",
3907 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3908 	seq_printf(seq, "\tHash Filter: %s\n",
3909 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3910 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3911 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3912 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3913 		   (priv->dma_cap.pcs) ? "Y" : "N");
3914 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3915 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3916 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3917 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3918 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3919 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3920 	seq_printf(seq, "\tRMON module: %s\n",
3921 		   (priv->dma_cap.rmon) ? "Y" : "N");
3922 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3923 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3924 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3925 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3926 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3927 		   (priv->dma_cap.eee) ? "Y" : "N");
3928 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3929 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3930 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3931 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3932 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3933 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3934 	} else {
3935 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3936 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3937 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3938 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3939 	}
3940 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3941 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3942 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3943 		   priv->dma_cap.number_rx_channel);
3944 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3945 		   priv->dma_cap.number_tx_channel);
3946 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3947 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3948 
3949 	return 0;
3950 }
3951 
3952 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3953 {
3954 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3955 }
3956 
3957 static const struct file_operations stmmac_dma_cap_fops = {
3958 	.owner = THIS_MODULE,
3959 	.open = stmmac_sysfs_dma_cap_open,
3960 	.read = seq_read,
3961 	.llseek = seq_lseek,
3962 	.release = single_release,
3963 };
3964 
3965 static int stmmac_init_fs(struct net_device *dev)
3966 {
3967 	struct stmmac_priv *priv = netdev_priv(dev);
3968 
3969 	/* Create per netdev entries */
3970 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3971 
3972 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3973 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3974 
3975 		return -ENOMEM;
3976 	}
3977 
3978 	/* Entry to report DMA RX/TX rings */
3979 	priv->dbgfs_rings_status =
3980 		debugfs_create_file("descriptors_status", 0444,
3981 				    priv->dbgfs_dir, dev,
3982 				    &stmmac_rings_status_fops);
3983 
3984 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3985 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3986 		debugfs_remove_recursive(priv->dbgfs_dir);
3987 
3988 		return -ENOMEM;
3989 	}
3990 
3991 	/* Entry to report the DMA HW features */
3992 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
3993 						  priv->dbgfs_dir,
3994 						  dev, &stmmac_dma_cap_fops);
3995 
3996 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3997 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3998 		debugfs_remove_recursive(priv->dbgfs_dir);
3999 
4000 		return -ENOMEM;
4001 	}
4002 
4003 	return 0;
4004 }
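
/*
 * Example (illustrative): with CONFIG_DEBUG_FS enabled and debugfs mounted,
 * the two entries created above can be read from user space (assuming eth0):
 *
 *	cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *	cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */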
4005 
4006 static void stmmac_exit_fs(struct net_device *dev)
4007 {
4008 	struct stmmac_priv *priv = netdev_priv(dev);
4009 
4010 	debugfs_remove_recursive(priv->dbgfs_dir);
4011 }
4012 #endif /* CONFIG_DEBUG_FS */
4013 
4014 static const struct net_device_ops stmmac_netdev_ops = {
4015 	.ndo_open = stmmac_open,
4016 	.ndo_start_xmit = stmmac_xmit,
4017 	.ndo_stop = stmmac_release,
4018 	.ndo_change_mtu = stmmac_change_mtu,
4019 	.ndo_fix_features = stmmac_fix_features,
4020 	.ndo_set_features = stmmac_set_features,
4021 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4022 	.ndo_tx_timeout = stmmac_tx_timeout,
4023 	.ndo_do_ioctl = stmmac_ioctl,
4024 	.ndo_setup_tc = stmmac_setup_tc,
4025 #ifdef CONFIG_NET_POLL_CONTROLLER
4026 	.ndo_poll_controller = stmmac_poll_controller,
4027 #endif
4028 	.ndo_set_mac_address = stmmac_set_mac_address,
4029 };
4030 
4031 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4032 {
4033 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4034 		return;
4035 	if (test_bit(STMMAC_DOWN, &priv->state))
4036 		return;
4037 
4038 	netdev_err(priv->dev, "Reset adapter.\n");
4039 
4040 	rtnl_lock();
4041 	netif_trans_update(priv->dev);
4042 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4043 		usleep_range(1000, 2000);
4044 
4045 	set_bit(STMMAC_DOWN, &priv->state);
4046 	dev_close(priv->dev);
4047 	dev_open(priv->dev);
4048 	clear_bit(STMMAC_DOWN, &priv->state);
4049 	clear_bit(STMMAC_RESETING, &priv->state);
4050 	rtnl_unlock();
4051 }
4052 
4053 static void stmmac_service_task(struct work_struct *work)
4054 {
4055 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4056 			service_task);
4057 
4058 	stmmac_reset_subtask(priv);
4059 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4060 }
4061 
4062 /**
4063  *  stmmac_hw_init - Init the MAC device
4064  *  @priv: driver private structure
4065  *  Description: this function is to configure the MAC device according to
4066  *  some platform parameters or the HW capability register. It prepares the
4067  *  driver to use either ring or chain modes and to setup either enhanced or
4068  *  normal descriptors.
4069  */
4070 static int stmmac_hw_init(struct stmmac_priv *priv)
4071 {
4072 	int ret;
4073 
4074 	/* dwmac-sun8i only work in chain mode */
4075 	/* dwmac-sun8i only works in chain mode */
4076 		chain_mode = 1;
4077 	priv->chain_mode = chain_mode;
4078 
4079 	/* Initialize HW Interface */
4080 	ret = stmmac_hwif_init(priv);
4081 	if (ret)
4082 		return ret;
4083 
4084 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
4085 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4086 	if (priv->hw_cap_support) {
4087 		dev_info(priv->device, "DMA HW capability register supported\n");
4088 
4089 		/* Some gmac/dma configuration fields passed through the
4090 		 * platform data (e.g. enh_desc, tx_coe) can be overridden
4091 		 * with the values from the HW capability register
4092 		 * (if supported).
4093 		 */
4094 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4095 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4096 		priv->hw->pmt = priv->plat->pmt;
4097 
4098 		/* TXCOE doesn't work in thresh DMA mode */
4099 		if (priv->plat->force_thresh_dma_mode)
4100 			priv->plat->tx_coe = 0;
4101 		else
4102 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4103 
4104 		/* In case of GMAC4 rx_coe is from HW cap register. */
4105 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4106 
4107 		if (priv->dma_cap.rx_coe_type2)
4108 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4109 		else if (priv->dma_cap.rx_coe_type1)
4110 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4111 
4112 	} else {
4113 		dev_info(priv->device, "No HW DMA feature register supported\n");
4114 	}
4115 
4116 	if (priv->plat->rx_coe) {
4117 		priv->hw->rx_csum = priv->plat->rx_coe;
4118 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4119 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4120 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4121 	}
4122 	if (priv->plat->tx_coe)
4123 		dev_info(priv->device, "TX Checksum insertion supported\n");
4124 
4125 	if (priv->plat->pmt) {
4126 		dev_info(priv->device, "Wake-up On LAN supported\n");
4127 		device_set_wakeup_capable(priv->device, 1);
4128 	}
4129 
4130 	if (priv->dma_cap.tsoen)
4131 		dev_info(priv->device, "TSO supported\n");
4132 
4133 	return 0;
4134 }
4135 
4136 /**
4137  * stmmac_dvr_probe
4138  * @device: device pointer
4139  * @plat_dat: platform data pointer
4140  * @res: stmmac resource pointer
4141  * Description: this is the main probe function, used to allocate the
4142  * net device via alloc_etherdev and set up the private structure.
4143  * Return:
4144  * returns 0 on success, otherwise errno.
4145  */
4146 int stmmac_dvr_probe(struct device *device,
4147 		     struct plat_stmmacenet_data *plat_dat,
4148 		     struct stmmac_resources *res)
4149 {
4150 	struct net_device *ndev = NULL;
4151 	struct stmmac_priv *priv;
4152 	int ret = 0;
4153 	u32 queue;
4154 
4155 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4156 				  MTL_MAX_TX_QUEUES,
4157 				  MTL_MAX_RX_QUEUES);
4158 	if (!ndev)
4159 		return -ENOMEM;
4160 
4161 	SET_NETDEV_DEV(ndev, device);
4162 
4163 	priv = netdev_priv(ndev);
4164 	priv->device = device;
4165 	priv->dev = ndev;
4166 
4167 	stmmac_set_ethtool_ops(ndev);
4168 	priv->pause = pause;
4169 	priv->plat = plat_dat;
4170 	priv->ioaddr = res->addr;
4171 	priv->dev->base_addr = (unsigned long)res->addr;
4172 
4173 	priv->dev->irq = res->irq;
4174 	priv->wol_irq = res->wol_irq;
4175 	priv->lpi_irq = res->lpi_irq;
4176 
4177 	if (res->mac)
4178 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4179 
4180 	dev_set_drvdata(device, priv->dev);
4181 
4182 	/* Verify driver arguments */
4183 	stmmac_verify_args();
4184 
4185 	/* Allocate workqueue */
4186 	priv->wq = create_singlethread_workqueue("stmmac_wq");
4187 	if (!priv->wq) {
4188 		dev_err(priv->device, "failed to create workqueue\n");
		ret = -ENOMEM;	/* don't return 0 (success) on failure */
4189 		goto error_wq;
4190 	}
4191 
4192 	INIT_WORK(&priv->service_task, stmmac_service_task);
4193 
4194 	/* Override with kernel parameters if supplied XXX CRS XXX
4195 	 * this needs to have multiple instances
4196 	 */
4197 	if ((phyaddr >= 0) && (phyaddr <= 31))
4198 		priv->plat->phy_addr = phyaddr;
4199 
4200 	if (priv->plat->stmmac_rst) {
4201 		ret = reset_control_assert(priv->plat->stmmac_rst);
4202 		reset_control_deassert(priv->plat->stmmac_rst);
4203 		/* Some reset controllers provide only a reset callback
4204 		 * instead of the assert + deassert callback pair.
4205 		 */
4206 		if (ret == -ENOTSUPP)
4207 			reset_control_reset(priv->plat->stmmac_rst);
4208 	}
4209 
4210 	/* Init MAC and get the capabilities */
4211 	ret = stmmac_hw_init(priv);
4212 	if (ret)
4213 		goto error_hw_init;
4214 
4215 	/* Configure real RX and TX queues */
4216 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4217 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4218 
4219 	ndev->netdev_ops = &stmmac_netdev_ops;
4220 
4221 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4222 			    NETIF_F_RXCSUM;
4223 
4224 	ret = stmmac_tc_init(priv, priv);
4225 	if (!ret)
4226 		ndev->hw_features |= NETIF_F_HW_TC;
4228 
4229 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4230 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4231 		priv->tso = true;
4232 		dev_info(priv->device, "TSO feature enabled\n");
4233 	}
4234 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4235 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4236 #ifdef STMMAC_VLAN_TAG_USED
4237 	/* Both mac100 and gmac support receive VLAN tag detection */
4238 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4239 #endif
4240 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4241 
4242 	/* MTU range: 46 - hw-specific max */
4243 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4244 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4245 		ndev->max_mtu = JUMBO_LEN;
4246 	else
4247 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4248 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4249 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4250 	 */
4251 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4252 	    (priv->plat->maxmtu >= ndev->min_mtu))
4253 		ndev->max_mtu = priv->plat->maxmtu;
4254 	else if (priv->plat->maxmtu < ndev->min_mtu)
4255 		dev_warn(priv->device,
4256 			 "%s: warning: maxmtu having invalid value (%d)\n",
4257 			 __func__, priv->plat->maxmtu);
4258 
4259 	if (flow_ctrl)
4260 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4261 
4262 	/* Rx Watchdog is available in the COREs newer than 3.40.
4263 	 * In some cases, for example on buggy HW, this feature
4264 	 * has to be disabled; this can be done by setting the
4265 	 * riwt_off field in the platform data.
4266 	 */
4267 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4268 		priv->use_riwt = 1;
4269 		dev_info(priv->device,
4270 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4271 	}
4272 
4273 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4274 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4275 
4276 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4277 			       (8 * priv->plat->rx_queues_to_use));
4278 	}
4279 
4280 	mutex_init(&priv->lock);
4281 
4282 	/* If a specific clk_csr value is passed from the platform,
4283 	 * the CSR Clock Range selection is fixed and cannot be
4284 	 * changed at run-time. Otherwise, the driver will try to
4285 	 * set the MDC clock dynamically according to the actual
4286 	 * csr clock input.
4287 	 */
4288 	if (!priv->plat->clk_csr)
4289 		stmmac_clk_csr_set(priv);
4290 	else
4291 		priv->clk_csr = priv->plat->clk_csr;
4292 
4293 	stmmac_check_pcs_mode(priv);
4294 
4295 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4296 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4297 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4298 		/* MDIO bus Registration */
4299 		ret = stmmac_mdio_register(ndev);
4300 		if (ret < 0) {
4301 			dev_err(priv->device,
4302 				"%s: MDIO bus (id: %d) registration failed\n",
4303 				__func__, priv->plat->bus_id);
4304 			goto error_mdio_register;
4305 		}
4306 	}
4307 
4308 	ret = register_netdev(ndev);
4309 	if (ret) {
4310 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4311 			__func__, ret);
4312 		goto error_netdev_register;
4313 	}
4314 
4315 	return ret;
4316 
4317 error_netdev_register:
4318 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4319 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4320 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4321 		stmmac_mdio_unregister(ndev);
4322 error_mdio_register:
4323 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4324 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4325 
4326 		netif_napi_del(&rx_q->napi);
4327 	}
4328 error_hw_init:
4329 	destroy_workqueue(priv->wq);
4330 error_wq:
4331 	free_netdev(ndev);
4332 
4333 	return ret;
4334 }
4335 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
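
/*
 * Example (illustrative): platform glue drivers (dwmac-*) typically call
 * stmmac_dvr_probe() from their own probe, roughly as in the generic glue
 * (a sketch, error handling omitted):
 *
 *	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 *	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
 *	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 */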
4336 
4337 /**
4338  * stmmac_dvr_remove
4339  * @dev: device pointer
4340  * Description: this function resets the TX/RX processes, disables the MAC
4341  * RX/TX, changes the link status, and releases the DMA descriptor rings.
4342  */
4343 int stmmac_dvr_remove(struct device *dev)
4344 {
4345 	struct net_device *ndev = dev_get_drvdata(dev);
4346 	struct stmmac_priv *priv = netdev_priv(ndev);
4347 
4348 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
4349 
4350 	stmmac_stop_all_dma(priv);
4351 
4352 	stmmac_mac_set(priv, priv->ioaddr, false);
4353 	netif_carrier_off(ndev);
4354 	unregister_netdev(ndev);
4355 	if (priv->plat->stmmac_rst)
4356 		reset_control_assert(priv->plat->stmmac_rst);
4357 	clk_disable_unprepare(priv->plat->pclk);
4358 	clk_disable_unprepare(priv->plat->stmmac_clk);
4359 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4360 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4361 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4362 		stmmac_mdio_unregister(ndev);
4363 	destroy_workqueue(priv->wq);
4364 	mutex_destroy(&priv->lock);
4365 	free_netdev(ndev);
4366 
4367 	return 0;
4368 }
4369 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4370 
4371 /**
4372  * stmmac_suspend - suspend callback
4373  * @dev: device pointer
4374  * Description: this is the function to suspend the device and it is called
4375  * by the platform driver to stop the network queue, release the resources,
4376  * program the PMT register (for WoL), clean and release driver resources.
4377  */
4378 int stmmac_suspend(struct device *dev)
4379 {
4380 	struct net_device *ndev = dev_get_drvdata(dev);
4381 	struct stmmac_priv *priv = netdev_priv(ndev);
4382 
4383 	if (!ndev || !netif_running(ndev))
4384 		return 0;
4385 
4386 	if (ndev->phydev)
4387 		phy_stop(ndev->phydev);
4388 
4389 	mutex_lock(&priv->lock);
4390 
4391 	netif_device_detach(ndev);
4392 	stmmac_stop_all_queues(priv);
4393 
4394 	stmmac_disable_all_queues(priv);
4395 
4396 	/* Stop TX/RX DMA */
4397 	stmmac_stop_all_dma(priv);
4398 
4399 	/* Enable Power down mode by programming the PMT regs */
4400 	if (device_may_wakeup(priv->device)) {
4401 		stmmac_pmt(priv, priv->hw, priv->wolopts);
4402 		priv->irq_wake = 1;
4403 	} else {
4404 		stmmac_mac_set(priv, priv->ioaddr, false);
4405 		pinctrl_pm_select_sleep_state(priv->device);
4406 		/* Disable clocks in case PWM is off */
4407 		clk_disable(priv->plat->pclk);
4408 		clk_disable(priv->plat->stmmac_clk);
4409 	}
4410 	mutex_unlock(&priv->lock);
4411 
4412 	priv->oldlink = false;
4413 	priv->speed = SPEED_UNKNOWN;
4414 	priv->oldduplex = DUPLEX_UNKNOWN;
4415 	return 0;
4416 }
4417 EXPORT_SYMBOL_GPL(stmmac_suspend);
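
/*
 * Note (illustrative): stmmac_suspend()/stmmac_resume() are not called
 * directly; bus glue wires them (or thin wrappers around them) into a
 * dev_pm_ops, e.g. a sketch:
 *
 *	static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
 */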
4418 
4419 /**
4420  * stmmac_reset_queues_param - reset queue parameters
4421  * @dev: device pointer
4422  */
4423 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4424 {
4425 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4426 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4427 	u32 queue;
4428 
4429 	for (queue = 0; queue < rx_cnt; queue++) {
4430 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4431 
4432 		rx_q->cur_rx = 0;
4433 		rx_q->dirty_rx = 0;
4434 	}
4435 
4436 	for (queue = 0; queue < tx_cnt; queue++) {
4437 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4438 
4439 		tx_q->cur_tx = 0;
4440 		tx_q->dirty_tx = 0;
4441 		tx_q->mss = 0;
4442 	}
4443 }
4444 
4445 /**
4446  * stmmac_resume - resume callback
4447  * @dev: device pointer
4448  * Description: when resume this function is invoked to setup the DMA and CORE
4449  * in a usable state.
4450  */
4451 int stmmac_resume(struct device *dev)
4452 {
4453 	struct net_device *ndev = dev_get_drvdata(dev);
4454 	struct stmmac_priv *priv = netdev_priv(ndev);
4455 
4456 	if (!netif_running(ndev))
4457 		return 0;
4458 
4459 	/* The Power Down bit in the PM register is cleared
4460 	 * automatically as soon as a magic packet or a Wake-up frame
4461 	 * is received. Anyway, it's better to clear this bit
4462 	 * manually because it can cause problems while resuming
4463 	 * from another device (e.g. a serial console).
4464 	 */
4465 	if (device_may_wakeup(priv->device)) {
4466 		mutex_lock(&priv->lock);
4467 		stmmac_pmt(priv, priv->hw, 0);
4468 		mutex_unlock(&priv->lock);
4469 		priv->irq_wake = 0;
4470 	} else {
4471 		pinctrl_pm_select_default_state(priv->device);
4472 		/* enable the clk previously disabled */
4473 		clk_enable(priv->plat->stmmac_clk);
4474 		clk_enable(priv->plat->pclk);
4475 		/* reset the phy so that it's ready */
4476 		if (priv->mii)
4477 			stmmac_mdio_reset(priv->mii);
4478 	}
4479 
4480 	netif_device_attach(ndev);
4481 
4482 	mutex_lock(&priv->lock);
4483 
4484 	stmmac_reset_queues_param(priv);
4485 
4486 	stmmac_clear_descriptors(priv);
4487 
4488 	stmmac_hw_setup(ndev, false);
4489 	stmmac_init_tx_coalesce(priv);
4490 	stmmac_set_rx_mode(ndev);
4491 
4492 	stmmac_enable_all_queues(priv);
4493 
4494 	stmmac_start_all_queues(priv);
4495 
4496 	mutex_unlock(&priv->lock);
4497 
4498 	if (ndev->phydev)
4499 		phy_start(ndev->phydev);
4500 
4501 	return 0;
4502 }
4503 EXPORT_SYMBOL_GPL(stmmac_resume);
4504 
4505 #ifndef MODULE
4506 static int __init stmmac_cmdline_opt(char *str)
4507 {
4508 	char *opt;
4509 
4510 	if (!str || !*str)
4511 		return -EINVAL;
4512 	while ((opt = strsep(&str, ",")) != NULL) {
4513 		if (!strncmp(opt, "debug:", 6)) {
4514 			if (kstrtoint(opt + 6, 0, &debug))
4515 				goto err;
4516 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4517 			if (kstrtoint(opt + 8, 0, &phyaddr))
4518 				goto err;
4519 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4520 			if (kstrtoint(opt + 7, 0, &buf_sz))
4521 				goto err;
4522 		} else if (!strncmp(opt, "tc:", 3)) {
4523 			if (kstrtoint(opt + 3, 0, &tc))
4524 				goto err;
4525 		} else if (!strncmp(opt, "watchdog:", 9)) {
4526 			if (kstrtoint(opt + 9, 0, &watchdog))
4527 				goto err;
4528 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4529 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4530 				goto err;
4531 		} else if (!strncmp(opt, "pause:", 6)) {
4532 			if (kstrtoint(opt + 6, 0, &pause))
4533 				goto err;
4534 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4535 			if (kstrtoint(opt + 10, 0, &eee_timer))
4536 				goto err;
4537 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4538 			if (kstrtoint(opt + 11, 0, &chain_mode))
4539 				goto err;
4540 		}
4541 	}
4542 	return 0;
4543 
4544 err:
4545 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4546 	return -EINVAL;
4547 }
4548 
4549 __setup("stmmaceth=", stmmac_cmdline_opt);
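
/*
 * Example (illustrative): when the driver is built in, the options parsed
 * above can be passed on the kernel command line, e.g.:
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,flow_ctrl:1
 */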
4550 #endif /* MODULE */
4551 
4552 static int __init stmmac_init(void)
4553 {
4554 #ifdef CONFIG_DEBUG_FS
4555 	/* Create debugfs main directory if it doesn't exist yet */
4556 	if (!stmmac_fs_dir) {
4557 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4558 
4559 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4560 			pr_err("ERROR %s, debugfs create directory failed\n",
4561 			       STMMAC_RESOURCE_NAME);
4562 
4563 			return -ENOMEM;
4564 		}
4565 	}
4566 #endif
4567 
4568 	return 0;
4569 }
4570 
4571 static void __exit stmmac_exit(void)
4572 {
4573 #ifdef CONFIG_DEBUG_FS
4574 	debugfs_remove_recursive(stmmac_fs_dir);
4575 #endif
4576 }
4577 
4578 module_init(stmmac_init)
4579 module_exit(stmmac_exit)
4580 
4581 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4582 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4583 MODULE_LICENSE("GPL");
4584