xref: /openbmc/linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision c4c8f39a57bf5057fc51a848d42b7e348ecfa31d)
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "hwif.h"
55 
56 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
57 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
58 
59 /* Module parameters */
60 #define TX_TIMEO	5000
61 static int watchdog = TX_TIMEO;
62 module_param(watchdog, int, 0644);
63 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
64 
65 static int debug = -1;
66 module_param(debug, int, 0644);
67 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
68 
69 static int phyaddr = -1;
70 module_param(phyaddr, int, 0444);
71 MODULE_PARM_DESC(phyaddr, "Physical device address");
72 
73 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
74 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
75 
76 static int flow_ctrl = FLOW_OFF;
77 module_param(flow_ctrl, int, 0644);
78 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
79 
80 static int pause = PAUSE_TIME;
81 module_param(pause, int, 0644);
82 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
83 
84 #define TC_DEFAULT 64
85 static int tc = TC_DEFAULT;
86 module_param(tc, int, 0644);
87 MODULE_PARM_DESC(tc, "DMA threshold control value");
88 
89 #define	DEFAULT_BUFSIZE	1536
90 static int buf_sz = DEFAULT_BUFSIZE;
91 module_param(buf_sz, int, 0644);
92 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
93 
94 #define	STMMAC_RX_COPYBREAK	256
95 
96 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
97 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
98 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
99 
100 #define STMMAC_DEFAULT_LPI_TIMER	1000
101 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
102 module_param(eee_timer, int, 0644);
103 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
104 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
105 
106 /* By default the driver uses ring mode to manage tx and rx descriptors,
107  * but the user can force the use of chain mode instead.
108  */
109 static unsigned int chain_mode;
110 module_param(chain_mode, int, 0444);
111 MODULE_PARM_DESC(chain_mode, "Use chain mode instead of ring mode");
112 
113 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
114 
115 #ifdef CONFIG_DEBUG_FS
116 static int stmmac_init_fs(struct net_device *dev);
117 static void stmmac_exit_fs(struct net_device *dev);
118 #endif
119 
120 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
121 
122 /**
123  * stmmac_verify_args - verify the driver parameters.
124  * Description: it checks the driver parameters and sets a default in case
125  * of errors.
126  */
127 static void stmmac_verify_args(void)
128 {
129 	if (unlikely(watchdog < 0))
130 		watchdog = TX_TIMEO;
131 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
132 		buf_sz = DEFAULT_BUFSIZE;
133 	if (unlikely(flow_ctrl > 1))
134 		flow_ctrl = FLOW_AUTO;
135 	else if (likely(flow_ctrl < 0))
136 		flow_ctrl = FLOW_OFF;
137 	if (unlikely((pause < 0) || (pause > 0xffff)))
138 		pause = PAUSE_TIME;
139 	if (eee_timer < 0)
140 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
141 }
142 
143 /**
144  * stmmac_disable_all_queues - Disable all queues
145  * @priv: driver private structure
146  */
147 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
148 {
149 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
150 	u32 queue;
151 
152 	for (queue = 0; queue < rx_queues_cnt; queue++) {
153 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
154 
155 		napi_disable(&rx_q->napi);
156 	}
157 }
158 
159 /**
160  * stmmac_enable_all_queues - Enable all queues
161  * @priv: driver private structure
162  */
163 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
164 {
165 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
166 	u32 queue;
167 
168 	for (queue = 0; queue < rx_queues_cnt; queue++) {
169 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
170 
171 		napi_enable(&rx_q->napi);
172 	}
173 }
174 
175 /**
176  * stmmac_stop_all_queues - Stop all queues
177  * @priv: driver private structure
178  */
179 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
180 {
181 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
182 	u32 queue;
183 
184 	for (queue = 0; queue < tx_queues_cnt; queue++)
185 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
186 }
187 
188 /**
189  * stmmac_start_all_queues - Start all queues
190  * @priv: driver private structure
191  */
192 static void stmmac_start_all_queues(struct stmmac_priv *priv)
193 {
194 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
195 	u32 queue;
196 
197 	for (queue = 0; queue < tx_queues_cnt; queue++)
198 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
199 }
200 
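/* Editorial note: in this version of the driver the NAPI contexts live on
 * the RX queues only, so the enable/disable helpers above walk rx_queue[],
 * while the TX queues are started and stopped through the netif_tx_* API.
 */
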
201 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
202 {
203 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
204 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
205 		queue_work(priv->wq, &priv->service_task);
206 }
207 
208 static void stmmac_global_err(struct stmmac_priv *priv)
209 {
210 	netif_carrier_off(priv->dev);
211 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
212 	stmmac_service_event_schedule(priv);
213 }
214 
215 /**
216  * stmmac_clk_csr_set - dynamically set the MDC clock
217  * @priv: driver private structure
218  * Description: this is to dynamically set the MDC clock according to the csr
219  * clock input.
220  * Note:
221  *	If a specific clk_csr value is passed from the platform,
222  *	the CSR Clock Range selection cannot be changed at run-time
223  *	and is fixed (as reported in the driver documentation).
224  *	Otherwise, the driver tries to set the MDC clock dynamically
225  *	according to the actual clock input.
226  */
227 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
228 {
229 	u32 clk_rate;
230 
231 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
232 
233 	/* The platform-provided default clk_csr is assumed valid in
234 	 * all cases except the ones handled below. For clock rates
235 	 * higher than the IEEE 802.3 specified frequency range we
236 	 * cannot estimate the proper divider, since the frequency of
237 	 * clk_csr_i is not known; in that case the default divider is
238 	 * left unchanged.
239 	 */
240 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
241 		if (clk_rate < CSR_F_35M)
242 			priv->clk_csr = STMMAC_CSR_20_35M;
243 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
244 			priv->clk_csr = STMMAC_CSR_35_60M;
245 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
246 			priv->clk_csr = STMMAC_CSR_60_100M;
247 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
248 			priv->clk_csr = STMMAC_CSR_100_150M;
249 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
250 			priv->clk_csr = STMMAC_CSR_150_250M;
251 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
252 			priv->clk_csr = STMMAC_CSR_250_300M;
253 	}
254 
255 	if (priv->plat->has_sun8i) {
256 		if (clk_rate > 160000000)
257 			priv->clk_csr = 0x03;
258 		else if (clk_rate > 80000000)
259 			priv->clk_csr = 0x02;
260 		else if (clk_rate > 40000000)
261 			priv->clk_csr = 0x01;
262 		else
263 			priv->clk_csr = 0;
264 	}
265 }
266 
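/* Worked example (editorial, illustrative numbers): a 50 MHz csr clock
 * falls in the 35-60 MHz range above, so clk_csr becomes
 * STMMAC_CSR_35_60M. The MDIO code later turns this CSR range into an
 * MDC clock divider; the point of the table is to keep MDC within the
 * 2.5 MHz limit specified by IEEE 802.3.
 */
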
267 static void print_pkt(unsigned char *buf, int len)
268 {
269 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
270 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
271 }
272 
273 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
274 {
275 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
276 	u32 avail;
277 
278 	if (tx_q->dirty_tx > tx_q->cur_tx)
279 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
280 	else
281 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
282 
283 	return avail;
284 }
285 
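/* Worked example (editorial, assuming the default DMA_TX_SIZE of 512):
 * in stmmac_tx_avail() above, cur_tx = 10 and dirty_tx = 4 take the else
 * branch, giving 512 - 10 + 4 - 1 = 505 free descriptors. One slot is
 * always kept unused so that cur_tx == dirty_tx unambiguously means an
 * empty ring.
 */
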
286 /**
287  * stmmac_rx_dirty - Get the number of dirty RX descriptors
288  * @priv: driver private structure
289  * @queue: RX queue index
290  */
291 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
292 {
293 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
294 	u32 dirty;
295 
296 	if (rx_q->dirty_rx <= rx_q->cur_rx)
297 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
298 	else
299 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
300 
301 	return dirty;
302 }
303 
304 /**
305  * stmmac_hw_fix_mac_speed - callback for speed selection
306  * @priv: driver private structure
307  * Description: on some platforms (e.g. ST), some HW system configuration
308  * registers have to be set according to the link speed negotiated.
309  */
310 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
311 {
312 	struct net_device *ndev = priv->dev;
313 	struct phy_device *phydev = ndev->phydev;
314 
315 	if (likely(priv->plat->fix_mac_speed))
316 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
317 }
318 
319 /**
320  * stmmac_enable_eee_mode - check and enter LPI mode
321  * @priv: driver private structure
322  * Description: this function verifies that all TX queues are idle and,
323  * if so, enters LPI mode when EEE is enabled.
324  */
325 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
326 {
327 	u32 tx_cnt = priv->plat->tx_queues_to_use;
328 	u32 queue;
329 
330 	/* check if all TX queues have the work finished */
331 	for (queue = 0; queue < tx_cnt; queue++) {
332 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
333 
334 		if (tx_q->dirty_tx != tx_q->cur_tx)
335 			return; /* still unfinished work */
336 	}
337 
338 	/* Check and enter in LPI mode */
339 	if (!priv->tx_path_in_lpi_mode)
340 		stmmac_set_eee_mode(priv, priv->hw,
341 				priv->plat->en_tx_lpi_clockgating);
342 }
343 
344 /**
345  * stmmac_disable_eee_mode - disable and exit from LPI mode
346  * @priv: driver private structure
347  * Description: this function exits and disables EEE when the TX path
348  * is in the LPI state. It is called from the xmit path.
349  */
350 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
351 {
352 	stmmac_reset_eee_mode(priv, priv->hw);
353 	del_timer_sync(&priv->eee_ctrl_timer);
354 	priv->tx_path_in_lpi_mode = false;
355 }
356 
357 /**
358  * stmmac_eee_ctrl_timer - EEE TX SW timer.
359  * @arg : data hook
360  * Description:
361  *  if there is no data transfer and if we are not in LPI state,
362  *  then MAC Transmitter can be moved to LPI state.
363  */
364 static void stmmac_eee_ctrl_timer(struct timer_list *t)
365 {
366 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
367 
368 	stmmac_enable_eee_mode(priv);
369 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
370 }
371 
372 /**
373  * stmmac_eee_init - init EEE
374  * @priv: driver private structure
375  * Description:
376  *  if the GMAC supports EEE (from the HW cap reg) and the phy device
377  *  can also manage EEE, this function enables the LPI state and starts
378  *  the related timer.
379  */
380 bool stmmac_eee_init(struct stmmac_priv *priv)
381 {
382 	struct net_device *ndev = priv->dev;
383 	int interface = priv->plat->interface;
384 	unsigned long flags;
385 	bool ret = false;
386 
387 	if ((interface != PHY_INTERFACE_MODE_MII) &&
388 	    (interface != PHY_INTERFACE_MODE_GMII) &&
389 	    !phy_interface_mode_is_rgmii(interface))
390 		goto out;
391 
392 	/* When using the PCS we cannot access the phy registers at this
393 	 * stage, so we do not support extra features like EEE.
394 	 */
395 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
396 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
397 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
398 		goto out;
399 
400 	/* MAC core supports the EEE feature. */
401 	if (priv->dma_cap.eee) {
402 		int tx_lpi_timer = priv->tx_lpi_timer;
403 
404 		/* Check if the PHY supports EEE */
405 		if (phy_init_eee(ndev->phydev, 1)) {
406 			/* Handle the case where EEE can no longer be
407 			 * supported at run-time (for example because the
408 			 * link partner caps have changed).
409 			 * In that case the driver disables its own timers.
410 			 */
411 			spin_lock_irqsave(&priv->lock, flags);
412 			if (priv->eee_active) {
413 				netdev_dbg(priv->dev, "disable EEE\n");
414 				del_timer_sync(&priv->eee_ctrl_timer);
415 				stmmac_set_eee_timer(priv, priv->hw, 0,
416 						tx_lpi_timer);
417 			}
418 			priv->eee_active = 0;
419 			spin_unlock_irqrestore(&priv->lock, flags);
420 			goto out;
421 		}
422 		/* Activate the EEE and start timers */
423 		spin_lock_irqsave(&priv->lock, flags);
424 		if (!priv->eee_active) {
425 			priv->eee_active = 1;
426 			timer_setup(&priv->eee_ctrl_timer,
427 				    stmmac_eee_ctrl_timer, 0);
428 			mod_timer(&priv->eee_ctrl_timer,
429 				  STMMAC_LPI_T(eee_timer));
430 
431 			stmmac_set_eee_timer(priv, priv->hw,
432 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
433 		}
434 		/* Set HW EEE according to the speed */
435 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
436 
437 		ret = true;
438 		spin_unlock_irqrestore(&priv->lock, flags);
439 
440 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
441 	}
442 out:
443 	return ret;
444 }
445 
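/* Editorial summary of the EEE flow implemented above:
 * stmmac_adjust_link() calls stmmac_eee_init() on link changes; once
 * eee_active is set, eee_ctrl_timer periodically runs
 * stmmac_enable_eee_mode(), which moves the MAC into LPI only after
 * every TX queue has drained (dirty_tx == cur_tx), and
 * stmmac_disable_eee_mode() is called from the xmit path to leave LPI
 * before queuing new transmissions.
 */
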
446 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
447  * @priv: driver private structure
448  * @p : descriptor pointer
449  * @skb : the socket buffer
450  * Description :
451  * This function reads the timestamp from the descriptor, performs some
452  * sanity checks and passes it to the stack.
453  */
454 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
455 				   struct dma_desc *p, struct sk_buff *skb)
456 {
457 	struct skb_shared_hwtstamps shhwtstamp;
458 	u64 ns;
459 
460 	if (!priv->hwts_tx_en)
461 		return;
462 
463 	/* exit if skb doesn't support hw tstamp */
464 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
465 		return;
466 
467 	/* check tx tstamp status */
468 	if (stmmac_get_tx_timestamp_status(priv, p)) {
469 		/* get the valid tstamp */
470 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
471 
472 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
473 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
474 
475 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
476 		/* pass tstamp to stack */
477 		skb_tstamp_tx(skb, &shhwtstamp);
478 	}
479 
480 	return;
481 }
482 
483 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
484  * @priv: driver private structure
485  * @p : descriptor pointer
486  * @np : next descriptor pointer
487  * @skb : the socket buffer
488  * Description :
489  * This function reads the received packet's timestamp from the descriptor
490  * and passes it to the stack. It also performs some sanity checks.
491  */
492 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
493 				   struct dma_desc *np, struct sk_buff *skb)
494 {
495 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
496 	struct dma_desc *desc = p;
497 	u64 ns;
498 
499 	if (!priv->hwts_rx_en)
500 		return;
501 	/* For GMAC4, the valid timestamp is from CTX next desc. */
502 	if (priv->plat->has_gmac4)
503 		desc = np;
504 
505 	/* Check if timestamp is available */
506 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
507 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
508 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
509 		shhwtstamp = skb_hwtstamps(skb);
510 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
511 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
512 	} else  {
513 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
514 	}
515 }
516 
517 /**
518  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
519  *  @dev: device pointer.
520  *  @ifr: An IOCTL specific structure, that can contain a pointer to
521  *  a proprietary structure used to pass information to the driver.
522  *  Description:
523  *  This function configures the MAC to enable/disable both outgoing (TX)
524  *  and incoming (RX) packet timestamping based on user input.
525  *  Return Value:
526  *  0 on success and a negative errno on failure.
527  */
528 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
529 {
530 	struct stmmac_priv *priv = netdev_priv(dev);
531 	struct hwtstamp_config config;
532 	struct timespec64 now;
533 	u64 temp = 0;
534 	u32 ptp_v2 = 0;
535 	u32 tstamp_all = 0;
536 	u32 ptp_over_ipv4_udp = 0;
537 	u32 ptp_over_ipv6_udp = 0;
538 	u32 ptp_over_ethernet = 0;
539 	u32 snap_type_sel = 0;
540 	u32 ts_master_en = 0;
541 	u32 ts_event_en = 0;
542 	u32 value = 0;
543 	u32 sec_inc;
544 
545 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
546 		netdev_alert(priv->dev, "No support for HW time stamping\n");
547 		priv->hwts_tx_en = 0;
548 		priv->hwts_rx_en = 0;
549 
550 		return -EOPNOTSUPP;
551 	}
552 
553 	if (copy_from_user(&config, ifr->ifr_data,
554 			   sizeof(struct hwtstamp_config)))
555 		return -EFAULT;
556 
557 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
558 		   __func__, config.flags, config.tx_type, config.rx_filter);
559 
560 	/* reserved for future extensions */
561 	if (config.flags)
562 		return -EINVAL;
563 
564 	if (config.tx_type != HWTSTAMP_TX_OFF &&
565 	    config.tx_type != HWTSTAMP_TX_ON)
566 		return -ERANGE;
567 
568 	if (priv->adv_ts) {
569 		switch (config.rx_filter) {
570 		case HWTSTAMP_FILTER_NONE:
571 			/* time stamp no incoming packet at all */
572 			config.rx_filter = HWTSTAMP_FILTER_NONE;
573 			break;
574 
575 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
576 			/* PTP v1, UDP, any kind of event packet */
577 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
578 			/* take time stamp for all event messages */
579 			if (priv->plat->has_gmac4)
580 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
581 			else
582 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
583 
584 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
585 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
586 			break;
587 
588 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
589 			/* PTP v1, UDP, Sync packet */
590 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
591 			/* take time stamp for SYNC messages only */
592 			ts_event_en = PTP_TCR_TSEVNTENA;
593 
594 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
595 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
596 			break;
597 
598 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
599 			/* PTP v1, UDP, Delay_req packet */
600 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
601 			/* take time stamp for Delay_Req messages only */
602 			ts_master_en = PTP_TCR_TSMSTRENA;
603 			ts_event_en = PTP_TCR_TSEVNTENA;
604 
605 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
606 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
607 			break;
608 
609 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
610 			/* PTP v2, UDP, any kind of event packet */
611 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
612 			ptp_v2 = PTP_TCR_TSVER2ENA;
613 			/* take time stamp for all event messages */
614 			if (priv->plat->has_gmac4)
615 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
616 			else
617 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
618 
619 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
620 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
621 			break;
622 
623 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
624 			/* PTP v2, UDP, Sync packet */
625 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
626 			ptp_v2 = PTP_TCR_TSVER2ENA;
627 			/* take time stamp for SYNC messages only */
628 			ts_event_en = PTP_TCR_TSEVNTENA;
629 
630 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
631 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
632 			break;
633 
634 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
635 			/* PTP v2, UDP, Delay_req packet */
636 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
637 			ptp_v2 = PTP_TCR_TSVER2ENA;
638 			/* take time stamp for Delay_Req messages only */
639 			ts_master_en = PTP_TCR_TSMSTRENA;
640 			ts_event_en = PTP_TCR_TSEVNTENA;
641 
642 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
643 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
644 			break;
645 
646 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
647 			/* PTP v2/802.1AS, any layer, any kind of event packet */
648 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
649 			ptp_v2 = PTP_TCR_TSVER2ENA;
650 			/* take time stamp for all event messages */
651 			if (priv->plat->has_gmac4)
652 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
653 			else
654 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
655 
656 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
657 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
658 			ptp_over_ethernet = PTP_TCR_TSIPENA;
659 			break;
660 
661 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
662 			/* PTP v2/802.1AS, any layer, Sync packet */
663 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
664 			ptp_v2 = PTP_TCR_TSVER2ENA;
665 			/* take time stamp for SYNC messages only */
666 			ts_event_en = PTP_TCR_TSEVNTENA;
667 
668 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
669 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
670 			ptp_over_ethernet = PTP_TCR_TSIPENA;
671 			break;
672 
673 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
674 			/* PTP v2/802.1AS, any layer, Delay_req packet */
675 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
676 			ptp_v2 = PTP_TCR_TSVER2ENA;
677 			/* take time stamp for Delay_Req messages only */
678 			ts_master_en = PTP_TCR_TSMSTRENA;
679 			ts_event_en = PTP_TCR_TSEVNTENA;
680 
681 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
682 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
683 			ptp_over_ethernet = PTP_TCR_TSIPENA;
684 			break;
685 
686 		case HWTSTAMP_FILTER_NTP_ALL:
687 		case HWTSTAMP_FILTER_ALL:
688 			/* time stamp any incoming packet */
689 			config.rx_filter = HWTSTAMP_FILTER_ALL;
690 			tstamp_all = PTP_TCR_TSENALL;
691 			break;
692 
693 		default:
694 			return -ERANGE;
695 		}
696 	} else {
697 		switch (config.rx_filter) {
698 		case HWTSTAMP_FILTER_NONE:
699 			config.rx_filter = HWTSTAMP_FILTER_NONE;
700 			break;
701 		default:
702 			/* PTP v1, UDP, any kind of event packet */
703 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
704 			break;
705 		}
706 	}
707 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
708 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
709 
710 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
711 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
712 	else {
713 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
714 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
715 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
716 			 ts_master_en | snap_type_sel);
717 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
718 
719 		/* program Sub Second Increment reg */
720 		stmmac_config_sub_second_increment(priv,
721 				priv->ptpaddr, priv->plat->clk_ptp_rate,
722 				priv->plat->has_gmac4, &sec_inc);
723 		temp = div_u64(1000000000ULL, sec_inc);
724 
725 		/* calculate the default addend value:
726 		 * the formula is:
727 		 * addend = (2^32) / freq_div_ratio;
728 		 * where freq_div_ratio = 1e9 / sec_inc (sec_inc in ns)
729 		 */
730 		temp = (u64)(temp << 32);
731 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
732 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
733 
734 		/* initialize system time */
735 		ktime_get_real_ts64(&now);
736 
737 		/* lower 32 bits of tv_sec are safe until y2106 */
738 		stmmac_init_systime(priv, priv->ptpaddr,
739 				(u32)now.tv_sec, now.tv_nsec);
740 	}
741 
742 	return copy_to_user(ifr->ifr_data, &config,
743 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
744 }
745 
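/* Worked example for the addend computation above (editorial,
 * illustrative numbers): if sec_inc is 20 ns then
 * freq_div_ratio = 1e9 / 20 = 50 MHz; with clk_ptp_rate = 62.5 MHz,
 * addend = (50e6 << 32) / 62.5e6 = 0.8 * 2^32 ~= 0xCCCCCCCC. In the
 * usual fine-correction scheme the HW adds this value to an accumulator
 * every PTP clock cycle, and each accumulator overflow advances the
 * sub-second counter by sec_inc.
 */
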
746 /**
747  * stmmac_init_ptp - init PTP
748  * @priv: driver private structure
749  * Description: this verifies whether the HW supports PTPv1 or PTPv2.
750  * This is done by looking at the HW capability register.
751  * This function also registers the ptp driver.
752  */
753 static int stmmac_init_ptp(struct stmmac_priv *priv)
754 {
755 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
756 		return -EOPNOTSUPP;
757 
758 	priv->adv_ts = 0;
759 	/* Check if adv_ts can be enabled for dwmac 4.x core */
760 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
761 		priv->adv_ts = 1;
762 	/* Dwmac 3.x core with extend_desc can support adv_ts */
763 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
764 		priv->adv_ts = 1;
765 
766 	if (priv->dma_cap.time_stamp)
767 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
768 
769 	if (priv->adv_ts)
770 		netdev_info(priv->dev,
771 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
772 
773 	priv->hwts_tx_en = 0;
774 	priv->hwts_rx_en = 0;
775 
776 	stmmac_ptp_register(priv);
777 
778 	return 0;
779 }
780 
781 static void stmmac_release_ptp(struct stmmac_priv *priv)
782 {
783 	if (priv->plat->clk_ptp_ref)
784 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
785 	stmmac_ptp_unregister(priv);
786 }
787 
788 /**
789  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
790  *  @priv: driver private structure
791  *  Description: it configures flow control in all queues for the given duplex mode
792  */
793 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
794 {
795 	u32 tx_cnt = priv->plat->tx_queues_to_use;
796 
797 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
798 			priv->pause, tx_cnt);
799 }
800 
801 /**
802  * stmmac_adjust_link - adjusts the link parameters
803  * @dev: net device structure
804  * Description: this is the helper called by the physical abstraction layer
805  * drivers to communicate the phy link status. According to the speed and
806  * duplex, this driver can invoke registered glue-logic as well.
807  * It also invokes the EEE initialization because the link may come up
808  * on a different network (that is EEE capable).
809  */
810 static void stmmac_adjust_link(struct net_device *dev)
811 {
812 	struct stmmac_priv *priv = netdev_priv(dev);
813 	struct phy_device *phydev = dev->phydev;
814 	unsigned long flags;
815 	bool new_state = false;
816 
817 	if (!phydev)
818 		return;
819 
820 	spin_lock_irqsave(&priv->lock, flags);
821 
822 	if (phydev->link) {
823 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
824 
825 		/* Now we make sure that we can be in full duplex mode.
826 		 * If not, we operate in half-duplex mode. */
827 		if (phydev->duplex != priv->oldduplex) {
828 			new_state = true;
829 			if (!phydev->duplex)
830 				ctrl &= ~priv->hw->link.duplex;
831 			else
832 				ctrl |= priv->hw->link.duplex;
833 			priv->oldduplex = phydev->duplex;
834 		}
835 		/* Flow Control operation */
836 		if (phydev->pause)
837 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
838 
839 		if (phydev->speed != priv->speed) {
840 			new_state = true;
841 			ctrl &= ~priv->hw->link.speed_mask;
842 			switch (phydev->speed) {
843 			case SPEED_1000:
844 				ctrl |= priv->hw->link.speed1000;
845 				break;
846 			case SPEED_100:
847 				ctrl |= priv->hw->link.speed100;
848 				break;
849 			case SPEED_10:
850 				ctrl |= priv->hw->link.speed10;
851 				break;
852 			default:
853 				netif_warn(priv, link, priv->dev,
854 					   "broken speed: %d\n", phydev->speed);
855 				phydev->speed = SPEED_UNKNOWN;
856 				break;
857 			}
858 			if (phydev->speed != SPEED_UNKNOWN)
859 				stmmac_hw_fix_mac_speed(priv);
860 			priv->speed = phydev->speed;
861 		}
862 
863 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
864 
865 		if (!priv->oldlink) {
866 			new_state = true;
867 			priv->oldlink = true;
868 		}
869 	} else if (priv->oldlink) {
870 		new_state = true;
871 		priv->oldlink = false;
872 		priv->speed = SPEED_UNKNOWN;
873 		priv->oldduplex = DUPLEX_UNKNOWN;
874 	}
875 
876 	if (new_state && netif_msg_link(priv))
877 		phy_print_status(phydev);
878 
879 	spin_unlock_irqrestore(&priv->lock, flags);
880 
881 	if (phydev->is_pseudo_fixed_link)
882 		/* Stop the PHY layer from calling the hook to adjust the link
883 		 * in case a switch is attached to the stmmac driver.
884 		 */
885 		phydev->irq = PHY_IGNORE_INTERRUPT;
886 	else
887 		/* At this stage, init the EEE if supported.
888 		 * Never called in case of fixed_link.
889 		 */
890 		priv->eee_enabled = stmmac_eee_init(priv);
891 }
892 
893 /**
894  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
895  * @priv: driver private structure
896  * Description: this verifies whether the HW supports the Physical
897  * Coding Sublayer (PCS) interface, which can be used when the MAC is
898  * configured for the TBI, RTBI, or SGMII PHY interface.
899  */
900 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
901 {
902 	int interface = priv->plat->interface;
903 
904 	if (priv->dma_cap.pcs) {
905 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
906 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
907 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
908 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
909 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
910 			priv->hw->pcs = STMMAC_PCS_RGMII;
911 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
912 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
913 			priv->hw->pcs = STMMAC_PCS_SGMII;
914 		}
915 	}
916 }
917 
918 /**
919  * stmmac_init_phy - PHY initialization
920  * @dev: net device structure
921  * Description: it initializes the driver's PHY state, and attaches the PHY
922  * to the mac driver.
923  *  Return value:
924  *  0 on success
925  */
926 static int stmmac_init_phy(struct net_device *dev)
927 {
928 	struct stmmac_priv *priv = netdev_priv(dev);
929 	struct phy_device *phydev;
930 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
931 	char bus_id[MII_BUS_ID_SIZE];
932 	int interface = priv->plat->interface;
933 	int max_speed = priv->plat->max_speed;
934 	priv->oldlink = false;
935 	priv->speed = SPEED_UNKNOWN;
936 	priv->oldduplex = DUPLEX_UNKNOWN;
937 
938 	if (priv->plat->phy_node) {
939 		phydev = of_phy_connect(dev, priv->plat->phy_node,
940 					&stmmac_adjust_link, 0, interface);
941 	} else {
942 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
943 			 priv->plat->bus_id);
944 
945 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
946 			 priv->plat->phy_addr);
947 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
948 			   phy_id_fmt);
949 
950 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
951 				     interface);
952 	}
953 
954 	if (IS_ERR_OR_NULL(phydev)) {
955 		netdev_err(priv->dev, "Could not attach to PHY\n");
956 		if (!phydev)
957 			return -ENODEV;
958 
959 		return PTR_ERR(phydev);
960 	}
961 
962 	/* Stop advertising 1000BASE-T capability if the interface is not GMII */
963 	if ((interface == PHY_INTERFACE_MODE_MII) ||
964 	    (interface == PHY_INTERFACE_MODE_RMII) ||
965 		(max_speed < 1000 && max_speed > 0))
966 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
967 					 SUPPORTED_1000baseT_Full);
968 
969 	/*
970 	 * Broken HW is sometimes missing the pull-up resistor on the
971 	 * MDIO line, which results in reads to non-existent devices returning
972 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
973 	 * device as well.
974 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
975 	 */
976 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
977 		phy_disconnect(phydev);
978 		return -ENODEV;
979 	}
980 
981 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
982 	 * subsequent PHY polling; make sure we force a link transition if
983 	 * we have an UP/DOWN/UP transition.
984 	 */
985 	if (phydev->is_pseudo_fixed_link)
986 		phydev->irq = PHY_POLL;
987 
988 	phy_attached_info(phydev);
989 	return 0;
990 }
991 
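/* Example (editorial): PHY_ID_FMT is "%s:%02x" in linux/phy.h, so with
 * bus_id "stmmac-0" and phy_addr 1 the string handed to phy_connect()
 * in stmmac_init_phy() above is "stmmac-0:01".
 */
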
992 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
993 {
994 	u32 rx_cnt = priv->plat->rx_queues_to_use;
995 	void *head_rx;
996 	u32 queue;
997 
998 	/* Display RX rings */
999 	for (queue = 0; queue < rx_cnt; queue++) {
1000 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1001 
1002 		pr_info("\tRX Queue %u rings\n", queue);
1003 
1004 		if (priv->extend_desc)
1005 			head_rx = (void *)rx_q->dma_erx;
1006 		else
1007 			head_rx = (void *)rx_q->dma_rx;
1008 
1009 		/* Display RX ring */
1010 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1011 	}
1012 }
1013 
1014 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1015 {
1016 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1017 	void *head_tx;
1018 	u32 queue;
1019 
1020 	/* Display TX rings */
1021 	for (queue = 0; queue < tx_cnt; queue++) {
1022 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1023 
1024 		pr_info("\tTX Queue %u rings\n", queue);
1025 
1026 		if (priv->extend_desc)
1027 			head_tx = (void *)tx_q->dma_etx;
1028 		else
1029 			head_tx = (void *)tx_q->dma_tx;
1030 
1031 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1032 	}
1033 }
1034 
1035 static void stmmac_display_rings(struct stmmac_priv *priv)
1036 {
1037 	/* Display RX ring */
1038 	stmmac_display_rx_rings(priv);
1039 
1040 	/* Display TX ring */
1041 	stmmac_display_tx_rings(priv);
1042 }
1043 
1044 static int stmmac_set_bfsize(int mtu, int bufsize)
1045 {
1046 	int ret = bufsize;
1047 
1048 	if (mtu >= BUF_SIZE_4KiB)
1049 		ret = BUF_SIZE_8KiB;
1050 	else if (mtu >= BUF_SIZE_2KiB)
1051 		ret = BUF_SIZE_4KiB;
1052 	else if (mtu > DEFAULT_BUFSIZE)
1053 		ret = BUF_SIZE_2KiB;
1054 	else
1055 		ret = DEFAULT_BUFSIZE;
1056 
1057 	return ret;
1058 }
1059 
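/* Examples of the mapping above (editorial, derived from the code):
 * mtu 1500 -> DEFAULT_BUFSIZE (1536), mtu 3000 -> BUF_SIZE_4KiB and
 * mtu 5000 -> BUF_SIZE_8KiB, so the chosen DMA buffer is always large
 * enough to hold a full frame for the given MTU.
 */
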
1060 /**
1061  * stmmac_clear_rx_descriptors - clear RX descriptors
1062  * @priv: driver private structure
1063  * @queue: RX queue index
1064  * Description: this function is called to clear the RX descriptors
1065  * whether basic or extended descriptors are used.
1066  */
1067 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1068 {
1069 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1070 	int i;
1071 
1072 	/* Clear the RX descriptors */
1073 	for (i = 0; i < DMA_RX_SIZE; i++)
1074 		if (priv->extend_desc)
1075 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1076 					priv->use_riwt, priv->mode,
1077 					(i == DMA_RX_SIZE - 1));
1078 		else
1079 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1080 					priv->use_riwt, priv->mode,
1081 					(i == DMA_RX_SIZE - 1));
1082 }
1083 
1084 /**
1085  * stmmac_clear_tx_descriptors - clear tx descriptors
1086  * @priv: driver private structure
1087  * @queue: TX queue index.
1088  * Description: this function is called to clear the TX descriptors
1089  * in case of both basic and extended descriptors are used.
1090  * whether basic or extended descriptors are used.
1091 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1092 {
1093 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1094 	int i;
1095 
1096 	/* Clear the TX descriptors */
1097 	for (i = 0; i < DMA_TX_SIZE; i++)
1098 		if (priv->extend_desc)
1099 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1100 					priv->mode, (i == DMA_TX_SIZE - 1));
1101 		else
1102 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1103 					priv->mode, (i == DMA_TX_SIZE - 1));
1104 }
1105 
1106 /**
1107  * stmmac_clear_descriptors - clear descriptors
1108  * @priv: driver private structure
1109  * Description: this function is called to clear the TX and RX descriptors
1110  * whether basic or extended descriptors are used.
1111  */
1112 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1113 {
1114 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1115 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1116 	u32 queue;
1117 
1118 	/* Clear the RX descriptors */
1119 	for (queue = 0; queue < rx_queue_cnt; queue++)
1120 		stmmac_clear_rx_descriptors(priv, queue);
1121 
1122 	/* Clear the TX descriptors */
1123 	for (queue = 0; queue < tx_queue_cnt; queue++)
1124 		stmmac_clear_tx_descriptors(priv, queue);
1125 }
1126 
1127 /**
1128  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1129  * @priv: driver private structure
1130  * @p: descriptor pointer
1131  * @i: descriptor index
1132  * @flags: gfp flag
1133  * @queue: RX queue index
1134  * Description: this function is called to allocate a receive buffer, perform
1135  * the DMA mapping and init the descriptor.
1136  */
1137 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1138 				  int i, gfp_t flags, u32 queue)
1139 {
1140 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1141 	struct sk_buff *skb;
1142 
1143 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1144 	if (!skb) {
1145 		netdev_err(priv->dev,
1146 			   "%s: Rx init fails; skb is NULL\n", __func__);
1147 		return -ENOMEM;
1148 	}
1149 	rx_q->rx_skbuff[i] = skb;
1150 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1151 						priv->dma_buf_sz,
1152 						DMA_FROM_DEVICE);
1153 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1154 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1155 		dev_kfree_skb_any(skb);
1156 		return -EINVAL;
1157 	}
1158 
1159 	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1160 
1161 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1162 		stmmac_init_desc3(priv, p);
1163 
1164 	return 0;
1165 }
1166 
1167 /**
1168  * stmmac_free_rx_buffer - free RX dma buffers
1169  * @priv: private structure
1170  * @queue: RX queue index
1171  * @i: buffer index.
1172  */
1173 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1174 {
1175 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1176 
1177 	if (rx_q->rx_skbuff[i]) {
1178 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1179 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1180 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1181 	}
1182 	rx_q->rx_skbuff[i] = NULL;
1183 }
1184 
1185 /**
1186  * stmmac_free_tx_buffer - free TX dma buffers
1187  * @priv: private structure
1188  * @queue: TX queue index
1189  * @i: buffer index.
1190  */
1191 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1192 {
1193 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1194 
1195 	if (tx_q->tx_skbuff_dma[i].buf) {
1196 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1197 			dma_unmap_page(priv->device,
1198 				       tx_q->tx_skbuff_dma[i].buf,
1199 				       tx_q->tx_skbuff_dma[i].len,
1200 				       DMA_TO_DEVICE);
1201 		else
1202 			dma_unmap_single(priv->device,
1203 					 tx_q->tx_skbuff_dma[i].buf,
1204 					 tx_q->tx_skbuff_dma[i].len,
1205 					 DMA_TO_DEVICE);
1206 	}
1207 
1208 	if (tx_q->tx_skbuff[i]) {
1209 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1210 		tx_q->tx_skbuff[i] = NULL;
1211 		tx_q->tx_skbuff_dma[i].buf = 0;
1212 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1213 	}
1214 }
1215 
1216 /**
1217  * init_dma_rx_desc_rings - init the RX descriptor rings
1218  * @dev: net device structure
1219  * @flags: gfp flag.
1220  * Description: this function initializes the DMA RX descriptors
1221  * and allocates the socket buffers. It supports the chained and ring
1222  * modes.
1223  */
1224 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1225 {
1226 	struct stmmac_priv *priv = netdev_priv(dev);
1227 	u32 rx_count = priv->plat->rx_queues_to_use;
1228 	int ret = -ENOMEM;
1229 	int bfsize = 0;
1230 	int queue;
1231 	int i;
1232 
1233 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1234 	if (bfsize < 0)
1235 		bfsize = 0;
1236 
1237 	if (bfsize < BUF_SIZE_16KiB)
1238 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1239 
1240 	priv->dma_buf_sz = bfsize;
1241 
1242 	/* RX INITIALIZATION */
1243 	netif_dbg(priv, probe, priv->dev,
1244 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1245 
1246 	for (queue = 0; queue < rx_count; queue++) {
1247 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1248 
1249 		netif_dbg(priv, probe, priv->dev,
1250 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1251 			  (u32)rx_q->dma_rx_phy);
1252 
1253 		for (i = 0; i < DMA_RX_SIZE; i++) {
1254 			struct dma_desc *p;
1255 
1256 			if (priv->extend_desc)
1257 				p = &((rx_q->dma_erx + i)->basic);
1258 			else
1259 				p = rx_q->dma_rx + i;
1260 
1261 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1262 						     queue);
1263 			if (ret)
1264 				goto err_init_rx_buffers;
1265 
1266 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1267 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1268 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1269 		}
1270 
1271 		rx_q->cur_rx = 0;
1272 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1273 
1274 		stmmac_clear_rx_descriptors(priv, queue);
1275 
1276 		/* Setup the chained descriptor addresses */
1277 		if (priv->mode == STMMAC_CHAIN_MODE) {
1278 			if (priv->extend_desc)
1279 				stmmac_mode_init(priv, rx_q->dma_erx,
1280 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1281 			else
1282 				stmmac_mode_init(priv, rx_q->dma_rx,
1283 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1284 		}
1285 	}
1286 
1287 	buf_sz = bfsize;
1288 
1289 	return 0;
1290 
1291 err_init_rx_buffers:
1292 	while (queue >= 0) {
1293 		while (--i >= 0)
1294 			stmmac_free_rx_buffer(priv, queue, i);
1295 
1296 		if (queue == 0)
1297 			break;
1298 
1299 		i = DMA_RX_SIZE;
1300 		queue--;
1301 	}
1302 
1303 	return ret;
1304 }
1305 
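/* Editorial note on the unwind path above: on a buffer allocation
 * failure, err_init_rx_buffers first frees the i buffers already set up
 * in the failing queue, then all DMA_RX_SIZE buffers of each earlier
 * queue, walking the queues in reverse until queue 0 has been cleaned.
 */
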
1306 /**
1307  * init_dma_tx_desc_rings - init the TX descriptor rings
1308  * @dev: net device structure.
1309  * Description: this function initializes the DMA TX descriptors
1310  * and the per-queue bookkeeping state. It supports the chained and
1311  * ring modes.
1312  */
1313 static int init_dma_tx_desc_rings(struct net_device *dev)
1314 {
1315 	struct stmmac_priv *priv = netdev_priv(dev);
1316 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1317 	u32 queue;
1318 	int i;
1319 
1320 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1321 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1322 
1323 		netif_dbg(priv, probe, priv->dev,
1324 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1325 			 (u32)tx_q->dma_tx_phy);
1326 
1327 		/* Setup the chained descriptor addresses */
1328 		if (priv->mode == STMMAC_CHAIN_MODE) {
1329 			if (priv->extend_desc)
1330 				stmmac_mode_init(priv, tx_q->dma_etx,
1331 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1332 			else
1333 				stmmac_mode_init(priv, tx_q->dma_tx,
1334 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1335 		}
1336 
1337 		for (i = 0; i < DMA_TX_SIZE; i++) {
1338 			struct dma_desc *p;
1339 			if (priv->extend_desc)
1340 				p = &((tx_q->dma_etx + i)->basic);
1341 			else
1342 				p = tx_q->dma_tx + i;
1343 
1344 			stmmac_clear_desc(priv, p);
1345 
1346 			tx_q->tx_skbuff_dma[i].buf = 0;
1347 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1348 			tx_q->tx_skbuff_dma[i].len = 0;
1349 			tx_q->tx_skbuff_dma[i].last_segment = false;
1350 			tx_q->tx_skbuff[i] = NULL;
1351 		}
1352 
1353 		tx_q->dirty_tx = 0;
1354 		tx_q->cur_tx = 0;
1355 		tx_q->mss = 0;
1356 
1357 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1358 	}
1359 
1360 	return 0;
1361 }
1362 
1363 /**
1364  * init_dma_desc_rings - init the RX/TX descriptor rings
1365  * @dev: net device structure
1366  * @flags: gfp flag.
1367  * Description: this function initializes the DMA RX/TX descriptors
1368  * and allocates the socket buffers. It supports the chained and ring
1369  * modes.
1370  */
1371 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1372 {
1373 	struct stmmac_priv *priv = netdev_priv(dev);
1374 	int ret;
1375 
1376 	ret = init_dma_rx_desc_rings(dev, flags);
1377 	if (ret)
1378 		return ret;
1379 
1380 	ret = init_dma_tx_desc_rings(dev);
1381 
1382 	stmmac_clear_descriptors(priv);
1383 
1384 	if (netif_msg_hw(priv))
1385 		stmmac_display_rings(priv);
1386 
1387 	return ret;
1388 }
1389 
1390 /**
1391  * dma_free_rx_skbufs - free RX dma buffers
1392  * @priv: private structure
1393  * @queue: RX queue index
1394  */
1395 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1396 {
1397 	int i;
1398 
1399 	for (i = 0; i < DMA_RX_SIZE; i++)
1400 		stmmac_free_rx_buffer(priv, queue, i);
1401 }
1402 
1403 /**
1404  * dma_free_tx_skbufs - free TX dma buffers
1405  * @priv: private structure
1406  * @queue: TX queue index
1407  */
1408 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1409 {
1410 	int i;
1411 
1412 	for (i = 0; i < DMA_TX_SIZE; i++)
1413 		stmmac_free_tx_buffer(priv, queue, i);
1414 }
1415 
1416 /**
1417  * free_dma_rx_desc_resources - free RX dma desc resources
1418  * @priv: private structure
1419  */
1420 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1421 {
1422 	u32 rx_count = priv->plat->rx_queues_to_use;
1423 	u32 queue;
1424 
1425 	/* Free RX queue resources */
1426 	for (queue = 0; queue < rx_count; queue++) {
1427 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1428 
1429 		/* Release the DMA RX socket buffers */
1430 		dma_free_rx_skbufs(priv, queue);
1431 
1432 		/* Free DMA regions of consistent memory previously allocated */
1433 		if (!priv->extend_desc)
1434 			dma_free_coherent(priv->device,
1435 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1436 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1437 		else
1438 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1439 					  sizeof(struct dma_extended_desc),
1440 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1441 
1442 		kfree(rx_q->rx_skbuff_dma);
1443 		kfree(rx_q->rx_skbuff);
1444 	}
1445 }
1446 
1447 /**
1448  * free_dma_tx_desc_resources - free TX dma desc resources
1449  * @priv: private structure
1450  */
1451 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1452 {
1453 	u32 tx_count = priv->plat->tx_queues_to_use;
1454 	u32 queue;
1455 
1456 	/* Free TX queue resources */
1457 	for (queue = 0; queue < tx_count; queue++) {
1458 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1459 
1460 		/* Release the DMA TX socket buffers */
1461 		dma_free_tx_skbufs(priv, queue);
1462 
1463 		/* Free DMA regions of consistent memory previously allocated */
1464 		if (!priv->extend_desc)
1465 			dma_free_coherent(priv->device,
1466 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1467 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1468 		else
1469 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1470 					  sizeof(struct dma_extended_desc),
1471 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1472 
1473 		kfree(tx_q->tx_skbuff_dma);
1474 		kfree(tx_q->tx_skbuff);
1475 	}
1476 }
1477 
1478 /**
1479  * alloc_dma_rx_desc_resources - alloc RX resources.
1480  * @priv: private structure
1481  * Description: according to which descriptor type can be used (extended
1482  * or basic) this function allocates the resources for the RX path. It
1483  * pre-allocates the RX socket buffers in order to allow a zero-copy
1484  * mechanism.
1485  */
1486 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1487 {
1488 	u32 rx_count = priv->plat->rx_queues_to_use;
1489 	int ret = -ENOMEM;
1490 	u32 queue;
1491 
1492 	/* RX queues buffers and DMA */
1493 	for (queue = 0; queue < rx_count; queue++) {
1494 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1495 
1496 		rx_q->queue_index = queue;
1497 		rx_q->priv_data = priv;
1498 
1499 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1500 						    sizeof(dma_addr_t),
1501 						    GFP_KERNEL);
1502 		if (!rx_q->rx_skbuff_dma)
1503 			goto err_dma;
1504 
1505 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1506 						sizeof(struct sk_buff *),
1507 						GFP_KERNEL);
1508 		if (!rx_q->rx_skbuff)
1509 			goto err_dma;
1510 
1511 		if (priv->extend_desc) {
1512 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1513 							    DMA_RX_SIZE *
1514 							    sizeof(struct
1515 							    dma_extended_desc),
1516 							    &rx_q->dma_rx_phy,
1517 							    GFP_KERNEL);
1518 			if (!rx_q->dma_erx)
1519 				goto err_dma;
1520 
1521 		} else {
1522 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1523 							   DMA_RX_SIZE *
1524 							   sizeof(struct
1525 							   dma_desc),
1526 							   &rx_q->dma_rx_phy,
1527 							   GFP_KERNEL);
1528 			if (!rx_q->dma_rx)
1529 				goto err_dma;
1530 		}
1531 	}
1532 
1533 	return 0;
1534 
1535 err_dma:
1536 	free_dma_rx_desc_resources(priv);
1537 
1538 	return ret;
1539 }
1540 
1541 /**
1542  * alloc_dma_tx_desc_resources - alloc TX resources.
1543  * @priv: private structure
1544  * Description: according to which descriptor type can be used (extended
1545  * or basic) this function allocates the resources for the TX path:
1546  * the descriptor rings in coherent memory and the bookkeeping arrays
1547  * used to track the queued socket buffers.
1548  */
1549 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1550 {
1551 	u32 tx_count = priv->plat->tx_queues_to_use;
1552 	int ret = -ENOMEM;
1553 	u32 queue;
1554 
1555 	/* TX queues buffers and DMA */
1556 	for (queue = 0; queue < tx_count; queue++) {
1557 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1558 
1559 		tx_q->queue_index = queue;
1560 		tx_q->priv_data = priv;
1561 
1562 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1563 						    sizeof(*tx_q->tx_skbuff_dma),
1564 						    GFP_KERNEL);
1565 		if (!tx_q->tx_skbuff_dma)
1566 			goto err_dma;
1567 
1568 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1569 						sizeof(struct sk_buff *),
1570 						GFP_KERNEL);
1571 		if (!tx_q->tx_skbuff)
1572 			goto err_dma;
1573 
1574 		if (priv->extend_desc) {
1575 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1576 							    DMA_TX_SIZE *
1577 							    sizeof(struct
1578 							    dma_extended_desc),
1579 							    &tx_q->dma_tx_phy,
1580 							    GFP_KERNEL);
1581 			if (!tx_q->dma_etx)
1582 				goto err_dma;
1583 		} else {
1584 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1585 							   DMA_TX_SIZE *
1586 							   sizeof(struct
1587 								  dma_desc),
1588 							   &tx_q->dma_tx_phy,
1589 							   GFP_KERNEL);
1590 			if (!tx_q->dma_tx)
1591 				goto err_dma;
1592 		}
1593 	}
1594 
1595 	return 0;
1596 
1597 err_dma:
1598 	free_dma_tx_desc_resources(priv);
1599 
1600 	return ret;
1601 }
1602 
1603 /**
1604  * alloc_dma_desc_resources - alloc TX/RX resources.
1605  * @priv: private structure
1606  * Description: according to which descriptor type can be used (extended
1607  * or basic) this function allocates the resources for the TX and RX
1608  * paths. For reception, for example, it pre-allocates the RX socket
1609  * buffers in order to allow a zero-copy mechanism.
1610  */
1611 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1612 {
1613 	/* RX Allocation */
1614 	int ret = alloc_dma_rx_desc_resources(priv);
1615 
1616 	if (ret)
1617 		return ret;
1618 
1619 	ret = alloc_dma_tx_desc_resources(priv);
1620 
1621 	return ret;
1622 }
1623 
1624 /**
1625  * free_dma_desc_resources - free dma desc resources
1626  * @priv: private structure
1627  */
1628 static void free_dma_desc_resources(struct stmmac_priv *priv)
1629 {
1630 	/* Release the DMA RX socket buffers */
1631 	free_dma_rx_desc_resources(priv);
1632 
1633 	/* Release the DMA TX socket buffers */
1634 	free_dma_tx_desc_resources(priv);
1635 }
1636 
1637 /**
1638  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1639  *  @priv: driver private structure
1640  *  Description: It is used for enabling the rx queues in the MAC
1641  */
1642 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1643 {
1644 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1645 	int queue;
1646 	u8 mode;
1647 
1648 	for (queue = 0; queue < rx_queues_count; queue++) {
1649 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1650 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1651 	}
1652 }
1653 
1654 /**
1655  * stmmac_start_rx_dma - start RX DMA channel
1656  * @priv: driver private structure
1657  * @chan: RX channel index
1658  * Description:
1659  * This starts an RX DMA channel
1660  */
1661 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1662 {
1663 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1664 	stmmac_start_rx(priv, priv->ioaddr, chan);
1665 }
1666 
1667 /**
1668  * stmmac_start_tx_dma - start TX DMA channel
1669  * @priv: driver private structure
1670  * @chan: TX channel index
1671  * Description:
1672  * This starts a TX DMA channel
1673  */
1674 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1675 {
1676 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1677 	stmmac_start_tx(priv, priv->ioaddr, chan);
1678 }
1679 
1680 /**
1681  * stmmac_stop_rx_dma - stop RX DMA channel
1682  * @priv: driver private structure
1683  * @chan: RX channel index
1684  * Description:
1685  * This stops an RX DMA channel
1686  */
1687 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1688 {
1689 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1690 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1691 }
1692 
1693 /**
1694  * stmmac_stop_tx_dma - stop TX DMA channel
1695  * @priv: driver private structure
1696  * @chan: TX channel index
1697  * Description:
1698  * This stops a TX DMA channel
1699  */
1700 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1701 {
1702 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1703 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1704 }
1705 
1706 /**
1707  * stmmac_start_all_dma - start all RX and TX DMA channels
1708  * @priv: driver private structure
1709  * Description:
1710  * This starts all the RX and TX DMA channels
1711  */
1712 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1713 {
1714 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1715 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1716 	u32 chan = 0;
1717 
1718 	for (chan = 0; chan < rx_channels_count; chan++)
1719 		stmmac_start_rx_dma(priv, chan);
1720 
1721 	for (chan = 0; chan < tx_channels_count; chan++)
1722 		stmmac_start_tx_dma(priv, chan);
1723 }
1724 
1725 /**
1726  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1727  * @priv: driver private structure
1728  * Description:
1729  * This stops the RX and TX DMA channels
1730  */
1731 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1732 {
1733 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1734 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1735 	u32 chan = 0;
1736 
1737 	for (chan = 0; chan < rx_channels_count; chan++)
1738 		stmmac_stop_rx_dma(priv, chan);
1739 
1740 	for (chan = 0; chan < tx_channels_count; chan++)
1741 		stmmac_stop_tx_dma(priv, chan);
1742 }
1743 
1744 /**
1745  *  stmmac_dma_operation_mode - HW DMA operation mode
1746  *  @priv: driver private structure
1747  *  Description: it is used for configuring the DMA operation mode register in
1748  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1749  */
1750 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1751 {
1752 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1753 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1754 	int rxfifosz = priv->plat->rx_fifo_size;
1755 	int txfifosz = priv->plat->tx_fifo_size;
1756 	u32 txmode = 0;
1757 	u32 rxmode = 0;
1758 	u32 chan = 0;
1759 	u8 qmode = 0;
1760 
1761 	if (rxfifosz == 0)
1762 		rxfifosz = priv->dma_cap.rx_fifo_size;
1763 	if (txfifosz == 0)
1764 		txfifosz = priv->dma_cap.tx_fifo_size;
1765 
1766 	/* Adjust for real per queue fifo size */
1767 	rxfifosz /= rx_channels_count;
1768 	txfifosz /= tx_channels_count;
1769 
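	/* Select the operating mode: pure threshold mode when forced,
	 * Store-And-Forward on both paths when forced or when TX checksum
	 * offload is in use, otherwise threshold mode on TX and
	 * Store-And-Forward on RX.
	 */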
1770 	if (priv->plat->force_thresh_dma_mode) {
1771 		txmode = tc;
1772 		rxmode = tc;
1773 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1774 		/*
1775 		 * In case of GMAC, SF mode can be enabled
1776 		 * to perform the TX COE in HW. This depends on:
1777 		 * 1) TX COE being actually supported
1778 		 * 2) there being no bugged Jumbo frame support
1779 		 *    that requires the csum not to be inserted in the TDES.
1780 		 */
1781 		txmode = SF_DMA_MODE;
1782 		rxmode = SF_DMA_MODE;
1783 		priv->xstats.threshold = SF_DMA_MODE;
1784 	} else {
1785 		txmode = tc;
1786 		rxmode = SF_DMA_MODE;
1787 	}
1788 
1789 	/* configure all channels */
1790 	for (chan = 0; chan < rx_channels_count; chan++) {
1791 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1792 
1793 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1794 				rxfifosz, qmode);
1795 	}
1796 
1797 	for (chan = 0; chan < tx_channels_count; chan++) {
1798 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1799 
1800 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1801 				txfifosz, qmode);
1802 	}
1803 }
1804 
1805 /**
1806  * stmmac_tx_clean - to manage the transmission completion
1807  * @priv: driver private structure
1808  * @queue: TX queue index
1809  * Description: it reclaims the transmit resources after transmission completes.
1810  */
1811 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1812 {
1813 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1814 	unsigned int bytes_compl = 0, pkts_compl = 0;
1815 	unsigned int entry;
1816 
1817 	netif_tx_lock(priv->dev);
1818 
1819 	priv->xstats.tx_clean++;
1820 
1821 	entry = tx_q->dirty_tx;
1822 	while (entry != tx_q->cur_tx) {
1823 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1824 		struct dma_desc *p;
1825 		int status;
1826 
1827 		if (priv->extend_desc)
1828 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1829 		else
1830 			p = tx_q->dma_tx + entry;
1831 
1832 		status = stmmac_tx_status(priv, &priv->dev->stats,
1833 				&priv->xstats, p, priv->ioaddr);
1834 		/* Check if the descriptor is owned by the DMA */
1835 		if (unlikely(status & tx_dma_own))
1836 			break;
1837 
1838 		/* Make sure descriptor fields are read after reading
1839 		 * the own bit.
1840 		 */
1841 		dma_rmb();
1842 
1843 		/* Just consider the last segment and ... */
1844 		if (likely(!(status & tx_not_ls))) {
1845 			/* ... verify the status error condition */
1846 			if (unlikely(status & tx_err)) {
1847 				priv->dev->stats.tx_errors++;
1848 			} else {
1849 				priv->dev->stats.tx_packets++;
1850 				priv->xstats.tx_pkt_n++;
1851 			}
1852 			stmmac_get_tx_hwtstamp(priv, p, skb);
1853 		}
1854 
1855 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1856 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1857 				dma_unmap_page(priv->device,
1858 					       tx_q->tx_skbuff_dma[entry].buf,
1859 					       tx_q->tx_skbuff_dma[entry].len,
1860 					       DMA_TO_DEVICE);
1861 			else
1862 				dma_unmap_single(priv->device,
1863 						 tx_q->tx_skbuff_dma[entry].buf,
1864 						 tx_q->tx_skbuff_dma[entry].len,
1865 						 DMA_TO_DEVICE);
1866 			tx_q->tx_skbuff_dma[entry].buf = 0;
1867 			tx_q->tx_skbuff_dma[entry].len = 0;
1868 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1869 		}
1870 
1871 		stmmac_clean_desc3(priv, tx_q, p);
1872 
1873 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1874 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1875 
1876 		if (likely(skb != NULL)) {
1877 			pkts_compl++;
1878 			bytes_compl += skb->len;
1879 			dev_consume_skb_any(skb);
1880 			tx_q->tx_skbuff[entry] = NULL;
1881 		}
1882 
1883 		stmmac_release_tx_desc(priv, p, priv->mode);
1884 
1885 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1886 	}
1887 	tx_q->dirty_tx = entry;
1888 
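	/* Report the reclaimed packets and bytes to the stack's Byte Queue
	 * Limits machinery so it can size the queue accordingly.
	 */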
1889 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1890 				  pkts_compl, bytes_compl);
1891 
1892 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1893 								queue))) &&
1894 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1895 
1896 		netif_dbg(priv, tx_done, priv->dev,
1897 			  "%s: restart transmit\n", __func__);
1898 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1899 	}
1900 
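	/* With some TX resources reclaimed, try to (re)enter LPI mode and
	 * re-arm the EEE expiry timer.
	 */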
1901 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1902 		stmmac_enable_eee_mode(priv);
1903 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1904 	}
1905 	netif_tx_unlock(priv->dev);
1906 }
1907 
1908 /**
1909  * stmmac_tx_err - to manage the tx error
1910  * @priv: driver private structure
1911  * @chan: channel index
1912  * Description: it cleans the descriptors and restarts the transmission
1913  * in case of transmission errors.
1914  */
1915 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1916 {
1917 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1918 	int i;
1919 
1920 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1921 
1922 	stmmac_stop_tx_dma(priv, chan);
1923 	dma_free_tx_skbufs(priv, chan);
1924 	for (i = 0; i < DMA_TX_SIZE; i++)
1925 		if (priv->extend_desc)
1926 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1927 					priv->mode, (i == DMA_TX_SIZE - 1));
1928 		else
1929 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1930 					priv->mode, (i == DMA_TX_SIZE - 1));
1931 	tx_q->dirty_tx = 0;
1932 	tx_q->cur_tx = 0;
1933 	tx_q->mss = 0;
1934 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1935 	stmmac_start_tx_dma(priv, chan);
1936 
1937 	priv->dev->stats.tx_errors++;
1938 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1939 }
1940 
1941 /**
1942  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1943  *  @priv: driver private structure
1944  *  @txmode: TX operating mode
1945  *  @rxmode: RX operating mode
1946  *  @chan: channel index
1947  *  Description: it is used for configuring the DMA operation mode at
1948  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1949  *  mode.
1950  */
1951 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1952 					  u32 rxmode, u32 chan)
1953 {
1954 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1955 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1956 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1957 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1958 	int rxfifosz = priv->plat->rx_fifo_size;
1959 	int txfifosz = priv->plat->tx_fifo_size;
1960 
1961 	if (rxfifosz == 0)
1962 		rxfifosz = priv->dma_cap.rx_fifo_size;
1963 	if (txfifosz == 0)
1964 		txfifosz = priv->dma_cap.tx_fifo_size;
1965 
1966 	/* Adjust for real per queue fifo size */
1967 	rxfifosz /= rx_channels_count;
1968 	txfifosz /= tx_channels_count;
1969 
1970 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
1971 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
1972 }
1973 
1974 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
1975 {
1976 	int ret;
1977 
1978 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
1979 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
1980 	if (ret && (ret != -EINVAL)) {
1981 		stmmac_global_err(priv);
1982 		return true;
1983 	}
1984 
1985 	return false;
1986 }
1987 
1988 /**
1989  * stmmac_dma_interrupt - DMA ISR
1990  * @priv: driver private structure
1991  * Description: this is the DMA ISR. It is called by the main ISR.
1992  * It calls the dwmac dma routine and schedules the poll method when there
1993  * is work to be done.
1994  */
1995 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1996 {
1997 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
1998 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
1999 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2000 				tx_channel_count : rx_channel_count;
2001 	u32 chan;
2002 	bool poll_scheduled = false;
2003 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2004 
2005 	/* Make sure we never check beyond our status buffer. */
2006 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2007 		channels_to_check = ARRAY_SIZE(status);
2008 
2009 	/* Each DMA channel can be used for rx and tx simultaneously, yet
2010 	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2011 	 * stmmac_channel struct.
2012 	 * Because of this, stmmac_poll currently checks (and possibly wakes)
2013 	 * all tx queues rather than just a single tx queue.
2014 	 */
2015 	for (chan = 0; chan < channels_to_check; chan++)
2016 		status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2017 				&priv->xstats, chan);
2018 
2019 	for (chan = 0; chan < rx_channel_count; chan++) {
2020 		if (likely(status[chan] & handle_rx)) {
2021 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2022 
2023 			if (likely(napi_schedule_prep(&rx_q->napi))) {
2024 				stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2025 				__napi_schedule(&rx_q->napi);
2026 				poll_scheduled = true;
2027 			}
2028 		}
2029 	}
2030 
2031 	/* If we scheduled poll, we already know that tx queues will be checked.
2032 	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2033 	 * completed transmission and, if so, call stmmac_poll (once).
2034 	 */
2035 	if (!poll_scheduled) {
2036 		for (chan = 0; chan < tx_channel_count; chan++) {
2037 			if (status[chan] & handle_tx) {
2038 				/* It doesn't matter what rx queue we choose
2039 				 * here. We use 0 since it always exists.
2040 				 */
2041 				struct stmmac_rx_queue *rx_q =
2042 					&priv->rx_queue[0];
2043 
2044 				if (likely(napi_schedule_prep(&rx_q->napi))) {
2045 					stmmac_disable_dma_irq(priv,
2046 							priv->ioaddr, chan);
2047 					__napi_schedule(&rx_q->napi);
2048 				}
2049 				break;
2050 			}
2051 		}
2052 	}
2053 
2054 	for (chan = 0; chan < tx_channel_count; chan++) {
2055 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2056 			/* Try to bump up the dma threshold on this failure */
2057 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2058 			    (tc <= 256)) {
2059 				tc += 64;
2060 				if (priv->plat->force_thresh_dma_mode)
2061 					stmmac_set_dma_operation_mode(priv,
2062 								      tc,
2063 								      tc,
2064 								      chan);
2065 				else
2066 					stmmac_set_dma_operation_mode(priv,
2067 								    tc,
2068 								    SF_DMA_MODE,
2069 								    chan);
2070 				priv->xstats.threshold = tc;
2071 			}
2072 		} else if (unlikely(status[chan] == tx_hard_error)) {
2073 			stmmac_tx_err(priv, chan);
2074 		}
2075 	}
2076 }
2077 
2078 /**
2079  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2080  * @priv: driver private structure
2081  * Description: this masks the MMC irq; the counters are in fact managed in SW.
2082  */
2083 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2084 {
2085 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2086 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2087 
2088 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2089 
2090 	if (priv->dma_cap.rmon) {
2091 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2092 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2093 	} else
2094 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2095 }
2096 
2097 /**
2098  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2099  * @priv: driver private structure
2100  * Description:
2101  *  newer GMAC chip generations have a register to indicate the
2102  *  presence of optional features/functions.
2103  *  This can also be used to override the values passed through the
2104  *  platform code, which is necessary for old MAC10/100 and GMAC chips.
2105  */
2106 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2107 {
2108 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2109 }
2110 
2111 /**
2112  * stmmac_check_ether_addr - check if the MAC addr is valid
2113  * @priv: driver private structure
2114  * Description:
2115  * it verifies that the MAC address is valid; if it is not, a random
2116  * MAC address is generated
2117  */
2118 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2119 {
2120 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2121 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2122 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2123 			eth_hw_addr_random(priv->dev);
2124 		netdev_info(priv->dev, "device MAC address %pM\n",
2125 			    priv->dev->dev_addr);
2126 	}
2127 }
2128 
2129 /**
2130  * stmmac_init_dma_engine - DMA init.
2131  * @priv: driver private structure
2132  * Description:
2133  * It inits the DMA invoking the specific MAC/GMAC callback.
2134  * Some DMA parameters can be passed from the platform;
2135  * in case they are not passed, a default is kept for the MAC or GMAC.
2136  */
2137 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2138 {
2139 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2140 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2141 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2142 	struct stmmac_rx_queue *rx_q;
2143 	struct stmmac_tx_queue *tx_q;
2144 	u32 chan = 0;
2145 	int atds = 0;
2146 	int ret = 0;
2147 
2148 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2149 		dev_err(priv->device, "Invalid DMA configuration\n");
2150 		return -EINVAL;
2151 	}
2152 
2153 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2154 		atds = 1;
2155 
2156 	ret = stmmac_reset(priv, priv->ioaddr);
2157 	if (ret) {
2158 		dev_err(priv->device, "Failed to reset the dma\n");
2159 		return ret;
2160 	}
2161 
2162 	/* DMA RX Channel Configuration */
2163 	for (chan = 0; chan < rx_channels_count; chan++) {
2164 		rx_q = &priv->rx_queue[chan];
2165 
2166 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2167 				    rx_q->dma_rx_phy, chan);
2168 
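		/* The tail pointer is programmed one descriptor past the end
		 * of the ring, so the DMA sees the whole ring as available.
		 */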
2169 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2170 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2171 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2172 				       rx_q->rx_tail_addr, chan);
2173 	}
2174 
2175 	/* DMA TX Channel Configuration */
2176 	for (chan = 0; chan < tx_channels_count; chan++) {
2177 		tx_q = &priv->tx_queue[chan];
2178 
2179 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2180 				    tx_q->dma_tx_phy, chan);
2181 
2182 		tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2183 			    (DMA_TX_SIZE * sizeof(struct dma_desc));
2184 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2185 				       tx_q->tx_tail_addr, chan);
2186 	}
2187 
2188 	/* DMA CSR Channel configuration */
2189 	for (chan = 0; chan < dma_csr_ch; chan++)
2190 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2191 
2192 	/* DMA Configuration */
2193 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2194 
2195 	if (priv->plat->axi)
2196 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2197 
2198 	return ret;
2199 }
2200 
2201 /**
2202  * stmmac_tx_timer - mitigation sw timer for tx.
2203  * @t: pointer to the timer_list that expired
2204  * Description:
2205  * This is the timer handler to directly invoke the stmmac_tx_clean.
2206  */
2207 static void stmmac_tx_timer(struct timer_list *t)
2208 {
2209 	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2210 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2211 	u32 queue;
2212 
2213 	/* let's scan all the tx queues */
2214 	for (queue = 0; queue < tx_queues_count; queue++)
2215 		stmmac_tx_clean(priv, queue);
2216 }
2217 
2218 /**
2219  * stmmac_init_tx_coalesce - init tx mitigation options.
2220  * @priv: driver private structure
2221  * Description:
2222  * This inits the transmit coalesce parameters: i.e. timer rate,
2223  * timer handler and default threshold used for enabling the
2224  * interrupt on completion bit.
2225  */
2226 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2227 {
2228 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2229 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2230 	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2231 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2232 	add_timer(&priv->txtimer);
2233 }
2234 
2235 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2236 {
2237 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2238 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2239 	u32 chan;
2240 
2241 	/* set TX ring length */
2242 	for (chan = 0; chan < tx_channels_count; chan++)
2243 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2244 				(DMA_TX_SIZE - 1), chan);
2245 
2246 	/* set RX ring length */
2247 	for (chan = 0; chan < rx_channels_count; chan++)
2248 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2249 				(DMA_RX_SIZE - 1), chan);
2250 }
2251 
2252 /**
2253  *  stmmac_set_tx_queue_weight - Set TX queue weight
2254  *  @priv: driver private structure
2255  *  Description: It is used for setting the TX queue weights
2256  */
2257 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2258 {
2259 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2260 	u32 weight;
2261 	u32 queue;
2262 
2263 	for (queue = 0; queue < tx_queues_count; queue++) {
2264 		weight = priv->plat->tx_queues_cfg[queue].weight;
2265 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2266 	}
2267 }
2268 
2269 /**
2270  *  stmmac_configure_cbs - Configure CBS in TX queue
2271  *  @priv: driver private structure
2272  *  Description: It is used for configuring CBS in AVB TX queues
2273  */
2274 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2275 {
2276 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2277 	u32 mode_to_use;
2278 	u32 queue;
2279 
2280 	/* queue 0 is reserved for legacy traffic */
2281 	for (queue = 1; queue < tx_queues_count; queue++) {
2282 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2283 		if (mode_to_use == MTL_QUEUE_DCB)
2284 			continue;
2285 
2286 		stmmac_config_cbs(priv, priv->hw,
2287 				priv->plat->tx_queues_cfg[queue].send_slope,
2288 				priv->plat->tx_queues_cfg[queue].idle_slope,
2289 				priv->plat->tx_queues_cfg[queue].high_credit,
2290 				priv->plat->tx_queues_cfg[queue].low_credit,
2291 				queue);
2292 	}
2293 }
2294 
2295 /**
2296  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2297  *  @priv: driver private structure
2298  *  Description: It is used for mapping RX queues to RX dma channels
2299  */
2300 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2301 {
2302 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2303 	u32 queue;
2304 	u32 chan;
2305 
2306 	for (queue = 0; queue < rx_queues_count; queue++) {
2307 		chan = priv->plat->rx_queues_cfg[queue].chan;
2308 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2309 	}
2310 }
2311 
2312 /**
2313  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2314  *  @priv: driver private structure
2315  *  Description: It is used for configuring the RX Queue Priority
2316  */
2317 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2318 {
2319 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2320 	u32 queue;
2321 	u32 prio;
2322 
2323 	for (queue = 0; queue < rx_queues_count; queue++) {
2324 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2325 			continue;
2326 
2327 		prio = priv->plat->rx_queues_cfg[queue].prio;
2328 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2329 	}
2330 }
2331 
2332 /**
2333  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2334  *  @priv: driver private structure
2335  *  Description: It is used for configuring the TX Queue Priority
2336  */
2337 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2338 {
2339 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2340 	u32 queue;
2341 	u32 prio;
2342 
2343 	for (queue = 0; queue < tx_queues_count; queue++) {
2344 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2345 			continue;
2346 
2347 		prio = priv->plat->tx_queues_cfg[queue].prio;
2348 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2349 	}
2350 }
2351 
2352 /**
2353  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2354  *  @priv: driver private structure
2355  *  Description: It is used for configuring the RX queue routing
2356  */
2357 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2358 {
2359 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2360 	u32 queue;
2361 	u8 packet;
2362 
2363 	for (queue = 0; queue < rx_queues_count; queue++) {
2364 		/* no specific packet type routing specified for the queue */
2365 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2366 			continue;
2367 
2368 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2369 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2370 	}
2371 }
2372 
2373 /**
2374  *  stmmac_mtl_configuration - Configure MTL
2375  *  @priv: driver private structure
2376  *  Description: It is used for configuring the MTL
2377  */
2378 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2379 {
2380 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2381 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2382 
2383 	if (tx_queues_count > 1)
2384 		stmmac_set_tx_queue_weight(priv);
2385 
2386 	/* Configure MTL RX algorithms */
2387 	if (rx_queues_count > 1)
2388 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2389 				priv->plat->rx_sched_algorithm);
2390 
2391 	/* Configure MTL TX algorithms */
2392 	if (tx_queues_count > 1)
2393 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2394 				priv->plat->tx_sched_algorithm);
2395 
2396 	/* Configure CBS in AVB TX queues */
2397 	if (tx_queues_count > 1)
2398 		stmmac_configure_cbs(priv);
2399 
2400 	/* Map RX MTL to DMA channels */
2401 	stmmac_rx_queue_dma_chan_map(priv);
2402 
2403 	/* Enable MAC RX Queues */
2404 	stmmac_mac_enable_rx_queues(priv);
2405 
2406 	/* Set RX priorities */
2407 	if (rx_queues_count > 1)
2408 		stmmac_mac_config_rx_queues_prio(priv);
2409 
2410 	/* Set TX priorities */
2411 	if (tx_queues_count > 1)
2412 		stmmac_mac_config_tx_queues_prio(priv);
2413 
2414 	/* Set RX routing */
2415 	if (rx_queues_count > 1)
2416 		stmmac_mac_config_rx_queues_routing(priv);
2417 }
2418 
2419 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2420 {
2421 	if (priv->dma_cap.asp) {
2422 		netdev_info(priv->dev, "Enabling Safety Features\n");
2423 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2424 	} else {
2425 		netdev_info(priv->dev, "No Safety Features support found\n");
2426 	}
2427 }
2428 
2429 /**
2430  * stmmac_hw_setup - setup mac in a usable state.
2431  *  @dev : pointer to the device structure.
2432  *  @init_ptp: also initialize PTP when set
2433  *  Description:
2434  *  this is the main function to setup the HW in a usable state: the DMA
2435  *  engine is reset, the core registers (e.g. AXI, Checksum features,
2436  *  timers) are configured and the DMA is made ready to receive and transmit.
2437  *  Return value:
2438  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2439  *  file on failure.
2440  */
2441 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2442 {
2443 	struct stmmac_priv *priv = netdev_priv(dev);
2444 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2445 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2446 	u32 chan;
2447 	int ret;
2448 
2449 	/* DMA initialization and SW reset */
2450 	ret = stmmac_init_dma_engine(priv);
2451 	if (ret < 0) {
2452 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2453 			   __func__);
2454 		return ret;
2455 	}
2456 
2457 	/* Copy the MAC addr into the HW  */
2458 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2459 
2460 	/* PS and related bits will be programmed according to the speed */
2461 	if (priv->hw->pcs) {
2462 		int speed = priv->plat->mac_port_sel_speed;
2463 
2464 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2465 		    (speed == SPEED_1000)) {
2466 			priv->hw->ps = speed;
2467 		} else {
2468 			dev_warn(priv->device, "invalid port speed\n");
2469 			priv->hw->ps = 0;
2470 		}
2471 	}
2472 
2473 	/* Initialize the MAC Core */
2474 	stmmac_core_init(priv, priv->hw, dev);
2475 
2476 	/* Initialize MTL */
2477 	stmmac_mtl_configuration(priv);
2478 
2479 	/* Initialize Safety Features */
2480 	stmmac_safety_feat_configuration(priv);
2481 
2482 	ret = stmmac_rx_ipc(priv, priv->hw);
2483 	if (!ret) {
2484 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2485 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2486 		priv->hw->rx_csum = 0;
2487 	}
2488 
2489 	/* Enable the MAC Rx/Tx */
2490 	stmmac_mac_set(priv, priv->ioaddr, true);
2491 
2492 	/* Set the HW DMA mode and the COE */
2493 	stmmac_dma_operation_mode(priv);
2494 
2495 	stmmac_mmc_setup(priv);
2496 
2497 	if (init_ptp) {
2498 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2499 		if (ret < 0)
2500 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2501 
2502 		ret = stmmac_init_ptp(priv);
2503 		if (ret == -EOPNOTSUPP)
2504 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2505 		else if (ret)
2506 			netdev_warn(priv->dev, "PTP init failed\n");
2507 	}
2508 
2509 #ifdef CONFIG_DEBUG_FS
2510 	ret = stmmac_init_fs(dev);
2511 	if (ret < 0)
2512 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2513 			    __func__);
2514 #endif
2515 	/* Start the ball rolling... */
2516 	stmmac_start_all_dma(priv);
2517 
2518 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2519 
2520 	if (priv->use_riwt) {
2521 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2522 		if (!ret)
2523 			priv->rx_riwt = MAX_DMA_RIWT;
2524 	}
2525 
2526 	if (priv->hw->pcs)
2527 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2528 
2529 	/* set TX and RX rings length */
2530 	stmmac_set_rings_length(priv);
2531 
2532 	/* Enable TSO */
2533 	if (priv->tso) {
2534 		for (chan = 0; chan < tx_cnt; chan++)
2535 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2536 	}
2537 
2538 	return 0;
2539 }
2540 
2541 static void stmmac_hw_teardown(struct net_device *dev)
2542 {
2543 	struct stmmac_priv *priv = netdev_priv(dev);
2544 
2545 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2546 }
2547 
2548 /**
2549  *  stmmac_open - open entry point of the driver
2550  *  @dev : pointer to the device structure.
2551  *  Description:
2552  *  This function is the open entry point of the driver.
2553  *  Return value:
2554  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2555  *  file on failure.
2556  */
2557 static int stmmac_open(struct net_device *dev)
2558 {
2559 	struct stmmac_priv *priv = netdev_priv(dev);
2560 	int ret;
2561 
2562 	stmmac_check_ether_addr(priv);
2563 
2564 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2565 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2566 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2567 		ret = stmmac_init_phy(dev);
2568 		if (ret) {
2569 			netdev_err(priv->dev,
2570 				   "%s: Cannot attach to PHY (error: %d)\n",
2571 				   __func__, ret);
2572 			return ret;
2573 		}
2574 	}
2575 
2576 	/* Extra statistics */
2577 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2578 	priv->xstats.threshold = tc;
2579 
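	/* STMMAC_ALIGN() rounds the buffer size up to the L1 cache line
	 * size so the DMA buffers end up cache-line aligned.
	 */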
2580 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2581 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2582 
2583 	ret = alloc_dma_desc_resources(priv);
2584 	if (ret < 0) {
2585 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2586 			   __func__);
2587 		goto dma_desc_error;
2588 	}
2589 
2590 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2591 	if (ret < 0) {
2592 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2593 			   __func__);
2594 		goto init_error;
2595 	}
2596 
2597 	ret = stmmac_hw_setup(dev, true);
2598 	if (ret < 0) {
2599 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2600 		goto init_error;
2601 	}
2602 
2603 	stmmac_init_tx_coalesce(priv);
2604 
2605 	if (dev->phydev)
2606 		phy_start(dev->phydev);
2607 
2608 	/* Request the IRQ lines */
2609 	ret = request_irq(dev->irq, stmmac_interrupt,
2610 			  IRQF_SHARED, dev->name, dev);
2611 	if (unlikely(ret < 0)) {
2612 		netdev_err(priv->dev,
2613 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2614 			   __func__, dev->irq, ret);
2615 		goto irq_error;
2616 	}
2617 
2618 	/* Request the Wake IRQ in case another line is used for WoL */
2619 	if (priv->wol_irq != dev->irq) {
2620 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2621 				  IRQF_SHARED, dev->name, dev);
2622 		if (unlikely(ret < 0)) {
2623 			netdev_err(priv->dev,
2624 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2625 				   __func__, priv->wol_irq, ret);
2626 			goto wolirq_error;
2627 		}
2628 	}
2629 
2630 	/* Request the LPI IRQ in case a separate line is used for it */
2631 	if (priv->lpi_irq > 0) {
2632 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2633 				  dev->name, dev);
2634 		if (unlikely(ret < 0)) {
2635 			netdev_err(priv->dev,
2636 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2637 				   __func__, priv->lpi_irq, ret);
2638 			goto lpiirq_error;
2639 		}
2640 	}
2641 
2642 	stmmac_enable_all_queues(priv);
2643 	stmmac_start_all_queues(priv);
2644 
2645 	return 0;
2646 
2647 lpiirq_error:
2648 	if (priv->wol_irq != dev->irq)
2649 		free_irq(priv->wol_irq, dev);
2650 wolirq_error:
2651 	free_irq(dev->irq, dev);
2652 irq_error:
2653 	if (dev->phydev)
2654 		phy_stop(dev->phydev);
2655 
2656 	del_timer_sync(&priv->txtimer);
2657 	stmmac_hw_teardown(dev);
2658 init_error:
2659 	free_dma_desc_resources(priv);
2660 dma_desc_error:
2661 	if (dev->phydev)
2662 		phy_disconnect(dev->phydev);
2663 
2664 	return ret;
2665 }
2666 
2667 /**
2668  *  stmmac_release - close entry point of the driver
2669  *  @dev : device pointer.
2670  *  Description:
2671  *  This is the stop entry point of the driver.
2672  */
2673 static int stmmac_release(struct net_device *dev)
2674 {
2675 	struct stmmac_priv *priv = netdev_priv(dev);
2676 
2677 	if (priv->eee_enabled)
2678 		del_timer_sync(&priv->eee_ctrl_timer);
2679 
2680 	/* Stop and disconnect the PHY */
2681 	if (dev->phydev) {
2682 		phy_stop(dev->phydev);
2683 		phy_disconnect(dev->phydev);
2684 	}
2685 
2686 	stmmac_stop_all_queues(priv);
2687 
2688 	stmmac_disable_all_queues(priv);
2689 
2690 	del_timer_sync(&priv->txtimer);
2691 
2692 	/* Free the IRQ lines */
2693 	free_irq(dev->irq, dev);
2694 	if (priv->wol_irq != dev->irq)
2695 		free_irq(priv->wol_irq, dev);
2696 	if (priv->lpi_irq > 0)
2697 		free_irq(priv->lpi_irq, dev);
2698 
2699 	/* Stop TX/RX DMA and clear the descriptors */
2700 	stmmac_stop_all_dma(priv);
2701 
2702 	/* Release and free the Rx/Tx resources */
2703 	free_dma_desc_resources(priv);
2704 
2705 	/* Disable the MAC Rx/Tx */
2706 	stmmac_mac_set(priv, priv->ioaddr, false);
2707 
2708 	netif_carrier_off(dev);
2709 
2710 #ifdef CONFIG_DEBUG_FS
2711 	stmmac_exit_fs(dev);
2712 #endif
2713 
2714 	stmmac_release_ptp(priv);
2715 
2716 	return 0;
2717 }
2718 
2719 /**
2720  *  stmmac_tso_allocator - fill TX descriptors for a TSO payload
2721  *  @priv: driver private structure
2722  *  @des: buffer start address
2723  *  @total_len: total length to fill in descriptors
2724  *  @last_segment: condition for the last descriptor
2725  *  @queue: TX queue index
2726  *  Description:
2727  *  This function fills descriptors and requests new descriptors according
2728  *  to the buffer length to fill.
2729  */
2730 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2731 				 int total_len, bool last_segment, u32 queue)
2732 {
2733 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2734 	struct dma_desc *desc;
2735 	u32 buff_size;
2736 	int tmp_len;
2737 
2738 	tmp_len = total_len;
2739 
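	/* Example: with TSO_MAX_BUFF_SIZE = 16383 (SZ_16K - 1), a 40000
	 * byte payload is spread over three descriptors carrying 16383,
	 * 16383 and 7234 bytes respectively.
	 */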
2740 	while (tmp_len > 0) {
2741 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2742 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2743 		desc = tx_q->dma_tx + tx_q->cur_tx;
2744 
2745 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2746 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2747 			    TSO_MAX_BUFF_SIZE : tmp_len;
2748 
2749 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2750 				0, 1,
2751 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2752 				0, 0);
2753 
2754 		tmp_len -= TSO_MAX_BUFF_SIZE;
2755 	}
2756 }
2757 
2758 /**
2759  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2760  *  @skb : the socket buffer
2761  *  @dev : device pointer
2762  *  Description: this is the transmit function that is called on TSO frames
2763  *  (support available on GMAC4 and newer chips).
2764  *  The diagram below shows the ring programming in case of TSO frames:
2765  *
2766  *  First Descriptor
2767  *   --------
2768  *   | DES0 |---> buffer1 = L2/L3/L4 header
2769  *   | DES1 |---> TCP Payload (can continue on next descr...)
2770  *   | DES2 |---> buffer 1 and 2 len
2771  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2772  *   --------
2773  *	|
2774  *     ...
2775  *	|
2776  *   --------
2777  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2778  *   | DES1 | --|
2779  *   | DES2 | --> buffer 1 and 2 len
2780  *   | DES3 |
2781  *   --------
2782  *
2783  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field only needs programming when it changes.
2784  */
2785 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2786 {
2787 	struct dma_desc *desc, *first, *mss_desc = NULL;
2788 	struct stmmac_priv *priv = netdev_priv(dev);
2789 	int nfrags = skb_shinfo(skb)->nr_frags;
2790 	u32 queue = skb_get_queue_mapping(skb);
2791 	unsigned int first_entry, des;
2792 	struct stmmac_tx_queue *tx_q;
2793 	int tmp_pay_len = 0;
2794 	u32 pay_len, mss;
2795 	u8 proto_hdr_len;
2796 	int i;
2797 
2798 	tx_q = &priv->tx_queue[queue];
2799 
2800 	/* Compute header lengths */
2801 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2802 
2803 	/* Desc availability based on threshold should be safe enough */
2804 	if (unlikely(stmmac_tx_avail(priv, queue) <
2805 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2806 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2807 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2808 								queue));
2809 			/* This is a hard error, log it. */
2810 			netdev_err(priv->dev,
2811 				   "%s: Tx Ring full when queue awake\n",
2812 				   __func__);
2813 		}
2814 		return NETDEV_TX_BUSY;
2815 	}
2816 
2817 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2818 
2819 	mss = skb_shinfo(skb)->gso_size;
2820 
2821 	/* set new MSS value if needed */
2822 	if (mss != tx_q->mss) {
2823 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2824 		stmmac_set_mss(priv, mss_desc, mss);
2825 		tx_q->mss = mss;
2826 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2827 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2828 	}
2829 
2830 	if (netif_msg_tx_queued(priv)) {
2831 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2832 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2833 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2834 			skb->data_len);
2835 	}
2836 
2837 	first_entry = tx_q->cur_tx;
2838 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2839 
2840 	desc = tx_q->dma_tx + first_entry;
2841 	first = desc;
2842 
2843 	/* first descriptor: fill Headers on Buf1 */
2844 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2845 			     DMA_TO_DEVICE);
2846 	if (dma_mapping_error(priv->device, des))
2847 		goto dma_map_err;
2848 
2849 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2850 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2851 
2852 	first->des0 = cpu_to_le32(des);
2853 
2854 	/* Fill start of payload in buff2 of first descriptor */
2855 	if (pay_len)
2856 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2857 
2858 	/* If needed take extra descriptors to fill the remaining payload */
2859 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2860 
2861 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2862 
2863 	/* Prepare fragments */
2864 	for (i = 0; i < nfrags; i++) {
2865 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2866 
2867 		des = skb_frag_dma_map(priv->device, frag, 0,
2868 				       skb_frag_size(frag),
2869 				       DMA_TO_DEVICE);
2870 		if (dma_mapping_error(priv->device, des))
2871 			goto dma_map_err;
2872 
2873 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2874 				     (i == nfrags - 1), queue);
2875 
2876 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2877 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2878 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2879 	}
2880 
2881 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2882 
2883 	/* Only the last descriptor gets to point to the skb. */
2884 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2885 
2886 	/* We've used all descriptors we need for this skb, however,
2887 	 * advance cur_tx so that it references a fresh descriptor.
2888 	 * ndo_start_xmit will fill this descriptor the next time it's
2889 	 * called and stmmac_tx_clean may clean up to this descriptor.
2890 	 */
2891 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2892 
2893 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2894 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2895 			  __func__);
2896 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2897 	}
2898 
2899 	dev->stats.tx_bytes += skb->len;
2900 	priv->xstats.tx_tso_frames++;
2901 	priv->xstats.tx_tso_nfrags += nfrags;
2902 
2903 	/* Manage tx mitigation */
2904 	priv->tx_count_frames += nfrags + 1;
2905 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2906 		mod_timer(&priv->txtimer,
2907 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2908 	} else {
2909 		priv->tx_count_frames = 0;
2910 		stmmac_set_tx_ic(priv, desc);
2911 		priv->xstats.tx_set_ic_bit++;
2912 	}
2913 
2914 	skb_tx_timestamp(skb);
2915 
2916 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2917 		     priv->hwts_tx_en)) {
2918 		/* declare that device is doing timestamping */
2919 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2920 		stmmac_enable_tx_timestamp(priv, first);
2921 	}
2922 
2923 	/* Complete the first descriptor before granting the DMA */
2924 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2925 			proto_hdr_len,
2926 			pay_len,
2927 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2928 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2929 
2930 	/* If context desc is used to change MSS */
2931 	if (mss_desc) {
2932 		/* Make sure that first descriptor has been completely
2933 		 * written, including its own bit. This is because MSS is
2934 		 * actually before first descriptor, so we need to make
2935 		 * sure that MSS's own bit is the last thing written.
2936 		 */
2937 		dma_wmb();
2938 		stmmac_set_tx_owner(priv, mss_desc);
2939 	}
2940 
2941 	/* The own bit must be the latest setting done when preparing the
2942 	 * descriptor, and then a barrier is needed to make sure that
2943 	 * all is coherent before granting the DMA engine.
2944 	 */
2945 	wmb();
2946 
2947 	if (netif_msg_pktdata(priv)) {
2948 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2949 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2950 			tx_q->cur_tx, first, nfrags);
2951 
2952 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2953 
2954 		pr_info(">>> frame to be transmitted: ");
2955 		print_pkt(skb->data, skb_headlen(skb));
2956 	}
2957 
2958 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2959 
2960 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2961 
2962 	return NETDEV_TX_OK;
2963 
2964 dma_map_err:
2965 	dev_err(priv->device, "Tx dma map failed\n");
2966 	dev_kfree_skb(skb);
2967 	priv->dev->stats.tx_dropped++;
2968 	return NETDEV_TX_OK;
2969 }
2970 
2971 /**
2972  *  stmmac_xmit - Tx entry point of the driver
2973  *  @skb : the socket buffer
2974  *  @dev : device pointer
2975  *  Description : this is the tx entry point of the driver.
2976  *  It programs the chain or the ring and supports oversized frames
2977  *  and SG feature.
2978  */
2979 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2980 {
2981 	struct stmmac_priv *priv = netdev_priv(dev);
2982 	unsigned int nopaged_len = skb_headlen(skb);
2983 	int i, csum_insertion = 0, is_jumbo = 0;
2984 	u32 queue = skb_get_queue_mapping(skb);
2985 	int nfrags = skb_shinfo(skb)->nr_frags;
2986 	int entry;
2987 	unsigned int first_entry;
2988 	struct dma_desc *desc, *first;
2989 	struct stmmac_tx_queue *tx_q;
2990 	unsigned int enh_desc;
2991 	unsigned int des;
2992 
2993 	tx_q = &priv->tx_queue[queue];
2994 
2995 	/* Manage oversized TCP frames for GMAC4 device */
2996 	if (skb_is_gso(skb) && priv->tso) {
2997 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
2998 			return stmmac_tso_xmit(skb, dev);
2999 	}
3000 
3001 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3002 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3003 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3004 								queue));
3005 			/* This is a hard error, log it. */
3006 			netdev_err(priv->dev,
3007 				   "%s: Tx Ring full when queue awake\n",
3008 				   __func__);
3009 		}
3010 		return NETDEV_TX_BUSY;
3011 	}
3012 
3013 	if (priv->tx_path_in_lpi_mode)
3014 		stmmac_disable_eee_mode(priv);
3015 
3016 	entry = tx_q->cur_tx;
3017 	first_entry = entry;
3018 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3019 
3020 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3021 
3022 	if (likely(priv->extend_desc))
3023 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3024 	else
3025 		desc = tx_q->dma_tx + entry;
3026 
3027 	first = desc;
3028 
3029 	enh_desc = priv->plat->enh_desc;
3030 	/* To program the descriptors according to the size of the frame */
3031 	if (enh_desc)
3032 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3033 
3034 	if (unlikely(is_jumbo)) {
3035 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3036 		if (unlikely(entry < 0) && (entry != -EINVAL))
3037 			goto dma_map_err;
3038 	}
3039 
3040 	for (i = 0; i < nfrags; i++) {
3041 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3042 		int len = skb_frag_size(frag);
3043 		bool last_segment = (i == (nfrags - 1));
3044 
3045 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3046 		WARN_ON(tx_q->tx_skbuff[entry]);
3047 
3048 		if (likely(priv->extend_desc))
3049 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3050 		else
3051 			desc = tx_q->dma_tx + entry;
3052 
3053 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3054 				       DMA_TO_DEVICE);
3055 		if (dma_mapping_error(priv->device, des))
3056 			goto dma_map_err; /* should reuse desc w/o issues */
3057 
3058 		tx_q->tx_skbuff_dma[entry].buf = des;
3059 
3060 		stmmac_set_desc_addr(priv, desc, des);
3061 
3062 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3063 		tx_q->tx_skbuff_dma[entry].len = len;
3064 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3065 
3066 		/* Prepare the descriptor and set the own bit too */
3067 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3068 				priv->mode, 1, last_segment, skb->len);
3069 	}
3070 
3071 	/* Only the last descriptor gets to point to the skb. */
3072 	tx_q->tx_skbuff[entry] = skb;
3073 
3074 	/* We've used all descriptors we need for this skb, however,
3075 	 * advance cur_tx so that it references a fresh descriptor.
3076 	 * ndo_start_xmit will fill this descriptor the next time it's
3077 	 * called and stmmac_tx_clean may clean up to this descriptor.
3078 	 */
3079 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3080 	tx_q->cur_tx = entry;
3081 
3082 	if (netif_msg_pktdata(priv)) {
3083 		void *tx_head;
3084 
3085 		netdev_dbg(priv->dev,
3086 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3087 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3088 			   entry, first, nfrags);
3089 
3090 		if (priv->extend_desc)
3091 			tx_head = (void *)tx_q->dma_etx;
3092 		else
3093 			tx_head = (void *)tx_q->dma_tx;
3094 
3095 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3096 
3097 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3098 		print_pkt(skb->data, skb->len);
3099 	}
3100 
3101 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3102 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3103 			  __func__);
3104 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3105 	}
3106 
3107 	dev->stats.tx_bytes += skb->len;
3108 
3109 	/* According to the coalesce parameter the IC bit for the latest
3110 	 * segment is reset and the timer re-started to clean the tx status.
3111 	 * This approach takes care of the fragments: desc points to the first
3112 	 * element in case of no SG.
3113 	 */
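	/* For example, with tx_coal_frames set to 25, roughly one frame in
	 * every 25 sets the IC bit; completions for the rest are reaped by
	 * the tx timer.
	 */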
3114 	priv->tx_count_frames += nfrags + 1;
3115 	if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
3116 	    !priv->tx_timer_armed) {
3117 		mod_timer(&priv->txtimer,
3118 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3119 		priv->tx_timer_armed = true;
3120 	} else {
3121 		priv->tx_count_frames = 0;
3122 		stmmac_set_tx_ic(priv, desc);
3123 		priv->xstats.tx_set_ic_bit++;
3124 		priv->tx_timer_armed = false;
3125 	}
3126 
3127 	skb_tx_timestamp(skb);
3128 
3129 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3130 	 * problems because all the descriptors are actually ready to be
3131 	 * passed to the DMA engine.
3132 	 */
3133 	if (likely(!is_jumbo)) {
3134 		bool last_segment = (nfrags == 0);
3135 
3136 		des = dma_map_single(priv->device, skb->data,
3137 				     nopaged_len, DMA_TO_DEVICE);
3138 		if (dma_mapping_error(priv->device, des))
3139 			goto dma_map_err;
3140 
3141 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3142 
3143 		stmmac_set_desc_addr(priv, first, des);
3144 
3145 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3146 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3147 
3148 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3149 			     priv->hwts_tx_en)) {
3150 			/* declare that device is doing timestamping */
3151 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3152 			stmmac_enable_tx_timestamp(priv, first);
3153 		}
3154 
3155 		/* Prepare the first descriptor setting the OWN bit too */
3156 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3157 				csum_insertion, priv->mode, 1, last_segment,
3158 				skb->len);
3159 
3160 		/* The own bit must be the latest setting done when preparing the
3161 		 * descriptor, and then a barrier is needed to make sure that
3162 		 * all is coherent before granting the DMA engine.
3163 		 */
3164 		wmb();
3165 	}
3166 
3167 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3168 
3169 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
3170 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3171 
3172 	return NETDEV_TX_OK;
3173 
3174 dma_map_err:
3175 	netdev_err(priv->dev, "Tx DMA map failed\n");
3176 	dev_kfree_skb(skb);
3177 	priv->dev->stats.tx_dropped++;
3178 	return NETDEV_TX_OK;
3179 }
3180 
3181 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3182 {
3183 	struct ethhdr *ehdr;
3184 	u16 vlanid;
3185 
3186 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3187 	    NETIF_F_HW_VLAN_CTAG_RX &&
3188 	    !__vlan_get_tag(skb, &vlanid)) {
3189 		/* pop the vlan tag */
3190 		ehdr = (struct ethhdr *)skb->data;
3191 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3192 		skb_pull(skb, VLAN_HLEN);
3193 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3194 	}
3195 }
3196 
3197 
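/* Return 1 while recent skb allocation failures keep rx_zeroc_thresh at or
 * above STMMAC_RX_THRESH; stmmac_rx() then copies incoming frames instead
 * of using the zero-copy path.
 */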
3198 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3199 {
3200 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3201 		return 0;
3202 
3203 	return 1;
3204 }
3205 
3206 /**
3207  * stmmac_rx_refill - refill used skb preallocated buffers
3208  * @priv: driver private structure
3209  * @queue: RX queue index
3210  * Description : this reallocates the skbs for the zero-copy based
3211  * reception process.
3212  */
3213 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3214 {
3215 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3216 	int dirty = stmmac_rx_dirty(priv, queue);
3217 	unsigned int entry = rx_q->dirty_rx;
3218 
3219 	int bfsize = priv->dma_buf_sz;
3220 
3221 	while (dirty-- > 0) {
3222 		struct dma_desc *p;
3223 
3224 		if (priv->extend_desc)
3225 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3226 		else
3227 			p = rx_q->dma_rx + entry;
3228 
3229 		if (likely(!rx_q->rx_skbuff[entry])) {
3230 			struct sk_buff *skb;
3231 
3232 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3233 			if (unlikely(!skb)) {
3234 				/* so for a while no zero-copy! */
3235 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3236 				if (unlikely(net_ratelimit()))
3237 					dev_err(priv->device,
3238 						"fail to alloc skb entry %d\n",
3239 						entry);
3240 				break;
3241 			}
3242 
3243 			rx_q->rx_skbuff[entry] = skb;
3244 			rx_q->rx_skbuff_dma[entry] =
3245 			    dma_map_single(priv->device, skb->data, bfsize,
3246 					   DMA_FROM_DEVICE);
3247 			if (dma_mapping_error(priv->device,
3248 					      rx_q->rx_skbuff_dma[entry])) {
3249 				netdev_err(priv->dev, "Rx DMA map failed\n");
3250 				dev_kfree_skb(skb);
3251 				break;
3252 			}
3253 
3254 			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3255 			stmmac_refill_desc3(priv, rx_q, p);
3256 
3257 			if (rx_q->rx_zeroc_thresh > 0)
3258 				rx_q->rx_zeroc_thresh--;
3259 
3260 			netif_dbg(priv, rx_status, priv->dev,
3261 				  "refill entry #%d\n", entry);
3262 		}
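		/* Make sure the new buffer address is written out before the
		 * own bit is handed back to the DMA below.
		 */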
3263 		dma_wmb();
3264 
3265 		stmmac_set_rx_owner(priv, p, priv->use_riwt);
3266 
3267 		dma_wmb();
3268 
3269 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3270 	}
3271 	rx_q->dirty_rx = entry;
3272 }
3273 
3274 /**
3275  * stmmac_rx - manage the receive process
3276  * @priv: driver private structure
3277  * @limit: napi budget
3278  * @queue: RX queue index.
3279  * Description : this is the function called by the napi poll method.
3280  * It gets all the frames inside the ring.
3281  */
3282 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3283 {
3284 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3285 	unsigned int entry = rx_q->cur_rx;
3286 	int coe = priv->hw->rx_csum;
3287 	unsigned int next_entry;
3288 	unsigned int count = 0;
3289 
3290 	if (netif_msg_rx_status(priv)) {
3291 		void *rx_head;
3292 
3293 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3294 		if (priv->extend_desc)
3295 			rx_head = (void *)rx_q->dma_erx;
3296 		else
3297 			rx_head = (void *)rx_q->dma_rx;
3298 
3299 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3300 	}
3301 	while (count < limit) {
3302 		int status;
3303 		struct dma_desc *p;
3304 		struct dma_desc *np;
3305 
3306 		if (priv->extend_desc)
3307 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3308 		else
3309 			p = rx_q->dma_rx + entry;
3310 
3311 		/* read the status of the incoming frame */
3312 		status = stmmac_rx_status(priv, &priv->dev->stats,
3313 				&priv->xstats, p);
3314 		/* check if managed by the DMA otherwise go ahead */
3315 		if (unlikely(status & dma_own))
3316 			break;
3317 
3318 		count++;
3319 
3320 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3321 		next_entry = rx_q->cur_rx;
3322 
3323 		if (priv->extend_desc)
3324 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3325 		else
3326 			np = rx_q->dma_rx + next_entry;
3327 
3328 		prefetch(np);
3329 
3330 		if (priv->extend_desc)
3331 			stmmac_rx_extended_status(priv, &priv->dev->stats,
3332 					&priv->xstats, rx_q->dma_erx + entry);
3333 		if (unlikely(status == discard_frame)) {
3334 			priv->dev->stats.rx_errors++;
3335 			if (priv->hwts_rx_en && !priv->extend_desc) {
3336 				/* DESC2 & DESC3 will be overwritten by device
3337 				 * with timestamp value, hence reinitialize
3338 				 * them in stmmac_rx_refill() function so that
3339 				 * device can reuse it.
3340 				 */
3341 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3342 				rx_q->rx_skbuff[entry] = NULL;
3343 				dma_unmap_single(priv->device,
3344 						 rx_q->rx_skbuff_dma[entry],
3345 						 priv->dma_buf_sz,
3346 						 DMA_FROM_DEVICE);
3347 			}
3348 		} else {
3349 			struct sk_buff *skb;
3350 			int frame_len;
3351 			unsigned int des;
3352 
3353 			stmmac_get_desc_addr(priv, p, &des);
3354 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3355 
3356 			/*  If frame length is greater than skb buffer size
3357 			 *  (preallocated during init) then the packet is
3358 			 *  ignored
3359 			 */
3360 			if (frame_len > priv->dma_buf_sz) {
3361 				netdev_err(priv->dev,
3362 					   "len %d larger than size (%d)\n",
3363 					   frame_len, priv->dma_buf_sz);
3364 				priv->dev->stats.rx_length_errors++;
3365 				break;
3366 			}
3367 
3368 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3369 			 * Type frames (LLC/LLC-SNAP)
3370 			 *
3371 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3372 			 * feature is always disabled and packets need to be
3373 			 * stripped manually.
3374 			 */
3375 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3376 			    unlikely(status != llc_snap))
3377 				frame_len -= ETH_FCS_LEN;
3378 
3379 			if (netif_msg_rx_status(priv)) {
3380 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3381 					   p, entry, des);
3382 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3383 					   frame_len, status);
3384 			}
3385 
3386 			/* The zero-copy is always used for all the sizes
3387 			 * in case of GMAC4 because it needs
3388 			 * to refill the used descriptors, always.
3389 			 */
3390 			if (unlikely(!priv->plat->has_gmac4 &&
3391 				     ((frame_len < priv->rx_copybreak) ||
3392 				     stmmac_rx_threshold_count(rx_q)))) {
3393 				skb = netdev_alloc_skb_ip_align(priv->dev,
3394 								frame_len);
3395 				if (unlikely(!skb)) {
3396 					if (net_ratelimit())
3397 						dev_warn(priv->device,
3398 							 "packet dropped\n");
3399 					priv->dev->stats.rx_dropped++;
3400 					break;
3401 				}
3402 
3403 				dma_sync_single_for_cpu(priv->device,
3404 							rx_q->rx_skbuff_dma
3405 							[entry], frame_len,
3406 							DMA_FROM_DEVICE);
3407 				skb_copy_to_linear_data(skb,
3408 							rx_q->
3409 							rx_skbuff[entry]->data,
3410 							frame_len);
3411 
3412 				skb_put(skb, frame_len);
3413 				dma_sync_single_for_device(priv->device,
3414 							   rx_q->rx_skbuff_dma
3415 							   [entry], frame_len,
3416 							   DMA_FROM_DEVICE);
3417 			} else {
3418 				skb = rx_q->rx_skbuff[entry];
3419 				if (unlikely(!skb)) {
3420 					netdev_err(priv->dev,
3421 						   "%s: Inconsistent Rx chain\n",
3422 						   priv->dev->name);
3423 					priv->dev->stats.rx_dropped++;
3424 					break;
3425 				}
3426 				prefetch(skb->data - NET_IP_ALIGN);
3427 				rx_q->rx_skbuff[entry] = NULL;
3428 				rx_q->rx_zeroc_thresh++;
3429 
3430 				skb_put(skb, frame_len);
3431 				dma_unmap_single(priv->device,
3432 						 rx_q->rx_skbuff_dma[entry],
3433 						 priv->dma_buf_sz,
3434 						 DMA_FROM_DEVICE);
3435 			}
3436 
3437 			if (netif_msg_pktdata(priv)) {
3438 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3439 					   frame_len);
3440 				print_pkt(skb->data, frame_len);
3441 			}
3442 
3443 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3444 
3445 			stmmac_rx_vlan(priv->dev, skb);
3446 
3447 			skb->protocol = eth_type_trans(skb, priv->dev);
3448 
3449 			if (unlikely(!coe))
3450 				skb_checksum_none_assert(skb);
3451 			else
3452 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3453 
3454 			napi_gro_receive(&rx_q->napi, skb);
3455 
3456 			priv->dev->stats.rx_packets++;
3457 			priv->dev->stats.rx_bytes += frame_len;
3458 		}
3459 		entry = next_entry;
3460 	}
3461 
3462 	stmmac_rx_refill(priv, queue);
3463 
3464 	priv->xstats.rx_pkt_n += count;
3465 
3466 	return count;
3467 }
3468 
3469 /**
3470  *  stmmac_poll - stmmac poll method (NAPI)
3471  *  @napi : pointer to the napi structure.
3472  *  @budget : maximum number of packets that the current CPU can receive from
3473  *	      all interfaces.
3474  *  Description :
3475  *  To look at the incoming frames and clear the tx resources.
3476  */
3477 static int stmmac_poll(struct napi_struct *napi, int budget)
3478 {
3479 	struct stmmac_rx_queue *rx_q =
3480 		container_of(napi, struct stmmac_rx_queue, napi);
3481 	struct stmmac_priv *priv = rx_q->priv_data;
3482 	u32 tx_count = priv->plat->tx_queues_to_use;
3483 	u32 chan = rx_q->queue_index;
3484 	int work_done = 0;
3485 	u32 queue;
3486 
3487 	priv->xstats.napi_poll++;
3488 
3489 	/* clean all the TX queues */
3490 	for (queue = 0; queue < tx_count; queue++)
3491 		stmmac_tx_clean(priv, queue);
3492 
3493 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3494 	if (work_done < budget) {
3495 		napi_complete_done(napi, work_done);
3496 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3497 	}
3498 	return work_done;
3499 }
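
/* Usage sketch: the NAPI contract implemented above is that the poll
 * method consumes at most @budget packets and, when it finishes early,
 * calls napi_complete_done() before re-arming the device interrupt.
 * Each per-queue NAPI instance is attached in stmmac_dvr_probe(), later
 * in this file:
 *
 *	netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
 *		       (8 * priv->plat->rx_queues_to_use));
 */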
3500 
3501 /**
3502  *  stmmac_tx_timeout
3503  *  @dev : Pointer to net device structure
3504  *  Description: this function is called when a packet transmission fails to
3505  *   complete within a reasonable time. The driver will mark the error in the
3506  *   netdev structure and arrange for the device to be reset to a sane state
3507  *   in order to transmit a new packet.
3508  */
3509 static void stmmac_tx_timeout(struct net_device *dev)
3510 {
3511 	struct stmmac_priv *priv = netdev_priv(dev);
3512 
3513 	stmmac_global_err(priv);
3514 }
3515 
3516 /**
3517  *  stmmac_set_rx_mode - entry point for multicast addressing
3518  *  @dev : pointer to the device structure
3519  *  Description:
3520  *  This function is a driver entry point which gets called by the kernel
3521  *  whenever multicast addresses must be enabled/disabled.
3522  *  Return value:
3523  *  void.
3524  */
3525 static void stmmac_set_rx_mode(struct net_device *dev)
3526 {
3527 	struct stmmac_priv *priv = netdev_priv(dev);
3528 
3529 	stmmac_set_filter(priv, priv->hw, dev);
3530 }
3531 
3532 /**
3533  *  stmmac_change_mtu - entry point to change MTU size for the device.
3534  *  @dev : device pointer.
3535  *  @new_mtu : the new MTU size for the device.
3536  *  Description: the Maximum Transmission Unit (MTU) is used by the network
3537  *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
3538  *  (ETH_DATA_LEN). This value can be changed with ip link (or ifconfig).
3539  *  Return value:
3540  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3541  *  file on failure.
3542  */
3543 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3544 {
3545 	struct stmmac_priv *priv = netdev_priv(dev);
3546 
3547 	if (netif_running(dev)) {
3548 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3549 		return -EBUSY;
3550 	}
3551 
3552 	dev->mtu = new_mtu;
3553 
3554 	netdev_update_features(dev);
3555 
3556 	return 0;
3557 }
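
/* Illustrative usage (the interface name is an assumption): because of
 * the netif_running() check above, the MTU can only be changed while
 * the interface is down, otherwise this handler returns -EBUSY:
 *
 *	ip link set eth0 down
 *	ip link set eth0 mtu 2000
 *	ip link set eth0 up
 */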
3558 
3559 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3560 					     netdev_features_t features)
3561 {
3562 	struct stmmac_priv *priv = netdev_priv(dev);
3563 
3564 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3565 		features &= ~NETIF_F_RXCSUM;
3566 
3567 	if (!priv->plat->tx_coe)
3568 		features &= ~NETIF_F_CSUM_MASK;
3569 
3570 	/* Some GMAC devices have buggy Jumbo frame support that
3571 	 * requires the Tx COE to be disabled for oversized frames
3572 	 * (due to limited buffer sizes). In this case we disable
3573 	 * the TX csum insertion in the TDES and do not use SF.
3574 	 */
3575 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3576 		features &= ~NETIF_F_CSUM_MASK;
3577 
3578 	/* Enable or disable TSO as requested via ethtool */
3579 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3580 		if (features & NETIF_F_TSO)
3581 			priv->tso = true;
3582 		else
3583 			priv->tso = false;
3584 	}
3585 
3586 	return features;
3587 }
3588 
3589 static int stmmac_set_features(struct net_device *netdev,
3590 			       netdev_features_t features)
3591 {
3592 	struct stmmac_priv *priv = netdev_priv(netdev);
3593 
3594 	/* Keep the COE Type if RX checksum offload is supported */
3595 	if (features & NETIF_F_RXCSUM)
3596 		priv->hw->rx_csum = priv->plat->rx_coe;
3597 	else
3598 		priv->hw->rx_csum = 0;
3599 	/* No check is needed because rx_coe has been validated before
3600 	 * and will be fixed up in case of an issue.
3601 	 */
3602 	stmmac_rx_ipc(priv, priv->hw);
3603 
3604 	return 0;
3605 }
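
/* Illustrative usage (the interface name is an assumption): the two
 * callbacks above back the ethtool offload toggles, e.g.:
 *
 *	ethtool -K eth0 rx off	# clears NETIF_F_RXCSUM, so rx_csum = 0
 *	ethtool -K eth0 rx on	# restores priv->plat->rx_coe
 */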
3606 
3607 /**
3608  *  stmmac_interrupt - main ISR
3609  *  @irq: interrupt number.
3610  *  @dev_id: to pass the net device pointer.
3611  *  Description: this is the main driver interrupt service routine.
3612  *  It can call:
3613  *  o DMA service routine (to manage incoming frame reception and transmission
3614  *    status)
3615  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3616  *    interrupts.
3617  */
3618 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3619 {
3620 	struct net_device *dev = (struct net_device *)dev_id;
3621 	struct stmmac_priv *priv;
3622 	u32 queues_count, rx_cnt, tx_cnt, queue;
3623 
3624 	/* Check the dev pointer before anything derived from it is used */
3625 	if (unlikely(!dev)) {
3626 		pr_err("%s: invalid dev pointer\n", __func__);
3627 		return IRQ_NONE;
3628 	}
3629 	priv = netdev_priv(dev);
3630 	rx_cnt = priv->plat->rx_queues_to_use;
3631 	tx_cnt = priv->plat->tx_queues_to_use;
3632 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3633 
3634 	if (priv->irq_wake)
3635 		pm_wakeup_event(priv->device, 0);
3636 
3637 	/* Check if adapter is up */
3638 	if (test_bit(STMMAC_DOWN, &priv->state))
3639 		return IRQ_HANDLED;
3640 	/* Check if a fatal error happened */
3641 	if (stmmac_safety_feat_interrupt(priv))
3642 		return IRQ_HANDLED;
3643 
3644 	/* To handle the GMAC's own interrupts */
3645 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3646 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3647 		int mtl_status;
3648 
3649 		if (unlikely(status)) {
3650 			/* For LPI we need to save the tx status */
3651 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3652 				priv->tx_path_in_lpi_mode = true;
3653 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3654 				priv->tx_path_in_lpi_mode = false;
3655 		}
3656 
3657 		for (queue = 0; queue < queues_count; queue++) {
3658 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3659 
3660 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3661 								queue);
3662 			if (mtl_status != -EINVAL)
3663 				status |= mtl_status;
3664 
3665 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3666 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3667 						       rx_q->rx_tail_addr,
3668 						       queue);
3669 		}
3670 
3671 		/* PCS link status */
3672 		if (priv->hw->pcs) {
3673 			if (priv->xstats.pcs_link)
3674 				netif_carrier_on(dev);
3675 			else
3676 				netif_carrier_off(dev);
3677 		}
3678 	}
3679 
3680 	/* To handle DMA interrupts */
3681 	stmmac_dma_interrupt(priv);
3682 
3683 	return IRQ_HANDLED;
3684 }
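
/* Minimal sketch, assuming the usual registration in stmmac_open()
 * (outside this excerpt): the ISR above is installed as a shared
 * handler with the net_device as dev_id, which is why the handler
 * casts dev_id back to a struct net_device pointer:
 *
 *	ret = request_irq(dev->irq, stmmac_interrupt,
 *			  IRQF_SHARED, dev->name, dev);
 */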
3685 
3686 #ifdef CONFIG_NET_POLL_CONTROLLER
3687 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3688  * to allow network I/O with interrupts disabled.
3689  */
3690 static void stmmac_poll_controller(struct net_device *dev)
3691 {
3692 	disable_irq(dev->irq);
3693 	stmmac_interrupt(dev->irq, dev);
3694 	enable_irq(dev->irq);
3695 }
3696 #endif
3697 
3698 /**
3699  *  stmmac_ioctl - Entry point for the Ioctl
3700  *  @dev: Device pointer.
3701  *  @rq: An IOCTL-specific structure that can contain a pointer to
3702  *  a proprietary structure used to pass information to the driver.
3703  *  @cmd: IOCTL command
3704  *  Description:
3705  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3706  */
3707 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3708 {
3709 	int ret = -EOPNOTSUPP;
3710 
3711 	if (!netif_running(dev))
3712 		return -EINVAL;
3713 
3714 	switch (cmd) {
3715 	case SIOCGMIIPHY:
3716 	case SIOCGMIIREG:
3717 	case SIOCSMIIREG:
3718 		if (!dev->phydev)
3719 			return -EINVAL;
3720 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3721 		break;
3722 	case SIOCSHWTSTAMP:
3723 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3724 		break;
3725 	default:
3726 		break;
3727 	}
3728 
3729 	return ret;
3730 }
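
/* Hypothetical user-space sketch (not part of this driver; sockfd is an
 * open AF_INET datagram socket and "eth0" an example name) of a MII
 * register read that lands in phy_mii_ioctl() above:
 *
 *	struct ifreq ifr = {0};
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(sockfd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;		// PHY status register
 *	ioctl(sockfd, SIOCGMIIREG, &ifr);	// result in mii->val_out
 */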
3731 
3732 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3733 				    void *cb_priv)
3734 {
3735 	struct stmmac_priv *priv = cb_priv;
3736 	int ret = -EOPNOTSUPP;
3737 
3738 	stmmac_disable_all_queues(priv);
3739 
3740 	switch (type) {
3741 	case TC_SETUP_CLSU32:
3742 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3743 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3744 		break;
3745 	default:
3746 		break;
3747 	}
3748 
3749 	stmmac_enable_all_queues(priv);
3750 	return ret;
3751 }
3752 
3753 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3754 				 struct tc_block_offload *f)
3755 {
3756 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3757 		return -EOPNOTSUPP;
3758 
3759 	switch (f->command) {
3760 	case TC_BLOCK_BIND:
3761 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3762 				priv, priv);
3763 	case TC_BLOCK_UNBIND:
3764 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3765 		return 0;
3766 	default:
3767 		return -EOPNOTSUPP;
3768 	}
3769 }
3770 
3771 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3772 			   void *type_data)
3773 {
3774 	struct stmmac_priv *priv = netdev_priv(ndev);
3775 
3776 	switch (type) {
3777 	case TC_SETUP_BLOCK:
3778 		return stmmac_setup_tc_block(priv, type_data);
3779 	default:
3780 		return -EOPNOTSUPP;
3781 	}
3782 }
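
/* Illustrative commands (device name and addresses are assumptions):
 * binding an ingress block and adding a cls_u32 filter exercises
 * TC_SETUP_BLOCK above and, through the registered block callback,
 * stmmac_tc_setup_cls_u32():
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 ingress u32 match ip dst 192.168.0.1/32 action drop
 */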
3783 
3784 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3785 {
3786 	struct stmmac_priv *priv = netdev_priv(ndev);
3787 	int ret = 0;
3788 
3789 	ret = eth_mac_addr(ndev, addr);
3790 	if (ret)
3791 		return ret;
3792 
3793 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3794 
3795 	return ret;
3796 }
3797 
3798 #ifdef CONFIG_DEBUG_FS
3799 static struct dentry *stmmac_fs_dir;
3800 
3801 static void sysfs_display_ring(void *head, int size, int extend_desc,
3802 			       struct seq_file *seq)
3803 {
3804 	int i;
3805 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3806 	struct dma_desc *p = (struct dma_desc *)head;
3807 
3808 	for (i = 0; i < size; i++) {
3809 		if (extend_desc) {
3810 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3811 				   i, (unsigned int)virt_to_phys(ep),
3812 				   le32_to_cpu(ep->basic.des0),
3813 				   le32_to_cpu(ep->basic.des1),
3814 				   le32_to_cpu(ep->basic.des2),
3815 				   le32_to_cpu(ep->basic.des3));
3816 			ep++;
3817 		} else {
3818 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3819 				   i, (unsigned int)virt_to_phys(p),
3820 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3821 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3822 			p++;
3823 		}
3824 		seq_printf(seq, "\n");
3825 	}
3826 }
3827 
3828 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3829 {
3830 	struct net_device *dev = seq->private;
3831 	struct stmmac_priv *priv = netdev_priv(dev);
3832 	u32 rx_count = priv->plat->rx_queues_to_use;
3833 	u32 tx_count = priv->plat->tx_queues_to_use;
3834 	u32 queue;
3835 
3836 	for (queue = 0; queue < rx_count; queue++) {
3837 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3838 
3839 		seq_printf(seq, "RX Queue %d:\n", queue);
3840 
3841 		if (priv->extend_desc) {
3842 			seq_printf(seq, "Extended descriptor ring:\n");
3843 			sysfs_display_ring((void *)rx_q->dma_erx,
3844 					   DMA_RX_SIZE, 1, seq);
3845 		} else {
3846 			seq_printf(seq, "Descriptor ring:\n");
3847 			sysfs_display_ring((void *)rx_q->dma_rx,
3848 					   DMA_RX_SIZE, 0, seq);
3849 		}
3850 	}
3851 
3852 	for (queue = 0; queue < tx_count; queue++) {
3853 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3854 
3855 		seq_printf(seq, "TX Queue %d:\n", queue);
3856 
3857 		if (priv->extend_desc) {
3858 			seq_printf(seq, "Extended descriptor ring:\n");
3859 			sysfs_display_ring((void *)tx_q->dma_etx,
3860 					   DMA_TX_SIZE, 1, seq);
3861 		} else {
3862 			seq_printf(seq, "Descriptor ring:\n");
3863 			sysfs_display_ring((void *)tx_q->dma_tx,
3864 					   DMA_TX_SIZE, 0, seq);
3865 		}
3866 	}
3867 
3868 	return 0;
3869 }
3870 
3871 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3872 {
3873 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3874 }
3875 
3876 /* Debugfs files; they should appear in /sys/kernel/debug/stmmaceth/eth0 */
3877 
3878 static const struct file_operations stmmac_rings_status_fops = {
3879 	.owner = THIS_MODULE,
3880 	.open = stmmac_sysfs_ring_open,
3881 	.read = seq_read,
3882 	.llseek = seq_lseek,
3883 	.release = single_release,
3884 };
3885 
3886 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3887 {
3888 	struct net_device *dev = seq->private;
3889 	struct stmmac_priv *priv = netdev_priv(dev);
3890 
3891 	if (!priv->hw_cap_support) {
3892 		seq_printf(seq, "DMA HW features not supported\n");
3893 		return 0;
3894 	}
3895 
3896 	seq_printf(seq, "==============================\n");
3897 	seq_printf(seq, "\tDMA HW features\n");
3898 	seq_printf(seq, "==============================\n");
3899 
3900 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3901 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3902 	seq_printf(seq, "\t1000 Mbps: %s\n",
3903 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3904 	seq_printf(seq, "\tHalf duplex: %s\n",
3905 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3906 	seq_printf(seq, "\tHash Filter: %s\n",
3907 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3908 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3909 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3910 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3911 		   (priv->dma_cap.pcs) ? "Y" : "N");
3912 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3913 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3914 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3915 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3916 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3917 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3918 	seq_printf(seq, "\tRMON module: %s\n",
3919 		   (priv->dma_cap.rmon) ? "Y" : "N");
3920 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3921 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3922 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3923 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3924 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3925 		   (priv->dma_cap.eee) ? "Y" : "N");
3926 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3927 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3928 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3929 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3930 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3931 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3932 	} else {
3933 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3934 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3935 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3936 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3937 	}
3938 	seq_printf(seq, "\tRXFIFO > 2048 bytes: %s\n",
3939 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3940 	seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
3941 		   priv->dma_cap.number_rx_channel);
3942 	seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
3943 		   priv->dma_cap.number_tx_channel);
3944 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3945 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3946 
3947 	return 0;
3948 }
3949 
3950 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3951 {
3952 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3953 }
3954 
3955 static const struct file_operations stmmac_dma_cap_fops = {
3956 	.owner = THIS_MODULE,
3957 	.open = stmmac_sysfs_dma_cap_open,
3958 	.read = seq_read,
3959 	.llseek = seq_lseek,
3960 	.release = single_release,
3961 };
3962 
3963 static int stmmac_init_fs(struct net_device *dev)
3964 {
3965 	struct stmmac_priv *priv = netdev_priv(dev);
3966 
3967 	/* Create per netdev entries */
3968 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3969 
3970 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3971 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3972 
3973 		return -ENOMEM;
3974 	}
3975 
3976 	/* Entry to report DMA RX/TX rings */
3977 	priv->dbgfs_rings_status =
3978 		debugfs_create_file("descriptors_status", 0444,
3979 				    priv->dbgfs_dir, dev,
3980 				    &stmmac_rings_status_fops);
3981 
3982 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3983 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3984 		debugfs_remove_recursive(priv->dbgfs_dir);
3985 
3986 		return -ENOMEM;
3987 	}
3988 
3989 	/* Entry to report the DMA HW features */
3990 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
3991 						  priv->dbgfs_dir,
3992 						  dev, &stmmac_dma_cap_fops);
3993 
3994 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3995 		netdev_err(priv->dev, "ERROR creating stmmac DMA capability debugfs file\n");
3996 		debugfs_remove_recursive(priv->dbgfs_dir);
3997 
3998 		return -ENOMEM;
3999 	}
4000 
4001 	return 0;
4002 }
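
/* The two entries created above can be read at run time, e.g. (path as
 * noted earlier, assuming the device is named eth0):
 *
 *	cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *	cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */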
4003 
4004 static void stmmac_exit_fs(struct net_device *dev)
4005 {
4006 	struct stmmac_priv *priv = netdev_priv(dev);
4007 
4008 	debugfs_remove_recursive(priv->dbgfs_dir);
4009 }
4010 #endif /* CONFIG_DEBUG_FS */
4011 
4012 static const struct net_device_ops stmmac_netdev_ops = {
4013 	.ndo_open = stmmac_open,
4014 	.ndo_start_xmit = stmmac_xmit,
4015 	.ndo_stop = stmmac_release,
4016 	.ndo_change_mtu = stmmac_change_mtu,
4017 	.ndo_fix_features = stmmac_fix_features,
4018 	.ndo_set_features = stmmac_set_features,
4019 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4020 	.ndo_tx_timeout = stmmac_tx_timeout,
4021 	.ndo_do_ioctl = stmmac_ioctl,
4022 	.ndo_setup_tc = stmmac_setup_tc,
4023 #ifdef CONFIG_NET_POLL_CONTROLLER
4024 	.ndo_poll_controller = stmmac_poll_controller,
4025 #endif
4026 	.ndo_set_mac_address = stmmac_set_mac_address,
4027 };
4028 
4029 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4030 {
4031 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4032 		return;
4033 	if (test_bit(STMMAC_DOWN, &priv->state))
4034 		return;
4035 
4036 	netdev_err(priv->dev, "Reset adapter.\n");
4037 
4038 	rtnl_lock();
4039 	netif_trans_update(priv->dev);
4040 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4041 		usleep_range(1000, 2000);
4042 
4043 	set_bit(STMMAC_DOWN, &priv->state);
4044 	dev_close(priv->dev);
4045 	dev_open(priv->dev);
4046 	clear_bit(STMMAC_DOWN, &priv->state);
4047 	clear_bit(STMMAC_RESETING, &priv->state);
4048 	rtnl_unlock();
4049 }
4050 
4051 static void stmmac_service_task(struct work_struct *work)
4052 {
4053 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4054 			service_task);
4055 
4056 	stmmac_reset_subtask(priv);
4057 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4058 }
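
/* Minimal sketch of the request side (it lives elsewhere in the driver,
 * e.g. stmmac_global_err(), outside this excerpt): a reset is requested
 * and the service task queued roughly as:
 *
 *	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
 *	if (!test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
 *		queue_work(priv->wq, &priv->service_task);
 */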
4059 
4060 /**
4061  *  stmmac_hw_init - Init the MAC device
4062  *  @priv: driver private structure
4063  *  Description: this function configures the MAC device according to
4064  *  some platform parameters or the HW capability register. It prepares the
4065  *  driver to use either ring or chain mode and to set up either enhanced or
4066  *  normal descriptors.
4067  */
4068 static int stmmac_hw_init(struct stmmac_priv *priv)
4069 {
4070 	int ret;
4071 
4072 	/* dwmac-sun8i only works in chain mode */
4073 	if (priv->plat->has_sun8i)
4074 		chain_mode = 1;
4075 	priv->chain_mode = chain_mode;
4076 
4077 	/* Initialize HW Interface */
4078 	ret = stmmac_hwif_init(priv);
4079 	if (ret)
4080 		return ret;
4081 
4082 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
4083 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4084 	if (priv->hw_cap_support) {
4085 		dev_info(priv->device, "DMA HW capability register supported\n");
4086 
4087 		/* We can override some gmac/dma configuration fields
4088 		 * (e.g. enh_desc, tx_coe) that are passed through the
4089 		 * platform data with the values from the HW capability
4090 		 * register (if supported).
4091 		 */
4092 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4093 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4094 		priv->hw->pmt = priv->plat->pmt;
4095 
4096 		/* TXCOE doesn't work in thresh DMA mode */
4097 		if (priv->plat->force_thresh_dma_mode)
4098 			priv->plat->tx_coe = 0;
4099 		else
4100 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4101 
4102 		/* In case of GMAC4 rx_coe is from HW cap register. */
4103 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4104 
4105 		if (priv->dma_cap.rx_coe_type2)
4106 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4107 		else if (priv->dma_cap.rx_coe_type1)
4108 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4109 
4110 	} else {
4111 		dev_info(priv->device, "No HW DMA feature register supported\n");
4112 	}
4113 
4114 	if (priv->plat->rx_coe) {
4115 		priv->hw->rx_csum = priv->plat->rx_coe;
4116 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4117 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4118 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4119 	}
4120 	if (priv->plat->tx_coe)
4121 		dev_info(priv->device, "TX Checksum insertion supported\n");
4122 
4123 	if (priv->plat->pmt) {
4124 		dev_info(priv->device, "Wake-Up On LAN supported\n");
4125 		device_set_wakeup_capable(priv->device, 1);
4126 	}
4127 
4128 	if (priv->dma_cap.tsoen)
4129 		dev_info(priv->device, "TSO supported\n");
4130 
4131 	return 0;
4132 }
4133 
4134 /**
4135  * stmmac_dvr_probe
4136  * @device: device pointer
4137  * @plat_dat: platform data pointer
4138  * @res: stmmac resource pointer
4139  * Description: this is the main probe function, used to
4140  * allocate the net device via alloc_etherdev and set up the private structure.
4141  * Return:
4142  * returns 0 on success, otherwise a negative errno.
4143  */
4144 int stmmac_dvr_probe(struct device *device,
4145 		     struct plat_stmmacenet_data *plat_dat,
4146 		     struct stmmac_resources *res)
4147 {
4148 	struct net_device *ndev = NULL;
4149 	struct stmmac_priv *priv;
4150 	int ret = 0;
4151 	u32 queue;
4152 
4153 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4154 				  MTL_MAX_TX_QUEUES,
4155 				  MTL_MAX_RX_QUEUES);
4156 	if (!ndev)
4157 		return -ENOMEM;
4158 
4159 	SET_NETDEV_DEV(ndev, device);
4160 
4161 	priv = netdev_priv(ndev);
4162 	priv->device = device;
4163 	priv->dev = ndev;
4164 
4165 	stmmac_set_ethtool_ops(ndev);
4166 	priv->pause = pause;
4167 	priv->plat = plat_dat;
4168 	priv->ioaddr = res->addr;
4169 	priv->dev->base_addr = (unsigned long)res->addr;
4170 
4171 	priv->dev->irq = res->irq;
4172 	priv->wol_irq = res->wol_irq;
4173 	priv->lpi_irq = res->lpi_irq;
4174 
4175 	if (res->mac)
4176 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4177 
4178 	dev_set_drvdata(device, priv->dev);
4179 
4180 	/* Verify driver arguments */
4181 	stmmac_verify_args();
4182 
4183 	/* Allocate workqueue */
4184 	priv->wq = create_singlethread_workqueue("stmmac_wq");
4185 	if (!priv->wq) {
4186 		dev_err(priv->device, "failed to create workqueue\n");
4187 		goto error_wq;
4188 	}
4189 
4190 	INIT_WORK(&priv->service_task, stmmac_service_task);
4191 
4192 	/* Override with kernel parameters if supplied. XXX CRS XXX:
4193 	 * this needs to support multiple instances.
4194 	 */
4195 	if ((phyaddr >= 0) && (phyaddr <= 31))
4196 		priv->plat->phy_addr = phyaddr;
4197 
4198 	if (priv->plat->stmmac_rst) {
4199 		ret = reset_control_assert(priv->plat->stmmac_rst);
4200 		reset_control_deassert(priv->plat->stmmac_rst);
4201 		/* Some reset controllers have only reset callback instead of
4202 		 * assert + deassert callbacks pair.
4203 		 */
4204 		if (ret == -ENOTSUPP)
4205 			reset_control_reset(priv->plat->stmmac_rst);
4206 	}
4207 
4208 	/* Init MAC and get the capabilities */
4209 	ret = stmmac_hw_init(priv);
4210 	if (ret)
4211 		goto error_hw_init;
4212 
4213 	/* Configure real RX and TX queues */
4214 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4215 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4216 
4217 	ndev->netdev_ops = &stmmac_netdev_ops;
4218 
4219 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4220 			    NETIF_F_RXCSUM;
4221 
4222 	ret = stmmac_tc_init(priv, priv);
4223 	if (!ret)
4224 		ndev->hw_features |= NETIF_F_HW_TC;
4226 
4227 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4228 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4229 		priv->tso = true;
4230 		dev_info(priv->device, "TSO feature enabled\n");
4231 	}
4232 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4233 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4234 #ifdef STMMAC_VLAN_TAG_USED
4235 	/* Both mac100 and gmac support receive VLAN tag detection */
4236 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4237 #endif
4238 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4239 
4240 	/* MTU range: 46 - hw-specific max */
4241 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4242 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4243 		ndev->max_mtu = JUMBO_LEN;
4244 	else
4245 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4246 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4247 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4248 	 */
4249 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4250 	    (priv->plat->maxmtu >= ndev->min_mtu))
4251 		ndev->max_mtu = priv->plat->maxmtu;
4252 	else if (priv->plat->maxmtu < ndev->min_mtu)
4253 		dev_warn(priv->device,
4254 			 "%s: warning: maxmtu has an invalid value (%d)\n",
4255 			 __func__, priv->plat->maxmtu);
4256 
4257 	if (flow_ctrl)
4258 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4259 
4260 	/* Rx Watchdog is available in cores newer than 3.40.
4261 	 * In some cases, for example on buggy HW, this feature
4262 	 * has to be disabled; this can be done by passing the
4263 	 * riwt_off field from the platform.
4264 	 */
4265 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4266 		priv->use_riwt = 1;
4267 		dev_info(priv->device,
4268 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4269 	}
4270 
4271 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4272 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4273 
4274 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4275 			       (8 * priv->plat->rx_queues_to_use));
4276 	}
4277 
4278 	spin_lock_init(&priv->lock);
4279 
4280 	/* If a specific clk_csr value is passed from the platform
4281 	 * this means that the CSR Clock Range selection cannot be
4282 	 * changed at run-time and is fixed. Otherwise, the driver
4283 	 * will try to set the MDC clock dynamically according to the
4284 	 * actual csr clock input.
4285 	 */
4286 	if (!priv->plat->clk_csr)
4287 		stmmac_clk_csr_set(priv);
4288 	else
4289 		priv->clk_csr = priv->plat->clk_csr;
4290 
4291 	stmmac_check_pcs_mode(priv);
4292 
4293 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4294 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4295 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4296 		/* MDIO bus Registration */
4297 		ret = stmmac_mdio_register(ndev);
4298 		if (ret < 0) {
4299 			dev_err(priv->device,
4300 				"%s: MDIO bus (id: %d) registration failed\n",
4301 				__func__, priv->plat->bus_id);
4302 			goto error_mdio_register;
4303 		}
4304 	}
4305 
4306 	ret = register_netdev(ndev);
4307 	if (ret) {
4308 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4309 			__func__, ret);
4310 		goto error_netdev_register;
4311 	}
4312 
4313 	return ret;
4314 
4315 error_netdev_register:
4316 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4317 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4318 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4319 		stmmac_mdio_unregister(ndev);
4320 error_mdio_register:
4321 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4322 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4323 
4324 		netif_napi_del(&rx_q->napi);
4325 	}
4326 error_hw_init:
4327 	destroy_workqueue(priv->wq);
4328 error_wq:
4329 	free_netdev(ndev);
4330 
4331 	return ret;
4332 }
4333 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4334 
4335 /**
4336  * stmmac_dvr_remove
4337  * @dev: device pointer
4338  * Description: this function resets the TX/RX processes, disables the MAC
4339  * RX/TX, changes the link status, and releases the DMA descriptor rings.
4340  */
4341 int stmmac_dvr_remove(struct device *dev)
4342 {
4343 	struct net_device *ndev = dev_get_drvdata(dev);
4344 	struct stmmac_priv *priv = netdev_priv(ndev);
4345 
4346 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
4347 
4348 	stmmac_stop_all_dma(priv);
4349 
4350 	stmmac_mac_set(priv, priv->ioaddr, false);
4351 	netif_carrier_off(ndev);
4352 	unregister_netdev(ndev);
4353 	if (priv->plat->stmmac_rst)
4354 		reset_control_assert(priv->plat->stmmac_rst);
4355 	clk_disable_unprepare(priv->plat->pclk);
4356 	clk_disable_unprepare(priv->plat->stmmac_clk);
4357 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4358 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4359 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4360 		stmmac_mdio_unregister(ndev);
4361 	destroy_workqueue(priv->wq);
4362 	free_netdev(ndev);
4363 
4364 	return 0;
4365 }
4366 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4367 
4368 /**
4369  * stmmac_suspend - suspend callback
4370  * @dev: device pointer
4371  * Description: this function suspends the device. It is called by the
4372  * platform driver to stop the network queues, release the resources,
4373  * program the PMT register (for WoL), and release the driver resources.
4374  */
4375 int stmmac_suspend(struct device *dev)
4376 {
4377 	struct net_device *ndev = dev_get_drvdata(dev);
4378 	struct stmmac_priv *priv = netdev_priv(ndev);
4379 	unsigned long flags;
4380 
4381 	if (!ndev || !netif_running(ndev))
4382 		return 0;
4383 
4384 	if (ndev->phydev)
4385 		phy_stop(ndev->phydev);
4386 
4387 	spin_lock_irqsave(&priv->lock, flags);
4388 
4389 	netif_device_detach(ndev);
4390 	stmmac_stop_all_queues(priv);
4391 
4392 	stmmac_disable_all_queues(priv);
4393 
4394 	/* Stop TX/RX DMA */
4395 	stmmac_stop_all_dma(priv);
4396 
4397 	/* Enable Power down mode by programming the PMT regs */
4398 	if (device_may_wakeup(priv->device)) {
4399 		stmmac_pmt(priv, priv->hw, priv->wolopts);
4400 		priv->irq_wake = 1;
4401 	} else {
4402 		stmmac_mac_set(priv, priv->ioaddr, false);
4403 		pinctrl_pm_select_sleep_state(priv->device);
4404 		/* Disable the clocks since PM wakeup is off */
4405 		clk_disable(priv->plat->pclk);
4406 		clk_disable(priv->plat->stmmac_clk);
4407 	}
4408 	spin_unlock_irqrestore(&priv->lock, flags);
4409 
4410 	priv->oldlink = false;
4411 	priv->speed = SPEED_UNKNOWN;
4412 	priv->oldduplex = DUPLEX_UNKNOWN;
4413 	return 0;
4414 }
4415 EXPORT_SYMBOL_GPL(stmmac_suspend);
4416 
4417 /**
4418  * stmmac_reset_queues_param - reset queue parameters
4419  * @dev: device pointer
4420  */
4421 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4422 {
4423 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4424 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4425 	u32 queue;
4426 
4427 	for (queue = 0; queue < rx_cnt; queue++) {
4428 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4429 
4430 		rx_q->cur_rx = 0;
4431 		rx_q->dirty_rx = 0;
4432 	}
4433 
4434 	for (queue = 0; queue < tx_cnt; queue++) {
4435 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4436 
4437 		tx_q->cur_tx = 0;
4438 		tx_q->dirty_tx = 0;
4439 		tx_q->mss = 0;
4440 	}
4441 }
4442 
4443 /**
4444  * stmmac_resume - resume callback
4445  * @dev: device pointer
4446  * Description: on resume, this function is invoked to set up the DMA and
4447  * CORE in a usable state.
4448  */
4449 int stmmac_resume(struct device *dev)
4450 {
4451 	struct net_device *ndev = dev_get_drvdata(dev);
4452 	struct stmmac_priv *priv = netdev_priv(ndev);
4453 	unsigned long flags;
4454 
4455 	if (!netif_running(ndev))
4456 		return 0;
4457 
4458 	/* The Power Down bit in the PM register is cleared
4459 	 * automatically as soon as a magic packet or a Wake-up frame
4460 	 * is received. Even so, it's better to clear this bit manually
4461 	 * because it can cause problems when resuming from other
4462 	 * devices (e.g. serial console).
4463 	 */
4464 	if (device_may_wakeup(priv->device)) {
4465 		spin_lock_irqsave(&priv->lock, flags);
4466 		stmmac_pmt(priv, priv->hw, 0);
4467 		spin_unlock_irqrestore(&priv->lock, flags);
4468 		priv->irq_wake = 0;
4469 	} else {
4470 		pinctrl_pm_select_default_state(priv->device);
4471 		/* enable the clocks that were previously disabled */
4472 		clk_enable(priv->plat->stmmac_clk);
4473 		clk_enable(priv->plat->pclk);
4474 		/* reset the phy so that it's ready */
4475 		if (priv->mii)
4476 			stmmac_mdio_reset(priv->mii);
4477 	}
4478 
4479 	netif_device_attach(ndev);
4480 
4481 	spin_lock_irqsave(&priv->lock, flags);
4482 
4483 	stmmac_reset_queues_param(priv);
4484 
4485 	stmmac_clear_descriptors(priv);
4486 
4487 	stmmac_hw_setup(ndev, false);
4488 	stmmac_init_tx_coalesce(priv);
4489 	stmmac_set_rx_mode(ndev);
4490 
4491 	stmmac_enable_all_queues(priv);
4492 
4493 	stmmac_start_all_queues(priv);
4494 
4495 	spin_unlock_irqrestore(&priv->lock, flags);
4496 
4497 	if (ndev->phydev)
4498 		phy_start(ndev->phydev);
4499 
4500 	return 0;
4501 }
4502 EXPORT_SYMBOL_GPL(stmmac_resume);
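
/* Sketch, assuming the usual bus glue (e.g. stmmac_platform.c or
 * stmmac_pci.c, outside this file): the suspend/resume pair above is
 * typically exposed through a dev_pm_ops instance such as:
 *
 *	static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
 */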
4503 
4504 #ifndef MODULE
4505 static int __init stmmac_cmdline_opt(char *str)
4506 {
4507 	char *opt;
4508 
4509 	if (!str || !*str)
4510 		return -EINVAL;
4511 	while ((opt = strsep(&str, ",")) != NULL) {
4512 		if (!strncmp(opt, "debug:", 6)) {
4513 			if (kstrtoint(opt + 6, 0, &debug))
4514 				goto err;
4515 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4516 			if (kstrtoint(opt + 8, 0, &phyaddr))
4517 				goto err;
4518 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4519 			if (kstrtoint(opt + 7, 0, &buf_sz))
4520 				goto err;
4521 		} else if (!strncmp(opt, "tc:", 3)) {
4522 			if (kstrtoint(opt + 3, 0, &tc))
4523 				goto err;
4524 		} else if (!strncmp(opt, "watchdog:", 9)) {
4525 			if (kstrtoint(opt + 9, 0, &watchdog))
4526 				goto err;
4527 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4528 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4529 				goto err;
4530 		} else if (!strncmp(opt, "pause:", 6)) {
4531 			if (kstrtoint(opt + 6, 0, &pause))
4532 				goto err;
4533 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4534 			if (kstrtoint(opt + 10, 0, &eee_timer))
4535 				goto err;
4536 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4537 			if (kstrtoint(opt + 11, 0, &chain_mode))
4538 				goto err;
4539 		}
4540 	}
4541 	return 0;
4542 
4543 err:
4544 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4545 	return -EINVAL;
4546 }
4547 
4548 __setup("stmmaceth=", stmmac_cmdline_opt);
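
/* Example: with the driver built in, the options parsed above can be
 * passed on the kernel command line, e.g.:
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000
 */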
4549 #endif /* MODULE */
4550 
4551 static int __init stmmac_init(void)
4552 {
4553 #ifdef CONFIG_DEBUG_FS
4554 	/* Create debugfs main directory if it doesn't exist yet */
4555 	if (!stmmac_fs_dir) {
4556 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4557 
4558 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4559 			pr_err("ERROR %s, debugfs create directory failed\n",
4560 			       STMMAC_RESOURCE_NAME);
4561 
4562 			return -ENOMEM;
4563 		}
4564 	}
4565 #endif
4566 
4567 	return 0;
4568 }
4569 
4570 static void __exit stmmac_exit(void)
4571 {
4572 #ifdef CONFIG_DEBUG_FS
4573 	debugfs_remove_recursive(stmmac_fs_dir);
4574 #endif
4575 }
4576 
4577 module_init(stmmac_init)
4578 module_exit(stmmac_exit)
4579 
4580 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4581 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4582 MODULE_LICENSE("GPL");
4583