1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "hwif.h"
55 
56 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
57 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
58 
59 /* Module parameters */
60 #define TX_TIMEO	5000
61 static int watchdog = TX_TIMEO;
62 module_param(watchdog, int, 0644);
63 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
64 
65 static int debug = -1;
66 module_param(debug, int, 0644);
67 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
68 
69 static int phyaddr = -1;
70 module_param(phyaddr, int, 0444);
71 MODULE_PARM_DESC(phyaddr, "Physical device address");
72 
73 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
74 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
75 
76 static int flow_ctrl = FLOW_OFF;
77 module_param(flow_ctrl, int, 0644);
78 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
79 
80 static int pause = PAUSE_TIME;
81 module_param(pause, int, 0644);
82 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
83 
84 #define TC_DEFAULT 64
85 static int tc = TC_DEFAULT;
86 module_param(tc, int, 0644);
87 MODULE_PARM_DESC(tc, "DMA threshold control value");
88 
89 #define	DEFAULT_BUFSIZE	1536
90 static int buf_sz = DEFAULT_BUFSIZE;
91 module_param(buf_sz, int, 0644);
92 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
93 
94 #define	STMMAC_RX_COPYBREAK	256
95 
96 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
97 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
98 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
99 
100 #define STMMAC_DEFAULT_LPI_TIMER	1000
101 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
102 module_param(eee_timer, int, 0644);
103 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
104 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
105 
/* By default the driver uses ring mode to manage the TX and RX descriptors,
 * but the user can force chain mode instead of ring mode.
 */
static int chain_mode;
110 module_param(chain_mode, int, 0444);
111 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
112 
113 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
114 
115 #ifdef CONFIG_DEBUG_FS
116 static int stmmac_init_fs(struct net_device *dev);
117 static void stmmac_exit_fs(struct net_device *dev);
118 #endif
119 
120 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
121 
122 /**
123  * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
125  * errors.
126  */
127 static void stmmac_verify_args(void)
128 {
129 	if (unlikely(watchdog < 0))
130 		watchdog = TX_TIMEO;
131 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
132 		buf_sz = DEFAULT_BUFSIZE;
133 	if (unlikely(flow_ctrl > 1))
134 		flow_ctrl = FLOW_AUTO;
135 	else if (likely(flow_ctrl < 0))
136 		flow_ctrl = FLOW_OFF;
137 	if (unlikely((pause < 0) || (pause > 0xffff)))
138 		pause = PAUSE_TIME;
139 	if (eee_timer < 0)
140 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
141 }
142 
143 /**
144  * stmmac_disable_all_queues - Disable all queues
145  * @priv: driver private structure
146  */
147 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
148 {
149 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
150 	u32 queue;
151 
152 	for (queue = 0; queue < rx_queues_cnt; queue++) {
153 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
154 
155 		napi_disable(&rx_q->napi);
156 	}
157 }
158 
159 /**
160  * stmmac_enable_all_queues - Enable all queues
161  * @priv: driver private structure
162  */
163 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
164 {
165 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
166 	u32 queue;
167 
168 	for (queue = 0; queue < rx_queues_cnt; queue++) {
169 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
170 
171 		napi_enable(&rx_q->napi);
172 	}
173 }
174 
175 /**
176  * stmmac_stop_all_queues - Stop all queues
177  * @priv: driver private structure
178  */
179 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
180 {
181 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
182 	u32 queue;
183 
184 	for (queue = 0; queue < tx_queues_cnt; queue++)
185 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
186 }
187 
188 /**
189  * stmmac_start_all_queues - Start all queues
190  * @priv: driver private structure
191  */
192 static void stmmac_start_all_queues(struct stmmac_priv *priv)
193 {
194 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
195 	u32 queue;
196 
197 	for (queue = 0; queue < tx_queues_cnt; queue++)
198 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
199 }
200 
201 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
202 {
203 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
204 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
205 		queue_work(priv->wq, &priv->service_task);
206 }
207 
208 static void stmmac_global_err(struct stmmac_priv *priv)
209 {
210 	netif_carrier_off(priv->dev);
211 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
212 	stmmac_service_event_schedule(priv);
213 }
214 
215 /**
216  * stmmac_clk_csr_set - dynamically set the MDC clock
217  * @priv: driver private structure
218  * Description: this is to dynamically set the MDC clock according to the csr
219  * clock input.
220  * Note:
 *	If a specific clk_csr value is passed from the platform,
 *	the CSR Clock Range selection cannot be changed at run-time
 *	and is fixed (as reported in the driver documentation).
 *	Otherwise the driver will try to set the MDC clock
 *	dynamically according to the actual clock input.
226  */
227 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
228 {
229 	u32 clk_rate;
230 
231 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
232 
	/* The platform-provided default clk_csr is assumed valid for all
	 * cases except the ones handled below. For values higher than the
	 * IEEE 802.3 specified frequency we cannot estimate the proper
	 * divider because the frequency of clk_csr_i is not known, so we
	 * do not change the default divider.
239 	 */
240 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
241 		if (clk_rate < CSR_F_35M)
242 			priv->clk_csr = STMMAC_CSR_20_35M;
243 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
244 			priv->clk_csr = STMMAC_CSR_35_60M;
245 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
246 			priv->clk_csr = STMMAC_CSR_60_100M;
247 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
248 			priv->clk_csr = STMMAC_CSR_100_150M;
249 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
250 			priv->clk_csr = STMMAC_CSR_150_250M;
251 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
252 			priv->clk_csr = STMMAC_CSR_250_300M;
253 	}
254 
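	/* The dwmac-sun8i glue uses its own MDC divider encoding: it picks a
	 * raw value in the 0-3 range from the clock rate thresholds below
	 * instead of the Synopsys CSR ranges handled above.
	 */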
255 	if (priv->plat->has_sun8i) {
256 		if (clk_rate > 160000000)
257 			priv->clk_csr = 0x03;
258 		else if (clk_rate > 80000000)
259 			priv->clk_csr = 0x02;
260 		else if (clk_rate > 40000000)
261 			priv->clk_csr = 0x01;
262 		else
263 			priv->clk_csr = 0;
264 	}
265 }
266 
267 static void print_pkt(unsigned char *buf, int len)
268 {
269 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
270 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
271 }
272 
273 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
274 {
275 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
276 	u32 avail;
277 
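	/* Free descriptors between cur_tx and dirty_tx, computed modulo the
	 * ring size; one slot is always left unused so a full ring can be
	 * told apart from an empty one.
	 */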
278 	if (tx_q->dirty_tx > tx_q->cur_tx)
279 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
280 	else
281 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
282 
283 	return avail;
284 }
285 
286 /**
287  * stmmac_rx_dirty - Get RX queue dirty
288  * @priv: driver private structure
289  * @queue: RX queue index
290  */
291 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
292 {
293 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
294 	u32 dirty;
295 
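	/* Number of descriptors consumed by the DMA (between dirty_rx and
	 * cur_rx, modulo the ring size) that are waiting to be refilled.
	 */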
296 	if (rx_q->dirty_rx <= rx_q->cur_rx)
297 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
298 	else
299 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
300 
301 	return dirty;
302 }
303 
304 /**
305  * stmmac_hw_fix_mac_speed - callback for speed selection
306  * @priv: driver private structure
307  * Description: on some platforms (e.g. ST), some HW system configuration
308  * registers have to be set according to the link speed negotiated.
309  */
310 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
311 {
312 	struct net_device *ndev = priv->dev;
313 	struct phy_device *phydev = ndev->phydev;
314 
315 	if (likely(priv->plat->fix_mac_speed))
316 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
317 }
318 
319 /**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function checks whether all TX queues have finished their
 * work and, if so, enters LPI mode when EEE is enabled.
324  */
325 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
326 {
327 	u32 tx_cnt = priv->plat->tx_queues_to_use;
328 	u32 queue;
329 
330 	/* check if all TX queues have the work finished */
331 	for (queue = 0; queue < tx_cnt; queue++) {
332 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
333 
334 		if (tx_q->dirty_tx != tx_q->cur_tx)
335 			return; /* still unfinished work */
336 	}
337 
338 	/* Check and enter in LPI mode */
339 	if (!priv->tx_path_in_lpi_mode)
340 		stmmac_set_eee_mode(priv, priv->hw,
341 				priv->plat->en_tx_lpi_clockgating);
342 }
343 
344 /**
345  * stmmac_disable_eee_mode - disable and exit from LPI mode
346  * @priv: driver private structure
 * Description: this function exits LPI state and disables EEE. It is
 * called from the xmit path.
349  */
350 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
351 {
352 	stmmac_reset_eee_mode(priv, priv->hw);
353 	del_timer_sync(&priv->eee_ctrl_timer);
354 	priv->tx_path_in_lpi_mode = false;
355 }
356 
357 /**
358  * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: the timer_list pointer
 * Description:
 *  if there is no data transfer and we are not already in LPI state,
 *  the MAC transmitter can be moved to LPI state.
363  */
364 static void stmmac_eee_ctrl_timer(struct timer_list *t)
365 {
366 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
367 
368 	stmmac_enable_eee_mode(priv);
369 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
370 }
371 
372 /**
373  * stmmac_eee_init - init EEE
374  * @priv: driver private structure
375  * Description:
 *  if the GMAC supports EEE (from the HW cap reg) and the PHY device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
379  */
380 bool stmmac_eee_init(struct stmmac_priv *priv)
381 {
382 	struct net_device *ndev = priv->dev;
383 	int interface = priv->plat->interface;
384 	bool ret = false;
385 
386 	if ((interface != PHY_INTERFACE_MODE_MII) &&
387 	    (interface != PHY_INTERFACE_MODE_GMII) &&
388 	    !phy_interface_mode_is_rgmii(interface))
389 		goto out;
390 
	/* When using PCS we cannot access the PHY registers at this stage,
	 * so we do not support extra features like EEE.
393 	 */
394 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
395 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
396 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
397 		goto out;
398 
399 	/* MAC core supports the EEE feature. */
400 	if (priv->dma_cap.eee) {
401 		int tx_lpi_timer = priv->tx_lpi_timer;
402 
403 		/* Check if the PHY supports EEE */
404 		if (phy_init_eee(ndev->phydev, 1)) {
			/* Handle the case where EEE can no longer be supported
			 * at run-time (for example because the link partner
			 * capabilities have changed). In that case the driver
			 * disables its own timers.
409 			 */
410 			mutex_lock(&priv->lock);
411 			if (priv->eee_active) {
412 				netdev_dbg(priv->dev, "disable EEE\n");
413 				del_timer_sync(&priv->eee_ctrl_timer);
414 				stmmac_set_eee_timer(priv, priv->hw, 0,
415 						tx_lpi_timer);
416 			}
417 			priv->eee_active = 0;
418 			mutex_unlock(&priv->lock);
419 			goto out;
420 		}
421 		/* Activate the EEE and start timers */
422 		mutex_lock(&priv->lock);
423 		if (!priv->eee_active) {
424 			priv->eee_active = 1;
425 			timer_setup(&priv->eee_ctrl_timer,
426 				    stmmac_eee_ctrl_timer, 0);
427 			mod_timer(&priv->eee_ctrl_timer,
428 				  STMMAC_LPI_T(eee_timer));
429 
430 			stmmac_set_eee_timer(priv, priv->hw,
431 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
432 		}
433 		/* Set HW EEE according to the speed */
434 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
435 
436 		ret = true;
437 		mutex_unlock(&priv->lock);
438 
439 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
440 	}
441 out:
442 	return ret;
443 }
444 
445 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
446  * @priv: driver private structure
447  * @p : descriptor pointer
448  * @skb : the socket buffer
449  * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and passes it to the stack.
452  */
453 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
454 				   struct dma_desc *p, struct sk_buff *skb)
455 {
456 	struct skb_shared_hwtstamps shhwtstamp;
457 	u64 ns;
458 
459 	if (!priv->hwts_tx_en)
460 		return;
461 
462 	/* exit if skb doesn't support hw tstamp */
463 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
464 		return;
465 
466 	/* check tx tstamp status */
467 	if (stmmac_get_tx_timestamp_status(priv, p)) {
468 		/* get the valid tstamp */
469 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
470 
471 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
472 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
473 
474 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
475 		/* pass tstamp to stack */
476 		skb_tstamp_tx(skb, &shhwtstamp);
477 	}
478 
479 	return;
480 }
481 
482 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
483  * @priv: driver private structure
484  * @p : descriptor pointer
485  * @np : next descriptor pointer
486  * @skb : the socket buffer
487  * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
490  */
491 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
492 				   struct dma_desc *np, struct sk_buff *skb)
493 {
494 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
495 	struct dma_desc *desc = p;
496 	u64 ns;
497 
498 	if (!priv->hwts_rx_en)
499 		return;
500 	/* For GMAC4, the valid timestamp is from CTX next desc. */
501 	if (priv->plat->has_gmac4)
502 		desc = np;
503 
504 	/* Check if timestamp is available */
505 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
506 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
507 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
508 		shhwtstamp = skb_hwtstamps(skb);
509 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
510 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
511 	} else  {
512 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
513 	}
514 }
515 
516 /**
517  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
518  *  @dev: device pointer.
 *  @ifr: An IOCTL-specific structure that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet time stamping based on user input.
 *  Return Value:
 *  0 on success and a negative errno on failure.
526  */
527 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
528 {
529 	struct stmmac_priv *priv = netdev_priv(dev);
530 	struct hwtstamp_config config;
531 	struct timespec64 now;
532 	u64 temp = 0;
533 	u32 ptp_v2 = 0;
534 	u32 tstamp_all = 0;
535 	u32 ptp_over_ipv4_udp = 0;
536 	u32 ptp_over_ipv6_udp = 0;
537 	u32 ptp_over_ethernet = 0;
538 	u32 snap_type_sel = 0;
539 	u32 ts_master_en = 0;
540 	u32 ts_event_en = 0;
541 	u32 value = 0;
542 	u32 sec_inc;
543 
544 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
545 		netdev_alert(priv->dev, "No support for HW time stamping\n");
546 		priv->hwts_tx_en = 0;
547 		priv->hwts_rx_en = 0;
548 
549 		return -EOPNOTSUPP;
550 	}
551 
552 	if (copy_from_user(&config, ifr->ifr_data,
553 			   sizeof(struct hwtstamp_config)))
554 		return -EFAULT;
555 
556 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
557 		   __func__, config.flags, config.tx_type, config.rx_filter);
558 
559 	/* reserved for future extensions */
560 	if (config.flags)
561 		return -EINVAL;
562 
563 	if (config.tx_type != HWTSTAMP_TX_OFF &&
564 	    config.tx_type != HWTSTAMP_TX_ON)
565 		return -ERANGE;
566 
567 	if (priv->adv_ts) {
568 		switch (config.rx_filter) {
569 		case HWTSTAMP_FILTER_NONE:
			/* do not time stamp any incoming packet */
571 			config.rx_filter = HWTSTAMP_FILTER_NONE;
572 			break;
573 
574 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
575 			/* PTP v1, UDP, any kind of event packet */
576 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
577 			/* take time stamp for all event messages */
578 			if (priv->plat->has_gmac4)
579 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
580 			else
581 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
582 
583 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
584 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
585 			break;
586 
587 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
588 			/* PTP v1, UDP, Sync packet */
589 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
590 			/* take time stamp for SYNC messages only */
591 			ts_event_en = PTP_TCR_TSEVNTENA;
592 
593 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
594 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
595 			break;
596 
597 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
598 			/* PTP v1, UDP, Delay_req packet */
599 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
600 			/* take time stamp for Delay_Req messages only */
601 			ts_master_en = PTP_TCR_TSMSTRENA;
602 			ts_event_en = PTP_TCR_TSEVNTENA;
603 
604 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
605 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
606 			break;
607 
608 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
609 			/* PTP v2, UDP, any kind of event packet */
610 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
611 			ptp_v2 = PTP_TCR_TSVER2ENA;
612 			/* take time stamp for all event messages */
613 			if (priv->plat->has_gmac4)
614 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
615 			else
616 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
617 
618 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
619 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
620 			break;
621 
622 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
623 			/* PTP v2, UDP, Sync packet */
624 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
625 			ptp_v2 = PTP_TCR_TSVER2ENA;
626 			/* take time stamp for SYNC messages only */
627 			ts_event_en = PTP_TCR_TSEVNTENA;
628 
629 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
630 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
631 			break;
632 
633 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
634 			/* PTP v2, UDP, Delay_req packet */
635 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
636 			ptp_v2 = PTP_TCR_TSVER2ENA;
637 			/* take time stamp for Delay_Req messages only */
638 			ts_master_en = PTP_TCR_TSMSTRENA;
639 			ts_event_en = PTP_TCR_TSEVNTENA;
640 
641 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643 			break;
644 
645 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.1AS, any layer, any kind of event packet */
647 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
648 			ptp_v2 = PTP_TCR_TSVER2ENA;
649 			/* take time stamp for all event messages */
650 			if (priv->plat->has_gmac4)
651 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
652 			else
653 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
654 
655 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
656 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
657 			ptp_over_ethernet = PTP_TCR_TSIPENA;
658 			break;
659 
660 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.1AS, any layer, Sync packet */
662 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
663 			ptp_v2 = PTP_TCR_TSVER2ENA;
664 			/* take time stamp for SYNC messages only */
665 			ts_event_en = PTP_TCR_TSEVNTENA;
666 
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			ptp_over_ethernet = PTP_TCR_TSIPENA;
670 			break;
671 
672 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.1AS, any layer, Delay_req packet */
674 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
675 			ptp_v2 = PTP_TCR_TSVER2ENA;
676 			/* take time stamp for Delay_Req messages only */
677 			ts_master_en = PTP_TCR_TSMSTRENA;
678 			ts_event_en = PTP_TCR_TSEVNTENA;
679 
680 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
681 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
682 			ptp_over_ethernet = PTP_TCR_TSIPENA;
683 			break;
684 
685 		case HWTSTAMP_FILTER_NTP_ALL:
686 		case HWTSTAMP_FILTER_ALL:
687 			/* time stamp any incoming packet */
688 			config.rx_filter = HWTSTAMP_FILTER_ALL;
689 			tstamp_all = PTP_TCR_TSENALL;
690 			break;
691 
692 		default:
693 			return -ERANGE;
694 		}
695 	} else {
696 		switch (config.rx_filter) {
697 		case HWTSTAMP_FILTER_NONE:
698 			config.rx_filter = HWTSTAMP_FILTER_NONE;
699 			break;
700 		default:
701 			/* PTP v1, UDP, any kind of event packet */
702 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
703 			break;
704 		}
705 	}
706 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
707 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
708 
709 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
710 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
711 	else {
712 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
713 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
714 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
715 			 ts_master_en | snap_type_sel);
716 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
717 
718 		/* program Sub Second Increment reg */
719 		stmmac_config_sub_second_increment(priv,
720 				priv->ptpaddr, priv->plat->clk_ptp_rate,
721 				priv->plat->has_gmac4, &sec_inc);
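		/* sec_inc is the sub-second increment in ns; 1e9/sec_inc is
		 * the frequency division ratio used below to compute the
		 * default addend.
		 */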
722 		temp = div_u64(1000000000ULL, sec_inc);
723 
724 		/* Store sub second increment and flags for later use */
725 		priv->sub_second_inc = sec_inc;
726 		priv->systime_flags = value;
727 
		/* calculate the default addend value:
		 * the formula is:
		 * addend = (2^32)/freq_div_ratio;
		 * where freq_div_ratio = 1e9ns/sec_inc
		 */
733 		temp = (u64)(temp << 32);
734 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
735 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
736 
737 		/* initialize system time */
738 		ktime_get_real_ts64(&now);
739 
740 		/* lower 32 bits of tv_sec are safe until y2106 */
741 		stmmac_init_systime(priv, priv->ptpaddr,
742 				(u32)now.tv_sec, now.tv_nsec);
743 	}
744 
745 	return copy_to_user(ifr->ifr_data, &config,
746 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
747 }
748 
749 /**
750  * stmmac_init_ptp - init PTP
751  * @priv: driver private structure
 * Description: this verifies whether the HW supports PTPv1 or PTPv2
 * by looking at the HW capability register.
 * This function also registers the PTP driver.
755  */
756 static int stmmac_init_ptp(struct stmmac_priv *priv)
757 {
758 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
759 		return -EOPNOTSUPP;
760 
761 	priv->adv_ts = 0;
762 	/* Check if adv_ts can be enabled for dwmac 4.x core */
763 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
764 		priv->adv_ts = 1;
765 	/* Dwmac 3.x core with extend_desc can support adv_ts */
766 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
767 		priv->adv_ts = 1;
768 
769 	if (priv->dma_cap.time_stamp)
770 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
771 
772 	if (priv->adv_ts)
773 		netdev_info(priv->dev,
774 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
775 
776 	priv->hwts_tx_en = 0;
777 	priv->hwts_rx_en = 0;
778 
779 	stmmac_ptp_register(priv);
780 
781 	return 0;
782 }
783 
784 static void stmmac_release_ptp(struct stmmac_priv *priv)
785 {
786 	if (priv->plat->clk_ptp_ref)
787 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
788 	stmmac_ptp_unregister(priv);
789 }
790 
791 /**
792  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode negotiated by the PHY
 *  Description: It is used for configuring the flow control in all queues
795  */
796 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
797 {
798 	u32 tx_cnt = priv->plat->tx_queues_to_use;
799 
800 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
801 			priv->pause, tx_cnt);
802 }
803 
804 /**
805  * stmmac_adjust_link - adjusts the link parameters
806  * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the PHY link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because the link may come up on a
 * different (EEE capable) network.
812  */
813 static void stmmac_adjust_link(struct net_device *dev)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct phy_device *phydev = dev->phydev;
817 	bool new_state = false;
818 
819 	if (!phydev)
820 		return;
821 
822 	mutex_lock(&priv->lock);
823 
824 	if (phydev->link) {
825 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
826 
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
829 		if (phydev->duplex != priv->oldduplex) {
830 			new_state = true;
831 			if (!phydev->duplex)
832 				ctrl &= ~priv->hw->link.duplex;
833 			else
834 				ctrl |= priv->hw->link.duplex;
835 			priv->oldduplex = phydev->duplex;
836 		}
837 		/* Flow Control operation */
838 		if (phydev->pause)
839 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
840 
841 		if (phydev->speed != priv->speed) {
842 			new_state = true;
843 			ctrl &= ~priv->hw->link.speed_mask;
844 			switch (phydev->speed) {
845 			case SPEED_1000:
846 				ctrl |= priv->hw->link.speed1000;
847 				break;
848 			case SPEED_100:
849 				ctrl |= priv->hw->link.speed100;
850 				break;
851 			case SPEED_10:
852 				ctrl |= priv->hw->link.speed10;
853 				break;
854 			default:
855 				netif_warn(priv, link, priv->dev,
856 					   "broken speed: %d\n", phydev->speed);
857 				phydev->speed = SPEED_UNKNOWN;
858 				break;
859 			}
860 			if (phydev->speed != SPEED_UNKNOWN)
861 				stmmac_hw_fix_mac_speed(priv);
862 			priv->speed = phydev->speed;
863 		}
864 
865 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
866 
867 		if (!priv->oldlink) {
868 			new_state = true;
869 			priv->oldlink = true;
870 		}
871 	} else if (priv->oldlink) {
872 		new_state = true;
873 		priv->oldlink = false;
874 		priv->speed = SPEED_UNKNOWN;
875 		priv->oldduplex = DUPLEX_UNKNOWN;
876 	}
877 
878 	if (new_state && netif_msg_link(priv))
879 		phy_print_status(phydev);
880 
881 	mutex_unlock(&priv->lock);
882 
883 	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the adjust_link hook when
		 * a switch is attached to the stmmac driver.
886 		 */
887 		phydev->irq = PHY_IGNORE_INTERRUPT;
888 	else
889 		/* At this stage, init the EEE if supported.
890 		 * Never called in case of fixed_link.
891 		 */
892 		priv->eee_enabled = stmmac_eee_init(priv);
893 }
894 
895 /**
896  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
897  * @priv: driver private structure
 * Description: this verifies whether the HW supports the PCS, i.e. the
 * Physical Coding Sublayer interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
901  */
902 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
903 {
904 	int interface = priv->plat->interface;
905 
906 	if (priv->dma_cap.pcs) {
907 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
908 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
909 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
910 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
911 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
912 			priv->hw->pcs = STMMAC_PCS_RGMII;
913 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
914 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
915 			priv->hw->pcs = STMMAC_PCS_SGMII;
916 		}
917 	}
918 }
919 
920 /**
921  * stmmac_init_phy - PHY initialization
922  * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the MAC driver.
 * Return value:
 * 0 on success
927  */
928 static int stmmac_init_phy(struct net_device *dev)
929 {
930 	struct stmmac_priv *priv = netdev_priv(dev);
931 	u32 tx_cnt = priv->plat->tx_queues_to_use;
932 	struct phy_device *phydev;
933 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
934 	char bus_id[MII_BUS_ID_SIZE];
935 	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

	priv->oldlink = false;
938 	priv->speed = SPEED_UNKNOWN;
939 	priv->oldduplex = DUPLEX_UNKNOWN;
940 
941 	if (priv->plat->phy_node) {
942 		phydev = of_phy_connect(dev, priv->plat->phy_node,
943 					&stmmac_adjust_link, 0, interface);
944 	} else {
945 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
946 			 priv->plat->bus_id);
947 
948 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
949 			 priv->plat->phy_addr);
950 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
951 			   phy_id_fmt);
952 
953 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
954 				     interface);
955 	}
956 
957 	if (IS_ERR_OR_NULL(phydev)) {
958 		netdev_err(priv->dev, "Could not attach to PHY\n");
959 		if (!phydev)
960 			return -ENODEV;
961 
962 		return PTR_ERR(phydev);
963 	}
964 
965 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
966 	if ((interface == PHY_INTERFACE_MODE_MII) ||
967 	    (interface == PHY_INTERFACE_MODE_RMII) ||
968 		(max_speed < 1000 && max_speed > 0))
969 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
970 					 SUPPORTED_1000baseT_Full);
971 
972 	/*
	 * Half-duplex mode is not supported with multiqueue;
	 * half-duplex can only work with a single queue.
975 	 */
976 	if (tx_cnt > 1)
977 		phydev->supported &= ~(SUPPORTED_1000baseT_Half |
978 				       SUPPORTED_100baseT_Half |
979 				       SUPPORTED_10baseT_Half);
980 
981 	/*
982 	 * Broken HW is sometimes missing the pull-up resistor on the
983 	 * MDIO line, which results in reads to non-existent devices returning
984 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
985 	 * device as well.
986 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
987 	 */
988 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
989 		phy_disconnect(phydev);
990 		return -ENODEV;
991 	}
992 
993 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling; make sure we force a link transition if
995 	 * we have a UP/DOWN/UP transition
996 	 */
997 	if (phydev->is_pseudo_fixed_link)
998 		phydev->irq = PHY_POLL;
999 
1000 	phy_attached_info(phydev);
1001 	return 0;
1002 }
1003 
1004 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1005 {
1006 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1007 	void *head_rx;
1008 	u32 queue;
1009 
1010 	/* Display RX rings */
1011 	for (queue = 0; queue < rx_cnt; queue++) {
1012 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1013 
1014 		pr_info("\tRX Queue %u rings\n", queue);
1015 
1016 		if (priv->extend_desc)
1017 			head_rx = (void *)rx_q->dma_erx;
1018 		else
1019 			head_rx = (void *)rx_q->dma_rx;
1020 
1021 		/* Display RX ring */
1022 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1023 	}
1024 }
1025 
1026 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1027 {
1028 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1029 	void *head_tx;
1030 	u32 queue;
1031 
1032 	/* Display TX rings */
1033 	for (queue = 0; queue < tx_cnt; queue++) {
1034 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1035 
1036 		pr_info("\tTX Queue %d rings\n", queue);
1037 
1038 		if (priv->extend_desc)
1039 			head_tx = (void *)tx_q->dma_etx;
1040 		else
1041 			head_tx = (void *)tx_q->dma_tx;
1042 
1043 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1044 	}
1045 }
1046 
1047 static void stmmac_display_rings(struct stmmac_priv *priv)
1048 {
1049 	/* Display RX ring */
1050 	stmmac_display_rx_rings(priv);
1051 
1052 	/* Display TX ring */
1053 	stmmac_display_tx_rings(priv);
1054 }
1055 
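/* Map the MTU to one of the supported DMA buffer size buckets (the default
 * 1536-byte buffer, 2KiB, 4KiB or 8KiB).
 */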
1056 static int stmmac_set_bfsize(int mtu, int bufsize)
1057 {
1058 	int ret = bufsize;
1059 
1060 	if (mtu >= BUF_SIZE_4KiB)
1061 		ret = BUF_SIZE_8KiB;
1062 	else if (mtu >= BUF_SIZE_2KiB)
1063 		ret = BUF_SIZE_4KiB;
1064 	else if (mtu > DEFAULT_BUFSIZE)
1065 		ret = BUF_SIZE_2KiB;
1066 	else
1067 		ret = DEFAULT_BUFSIZE;
1068 
1069 	return ret;
1070 }
1071 
1072 /**
1073  * stmmac_clear_rx_descriptors - clear RX descriptors
1074  * @priv: driver private structure
1075  * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are used.
1078  */
1079 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1080 {
1081 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1082 	int i;
1083 
1084 	/* Clear the RX descriptors */
1085 	for (i = 0; i < DMA_RX_SIZE; i++)
1086 		if (priv->extend_desc)
1087 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1088 					priv->use_riwt, priv->mode,
1089 					(i == DMA_RX_SIZE - 1));
1090 		else
1091 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1092 					priv->use_riwt, priv->mode,
1093 					(i == DMA_RX_SIZE - 1));
1094 }
1095 
1096 /**
1097  * stmmac_clear_tx_descriptors - clear tx descriptors
1098  * @priv: driver private structure
1099  * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are used.
1102  */
1103 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1104 {
1105 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1106 	int i;
1107 
1108 	/* Clear the TX descriptors */
1109 	for (i = 0; i < DMA_TX_SIZE; i++)
1110 		if (priv->extend_desc)
1111 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1112 					priv->mode, (i == DMA_TX_SIZE - 1));
1113 		else
1114 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1115 					priv->mode, (i == DMA_TX_SIZE - 1));
1116 }
1117 
1118 /**
1119  * stmmac_clear_descriptors - clear descriptors
1120  * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
1123  */
1124 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1125 {
1126 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1127 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1128 	u32 queue;
1129 
1130 	/* Clear the RX descriptors */
1131 	for (queue = 0; queue < rx_queue_cnt; queue++)
1132 		stmmac_clear_rx_descriptors(priv, queue);
1133 
1134 	/* Clear the TX descriptors */
1135 	for (queue = 0; queue < tx_queue_cnt; queue++)
1136 		stmmac_clear_tx_descriptors(priv, queue);
1137 }
1138 
1139 /**
1140  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1141  * @priv: driver private structure
1142  * @p: descriptor pointer
1143  * @i: descriptor index
1144  * @flags: gfp flag
1145  * @queue: RX queue index
1146  * Description: this function is called to allocate a receive buffer, perform
1147  * the DMA mapping and init the descriptor.
1148  */
1149 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1150 				  int i, gfp_t flags, u32 queue)
1151 {
1152 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1153 	struct sk_buff *skb;
1154 
1155 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1156 	if (!skb) {
1157 		netdev_err(priv->dev,
1158 			   "%s: Rx init fails; skb is NULL\n", __func__);
1159 		return -ENOMEM;
1160 	}
1161 	rx_q->rx_skbuff[i] = skb;
1162 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1163 						priv->dma_buf_sz,
1164 						DMA_FROM_DEVICE);
1165 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1166 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1167 		dev_kfree_skb_any(skb);
1168 		return -EINVAL;
1169 	}
1170 
1171 	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1172 
1173 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1174 		stmmac_init_desc3(priv, p);
1175 
1176 	return 0;
1177 }
1178 
1179 /**
 * stmmac_free_rx_buffer - free an RX dma buffer
1181  * @priv: private structure
1182  * @queue: RX queue index
1183  * @i: buffer index.
1184  */
1185 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1186 {
1187 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1188 
1189 	if (rx_q->rx_skbuff[i]) {
1190 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1191 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1192 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1193 	}
1194 	rx_q->rx_skbuff[i] = NULL;
1195 }
1196 
1197 /**
 * stmmac_free_tx_buffer - free a TX dma buffer
 * @priv: private structure
 * @queue: TX queue index
1201  * @i: buffer index.
1202  */
1203 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1204 {
1205 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1206 
1207 	if (tx_q->tx_skbuff_dma[i].buf) {
1208 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1209 			dma_unmap_page(priv->device,
1210 				       tx_q->tx_skbuff_dma[i].buf,
1211 				       tx_q->tx_skbuff_dma[i].len,
1212 				       DMA_TO_DEVICE);
1213 		else
1214 			dma_unmap_single(priv->device,
1215 					 tx_q->tx_skbuff_dma[i].buf,
1216 					 tx_q->tx_skbuff_dma[i].len,
1217 					 DMA_TO_DEVICE);
1218 	}
1219 
1220 	if (tx_q->tx_skbuff[i]) {
1221 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1222 		tx_q->tx_skbuff[i] = NULL;
1223 		tx_q->tx_skbuff_dma[i].buf = 0;
1224 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1225 	}
1226 }
1227 
1228 /**
1229  * init_dma_rx_desc_rings - init the RX descriptor rings
1230  * @dev: net device structure
1231  * @flags: gfp flag.
1232  * Description: this function initializes the DMA RX descriptors
1233  * and allocates the socket buffers. It supports the chained and ring
1234  * modes.
1235  */
1236 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1237 {
1238 	struct stmmac_priv *priv = netdev_priv(dev);
1239 	u32 rx_count = priv->plat->rx_queues_to_use;
1240 	int ret = -ENOMEM;
1241 	int bfsize = 0;
1242 	int queue;
1243 	int i;
1244 
1245 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1246 	if (bfsize < 0)
1247 		bfsize = 0;
1248 
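	/* If the ring/jumbo handling did not ask for 16KiB buffers, size the
	 * DMA buffers from the MTU instead.
	 */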
1249 	if (bfsize < BUF_SIZE_16KiB)
1250 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1251 
1252 	priv->dma_buf_sz = bfsize;
1253 
1254 	/* RX INITIALIZATION */
1255 	netif_dbg(priv, probe, priv->dev,
1256 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1257 
1258 	for (queue = 0; queue < rx_count; queue++) {
1259 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1260 
1261 		netif_dbg(priv, probe, priv->dev,
1262 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1263 			  (u32)rx_q->dma_rx_phy);
1264 
1265 		for (i = 0; i < DMA_RX_SIZE; i++) {
1266 			struct dma_desc *p;
1267 
1268 			if (priv->extend_desc)
1269 				p = &((rx_q->dma_erx + i)->basic);
1270 			else
1271 				p = rx_q->dma_rx + i;
1272 
1273 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1274 						     queue);
1275 			if (ret)
1276 				goto err_init_rx_buffers;
1277 
1278 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1279 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1280 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1281 		}
1282 
1283 		rx_q->cur_rx = 0;
1284 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1285 
1286 		stmmac_clear_rx_descriptors(priv, queue);
1287 
1288 		/* Setup the chained descriptor addresses */
1289 		if (priv->mode == STMMAC_CHAIN_MODE) {
1290 			if (priv->extend_desc)
1291 				stmmac_mode_init(priv, rx_q->dma_erx,
1292 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1293 			else
1294 				stmmac_mode_init(priv, rx_q->dma_rx,
1295 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1296 		}
1297 	}
1298 
1299 	buf_sz = bfsize;
1300 
1301 	return 0;
1302 
1303 err_init_rx_buffers:
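	/* Unwind: free the buffers allocated so far for the failing queue,
	 * then all buffers of the queues initialized before it.
	 */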
1304 	while (queue >= 0) {
1305 		while (--i >= 0)
1306 			stmmac_free_rx_buffer(priv, queue, i);
1307 
1308 		if (queue == 0)
1309 			break;
1310 
1311 		i = DMA_RX_SIZE;
1312 		queue--;
1313 	}
1314 
1315 	return ret;
1316 }
1317 
1318 /**
1319  * init_dma_tx_desc_rings - init the TX descriptor rings
1320  * @dev: net device structure.
1321  * Description: this function initializes the DMA TX descriptors
1322  * and allocates the socket buffers. It supports the chained and ring
1323  * modes.
1324  */
1325 static int init_dma_tx_desc_rings(struct net_device *dev)
1326 {
1327 	struct stmmac_priv *priv = netdev_priv(dev);
1328 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1329 	u32 queue;
1330 	int i;
1331 
1332 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1333 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1334 
1335 		netif_dbg(priv, probe, priv->dev,
1336 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1337 			 (u32)tx_q->dma_tx_phy);
1338 
1339 		/* Setup the chained descriptor addresses */
1340 		if (priv->mode == STMMAC_CHAIN_MODE) {
1341 			if (priv->extend_desc)
1342 				stmmac_mode_init(priv, tx_q->dma_etx,
1343 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1344 			else
1345 				stmmac_mode_init(priv, tx_q->dma_tx,
1346 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1347 		}
1348 
1349 		for (i = 0; i < DMA_TX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
1352 				p = &((tx_q->dma_etx + i)->basic);
1353 			else
1354 				p = tx_q->dma_tx + i;
1355 
1356 			stmmac_clear_desc(priv, p);
1357 
1358 			tx_q->tx_skbuff_dma[i].buf = 0;
1359 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1360 			tx_q->tx_skbuff_dma[i].len = 0;
1361 			tx_q->tx_skbuff_dma[i].last_segment = false;
1362 			tx_q->tx_skbuff[i] = NULL;
1363 		}
1364 
1365 		tx_q->dirty_tx = 0;
1366 		tx_q->cur_tx = 0;
1367 		tx_q->mss = 0;
1368 
1369 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1370 	}
1371 
1372 	return 0;
1373 }
1374 
1375 /**
1376  * init_dma_desc_rings - init the RX/TX descriptor rings
1377  * @dev: net device structure
1378  * @flags: gfp flag.
1379  * Description: this function initializes the DMA RX/TX descriptors
1380  * and allocates the socket buffers. It supports the chained and ring
1381  * modes.
1382  */
1383 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1384 {
1385 	struct stmmac_priv *priv = netdev_priv(dev);
1386 	int ret;
1387 
1388 	ret = init_dma_rx_desc_rings(dev, flags);
1389 	if (ret)
1390 		return ret;
1391 
1392 	ret = init_dma_tx_desc_rings(dev);
1393 
1394 	stmmac_clear_descriptors(priv);
1395 
1396 	if (netif_msg_hw(priv))
1397 		stmmac_display_rings(priv);
1398 
1399 	return ret;
1400 }
1401 
1402 /**
1403  * dma_free_rx_skbufs - free RX dma buffers
1404  * @priv: private structure
1405  * @queue: RX queue index
1406  */
1407 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1408 {
1409 	int i;
1410 
1411 	for (i = 0; i < DMA_RX_SIZE; i++)
1412 		stmmac_free_rx_buffer(priv, queue, i);
1413 }
1414 
1415 /**
1416  * dma_free_tx_skbufs - free TX dma buffers
1417  * @priv: private structure
1418  * @queue: TX queue index
1419  */
1420 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1421 {
1422 	int i;
1423 
1424 	for (i = 0; i < DMA_TX_SIZE; i++)
1425 		stmmac_free_tx_buffer(priv, queue, i);
1426 }
1427 
1428 /**
1429  * free_dma_rx_desc_resources - free RX dma desc resources
1430  * @priv: private structure
1431  */
1432 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1433 {
1434 	u32 rx_count = priv->plat->rx_queues_to_use;
1435 	u32 queue;
1436 
1437 	/* Free RX queue resources */
1438 	for (queue = 0; queue < rx_count; queue++) {
1439 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1440 
1441 		/* Release the DMA RX socket buffers */
1442 		dma_free_rx_skbufs(priv, queue);
1443 
1444 		/* Free DMA regions of consistent memory previously allocated */
1445 		if (!priv->extend_desc)
1446 			dma_free_coherent(priv->device,
1447 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1448 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1449 		else
1450 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1451 					  sizeof(struct dma_extended_desc),
1452 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1453 
1454 		kfree(rx_q->rx_skbuff_dma);
1455 		kfree(rx_q->rx_skbuff);
1456 	}
1457 }
1458 
1459 /**
1460  * free_dma_tx_desc_resources - free TX dma desc resources
1461  * @priv: private structure
1462  */
1463 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1464 {
1465 	u32 tx_count = priv->plat->tx_queues_to_use;
1466 	u32 queue;
1467 
1468 	/* Free TX queue resources */
1469 	for (queue = 0; queue < tx_count; queue++) {
1470 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1471 
1472 		/* Release the DMA TX socket buffers */
1473 		dma_free_tx_skbufs(priv, queue);
1474 
1475 		/* Free DMA regions of consistent memory previously allocated */
1476 		if (!priv->extend_desc)
1477 			dma_free_coherent(priv->device,
1478 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1479 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1480 		else
1481 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1482 					  sizeof(struct dma_extended_desc),
1483 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1484 
1485 		kfree(tx_q->tx_skbuff_dma);
1486 		kfree(tx_q->tx_skbuff);
1487 	}
1488 }
1489 
1490 /**
1491  * alloc_dma_rx_desc_resources - alloc RX resources.
1492  * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the RX path. It pre-allocates the
 * RX socket buffers in order to allow the zero-copy mechanism.
1497  */
1498 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1499 {
1500 	u32 rx_count = priv->plat->rx_queues_to_use;
1501 	int ret = -ENOMEM;
1502 	u32 queue;
1503 
1504 	/* RX queues buffers and DMA */
1505 	for (queue = 0; queue < rx_count; queue++) {
1506 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1507 
1508 		rx_q->queue_index = queue;
1509 		rx_q->priv_data = priv;
1510 
1511 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1512 						    sizeof(dma_addr_t),
1513 						    GFP_KERNEL);
1514 		if (!rx_q->rx_skbuff_dma)
1515 			goto err_dma;
1516 
1517 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1518 						sizeof(struct sk_buff *),
1519 						GFP_KERNEL);
1520 		if (!rx_q->rx_skbuff)
1521 			goto err_dma;
1522 
1523 		if (priv->extend_desc) {
1524 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1525 							    DMA_RX_SIZE *
1526 							    sizeof(struct
1527 							    dma_extended_desc),
1528 							    &rx_q->dma_rx_phy,
1529 							    GFP_KERNEL);
1530 			if (!rx_q->dma_erx)
1531 				goto err_dma;
1532 
1533 		} else {
1534 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1535 							   DMA_RX_SIZE *
1536 							   sizeof(struct
1537 							   dma_desc),
1538 							   &rx_q->dma_rx_phy,
1539 							   GFP_KERNEL);
1540 			if (!rx_q->dma_rx)
1541 				goto err_dma;
1542 		}
1543 	}
1544 
1545 	return 0;
1546 
1547 err_dma:
1548 	free_dma_rx_desc_resources(priv);
1549 
1550 	return ret;
1551 }
1552 
1553 /**
1554  * alloc_dma_tx_desc_resources - alloc TX resources.
1555  * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX path: the descriptor
 * rings and the per-descriptor bookkeeping arrays.
1560  */
1561 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1562 {
1563 	u32 tx_count = priv->plat->tx_queues_to_use;
1564 	int ret = -ENOMEM;
1565 	u32 queue;
1566 
1567 	/* TX queues buffers and DMA */
1568 	for (queue = 0; queue < tx_count; queue++) {
1569 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1570 
1571 		tx_q->queue_index = queue;
1572 		tx_q->priv_data = priv;
1573 
1574 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1575 						    sizeof(*tx_q->tx_skbuff_dma),
1576 						    GFP_KERNEL);
1577 		if (!tx_q->tx_skbuff_dma)
1578 			goto err_dma;
1579 
1580 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1581 						sizeof(struct sk_buff *),
1582 						GFP_KERNEL);
1583 		if (!tx_q->tx_skbuff)
1584 			goto err_dma;
1585 
1586 		if (priv->extend_desc) {
1587 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1588 							    DMA_TX_SIZE *
1589 							    sizeof(struct
1590 							    dma_extended_desc),
1591 							    &tx_q->dma_tx_phy,
1592 							    GFP_KERNEL);
1593 			if (!tx_q->dma_etx)
1594 				goto err_dma;
1595 		} else {
1596 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1597 							   DMA_TX_SIZE *
1598 							   sizeof(struct
1599 								  dma_desc),
1600 							   &tx_q->dma_tx_phy,
1601 							   GFP_KERNEL);
1602 			if (!tx_q->dma_tx)
1603 				goto err_dma;
1604 		}
1605 	}
1606 
1607 	return 0;
1608 
1609 err_dma:
1610 	free_dma_tx_desc_resources(priv);
1611 
1612 	return ret;
1613 }
1614 
1615 /**
1616  * alloc_dma_desc_resources - alloc TX/RX resources.
1617  * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow the zero-copy mechanism.
1622  */
1623 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1624 {
1625 	/* RX Allocation */
1626 	int ret = alloc_dma_rx_desc_resources(priv);
1627 
1628 	if (ret)
1629 		return ret;
1630 
1631 	ret = alloc_dma_tx_desc_resources(priv);
1632 
1633 	return ret;
1634 }
1635 
1636 /**
1637  * free_dma_desc_resources - free dma desc resources
1638  * @priv: private structure
1639  */
1640 static void free_dma_desc_resources(struct stmmac_priv *priv)
1641 {
1642 	/* Release the DMA RX socket buffers */
1643 	free_dma_rx_desc_resources(priv);
1644 
1645 	/* Release the DMA TX socket buffers */
1646 	free_dma_tx_desc_resources(priv);
1647 }
1648 
1649 /**
1650  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1651  *  @priv: driver private structure
1652  *  Description: It is used for enabling the rx queues in the MAC
1653  */
1654 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1655 {
1656 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1657 	int queue;
1658 	u8 mode;
1659 
1660 	for (queue = 0; queue < rx_queues_count; queue++) {
1661 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1662 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1663 	}
1664 }
1665 
1666 /**
1667  * stmmac_start_rx_dma - start RX DMA channel
1668  * @priv: driver private structure
1669  * @chan: RX channel index
1670  * Description:
 * This starts an RX DMA channel
1672  */
1673 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1674 {
1675 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1676 	stmmac_start_rx(priv, priv->ioaddr, chan);
1677 }
1678 
1679 /**
1680  * stmmac_start_tx_dma - start TX DMA channel
1681  * @priv: driver private structure
1682  * @chan: TX channel index
1683  * Description:
1684  * This starts a TX DMA channel
1685  */
1686 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1687 {
1688 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1689 	stmmac_start_tx(priv, priv->ioaddr, chan);
1690 }
1691 
1692 /**
1693  * stmmac_stop_rx_dma - stop RX DMA channel
1694  * @priv: driver private structure
1695  * @chan: RX channel index
1696  * Description:
 * This stops an RX DMA channel
1698  */
1699 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1700 {
1701 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1702 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1703 }
1704 
1705 /**
1706  * stmmac_stop_tx_dma - stop TX DMA channel
1707  * @priv: driver private structure
1708  * @chan: TX channel index
1709  * Description:
1710  * This stops a TX DMA channel
1711  */
1712 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1713 {
1714 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1715 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1716 }
1717 
1718 /**
1719  * stmmac_start_all_dma - start all RX and TX DMA channels
1720  * @priv: driver private structure
1721  * Description:
1722  * This starts all the RX and TX DMA channels
1723  */
1724 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1725 {
1726 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1727 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1728 	u32 chan = 0;
1729 
1730 	for (chan = 0; chan < rx_channels_count; chan++)
1731 		stmmac_start_rx_dma(priv, chan);
1732 
1733 	for (chan = 0; chan < tx_channels_count; chan++)
1734 		stmmac_start_tx_dma(priv, chan);
1735 }
1736 
1737 /**
1738  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1739  * @priv: driver private structure
1740  * Description:
1741  * This stops the RX and TX DMA channels
1742  */
1743 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1744 {
1745 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1746 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1747 	u32 chan = 0;
1748 
1749 	for (chan = 0; chan < rx_channels_count; chan++)
1750 		stmmac_stop_rx_dma(priv, chan);
1751 
1752 	for (chan = 0; chan < tx_channels_count; chan++)
1753 		stmmac_stop_tx_dma(priv, chan);
1754 }
1755 
1756 /**
1757  *  stmmac_dma_operation_mode - HW DMA operation mode
1758  *  @priv: driver private structure
1759  *  Description: it is used for configuring the DMA operation mode register in
1760  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1761  */
1762 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1763 {
1764 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1765 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1766 	int rxfifosz = priv->plat->rx_fifo_size;
1767 	int txfifosz = priv->plat->tx_fifo_size;
1768 	u32 txmode = 0;
1769 	u32 rxmode = 0;
1770 	u32 chan = 0;
1771 	u8 qmode = 0;
1772 
1773 	if (rxfifosz == 0)
1774 		rxfifosz = priv->dma_cap.rx_fifo_size;
1775 	if (txfifosz == 0)
1776 		txfifosz = priv->dma_cap.tx_fifo_size;
1777 
1778 	/* Adjust for real per queue fifo size */
1779 	rxfifosz /= rx_channels_count;
1780 	txfifosz /= tx_channels_count;
1781 
1782 	if (priv->plat->force_thresh_dma_mode) {
1783 		txmode = tc;
1784 		rxmode = tc;
1785 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1786 		/*
1787 		 * In case of GMAC, SF mode can be enabled
1788 		 * to perform the TX COE in HW. This depends on:
1789 		 * 1) TX COE being actually supported;
1790 		 * 2) there being no bugged Jumbo frame support
1791 		 *    that requires not inserting the csum in the TDES.
1792 		 */
1793 		txmode = SF_DMA_MODE;
1794 		rxmode = SF_DMA_MODE;
1795 		priv->xstats.threshold = SF_DMA_MODE;
1796 	} else {
1797 		txmode = tc;
1798 		rxmode = SF_DMA_MODE;
1799 	}
1800 
1801 	/* configure all channels */
1802 	for (chan = 0; chan < rx_channels_count; chan++) {
1803 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1804 
1805 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1806 				rxfifosz, qmode);
1807 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1808 				chan);
1809 	}
1810 
1811 	for (chan = 0; chan < tx_channels_count; chan++) {
1812 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1813 
1814 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1815 				txfifosz, qmode);
1816 	}
1817 }
1818 
1819 /**
1820  * stmmac_tx_clean - to manage the transmission completion
1821  * @priv: driver private structure
1822  * @queue: TX queue index
1823  * Description: it reclaims the transmit resources after transmission completes.
1824  */
1825 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1826 {
1827 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1828 	unsigned int bytes_compl = 0, pkts_compl = 0;
1829 	unsigned int entry;
1830 
1831 	netif_tx_lock(priv->dev);
1832 
1833 	priv->xstats.tx_clean++;
1834 
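	/* Walk the ring from the last cleaned descriptor up to, but not
	 * including, the next descriptor to be used by the xmit path.
	 */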
1835 	entry = tx_q->dirty_tx;
1836 	while (entry != tx_q->cur_tx) {
1837 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1838 		struct dma_desc *p;
1839 		int status;
1840 
1841 		if (priv->extend_desc)
1842 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1843 		else
1844 			p = tx_q->dma_tx + entry;
1845 
1846 		status = stmmac_tx_status(priv, &priv->dev->stats,
1847 				&priv->xstats, p, priv->ioaddr);
1848 		/* Check if the descriptor is owned by the DMA */
1849 		if (unlikely(status & tx_dma_own))
1850 			break;
1851 
1852 		/* Make sure descriptor fields are read after reading
1853 		 * the own bit.
1854 		 */
1855 		dma_rmb();
1856 
1857 		/* Just consider the last segment and ...*/
1858 		if (likely(!(status & tx_not_ls))) {
1859 			/* ... verify the status error condition */
1860 			if (unlikely(status & tx_err)) {
1861 				priv->dev->stats.tx_errors++;
1862 			} else {
1863 				priv->dev->stats.tx_packets++;
1864 				priv->xstats.tx_pkt_n++;
1865 			}
1866 			stmmac_get_tx_hwtstamp(priv, p, skb);
1867 		}
1868 
1869 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1870 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1871 				dma_unmap_page(priv->device,
1872 					       tx_q->tx_skbuff_dma[entry].buf,
1873 					       tx_q->tx_skbuff_dma[entry].len,
1874 					       DMA_TO_DEVICE);
1875 			else
1876 				dma_unmap_single(priv->device,
1877 						 tx_q->tx_skbuff_dma[entry].buf,
1878 						 tx_q->tx_skbuff_dma[entry].len,
1879 						 DMA_TO_DEVICE);
1880 			tx_q->tx_skbuff_dma[entry].buf = 0;
1881 			tx_q->tx_skbuff_dma[entry].len = 0;
1882 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1883 		}
1884 
1885 		stmmac_clean_desc3(priv, tx_q, p);
1886 
1887 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1888 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1889 
1890 		if (likely(skb != NULL)) {
1891 			pkts_compl++;
1892 			bytes_compl += skb->len;
1893 			dev_consume_skb_any(skb);
1894 			tx_q->tx_skbuff[entry] = NULL;
1895 		}
1896 
1897 		stmmac_release_tx_desc(priv, p, priv->mode);
1898 
1899 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1900 	}
1901 	tx_q->dirty_tx = entry;
1902 
1903 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1904 				  pkts_compl, bytes_compl);
1905 
1906 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1907 								queue))) &&
1908 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1909 
1910 		netif_dbg(priv, tx_done, priv->dev,
1911 			  "%s: restart transmit\n", __func__);
1912 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1913 	}
1914 
1915 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1916 		stmmac_enable_eee_mode(priv);
1917 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1918 	}
1919 	netif_tx_unlock(priv->dev);
1920 }
1921 
1922 /**
1923  * stmmac_tx_err - to manage the tx error
1924  * @priv: driver private structure
1925  * @chan: channel index
1926  * Description: it cleans the descriptors and restarts the transmission
1927  * in case of transmission errors.
1928  */
1929 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1930 {
1931 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1932 	int i;
1933 
1934 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1935 
1936 	stmmac_stop_tx_dma(priv, chan);
1937 	dma_free_tx_skbufs(priv, chan);
1938 	for (i = 0; i < DMA_TX_SIZE; i++)
1939 		if (priv->extend_desc)
1940 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1941 					priv->mode, (i == DMA_TX_SIZE - 1));
1942 		else
1943 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1944 					priv->mode, (i == DMA_TX_SIZE - 1));
1945 	tx_q->dirty_tx = 0;
1946 	tx_q->cur_tx = 0;
1947 	tx_q->mss = 0;
1948 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1949 	stmmac_start_tx_dma(priv, chan);
1950 
1951 	priv->dev->stats.tx_errors++;
1952 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1953 }
1954 
1955 /**
1956  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1957  *  @priv: driver private structure
1958  *  @txmode: TX operating mode
1959  *  @rxmode: RX operating mode
1960  *  @chan: channel index
1961  *  Description: it is used for configuring the DMA operation mode at
1962  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1963  *  mode.
1964  */
1965 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1966 					  u32 rxmode, u32 chan)
1967 {
1968 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1969 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1970 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1971 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1972 	int rxfifosz = priv->plat->rx_fifo_size;
1973 	int txfifosz = priv->plat->tx_fifo_size;
1974 
1975 	if (rxfifosz == 0)
1976 		rxfifosz = priv->dma_cap.rx_fifo_size;
1977 	if (txfifosz == 0)
1978 		txfifosz = priv->dma_cap.tx_fifo_size;
1979 
1980 	/* Adjust for real per queue fifo size */
1981 	rxfifosz /= rx_channels_count;
1982 	txfifosz /= tx_channels_count;
1983 
1984 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
1985 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
1986 }
1987 
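/**
 * stmmac_safety_feat_interrupt - handle the safety feature interrupt
 * @priv: driver private structure
 * Description: reads the safety feature interrupt status; if an error is
 * reported, the driver global error recovery is triggered and true is
 * returned, otherwise false.
 */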
1988 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
1989 {
1990 	int ret;
1991 
1992 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
1993 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
1994 	if (ret && (ret != -EINVAL)) {
1995 		stmmac_global_err(priv);
1996 		return true;
1997 	}
1998 
1999 	return false;
2000 }
2001 
2002 /**
2003  * stmmac_dma_interrupt - DMA ISR
2004  * @priv: driver private structure
2005  * Description: this is the DMA ISR. It is called by the main ISR.
2006  * It calls the dwmac dma routine and schedules the NAPI poll method in
2007  * case some work can be done.
2008  */
2009 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2010 {
2011 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2012 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2013 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2014 				tx_channel_count : rx_channel_count;
2015 	u32 chan;
2016 	bool poll_scheduled = false;
2017 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2018 
2019 	/* Make sure we never check beyond our status buffer. */
2020 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2021 		channels_to_check = ARRAY_SIZE(status);
2022 
2023 	/* Each DMA channel can be used for rx and tx simultaneously, yet
2024 	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2025 	 * stmmac_channel struct.
2026 	 * Because of this, stmmac_poll currently checks (and possibly wakes)
2027 	 * all tx queues rather than just a single tx queue.
2028 	 */
2029 	for (chan = 0; chan < channels_to_check; chan++)
2030 		status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2031 				&priv->xstats, chan);
2032 
2033 	for (chan = 0; chan < rx_channel_count; chan++) {
2034 		if (likely(status[chan] & handle_rx)) {
2035 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2036 
2037 			if (likely(napi_schedule_prep(&rx_q->napi))) {
2038 				stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2039 				__napi_schedule(&rx_q->napi);
2040 				poll_scheduled = true;
2041 			}
2042 		}
2043 	}
2044 
2045 	/* If we scheduled poll, we already know that tx queues will be checked.
2046 	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2047 	 * completed transmission, if so, call stmmac_poll (once).
2048 	 */
2049 	if (!poll_scheduled) {
2050 		for (chan = 0; chan < tx_channel_count; chan++) {
2051 			if (status[chan] & handle_tx) {
2052 				/* It doesn't matter what rx queue we choose
2053 				 * here. We use 0 since it always exists.
2054 				 */
2055 				struct stmmac_rx_queue *rx_q =
2056 					&priv->rx_queue[0];
2057 
2058 				if (likely(napi_schedule_prep(&rx_q->napi))) {
2059 					stmmac_disable_dma_irq(priv,
2060 							priv->ioaddr, chan);
2061 					__napi_schedule(&rx_q->napi);
2062 				}
2063 				break;
2064 			}
2065 		}
2066 	}
2067 
2068 	for (chan = 0; chan < tx_channel_count; chan++) {
2069 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2070 			/* Try to bump up the dma threshold on this failure */
2071 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2072 			    (tc <= 256)) {
2073 				tc += 64;
2074 				if (priv->plat->force_thresh_dma_mode)
2075 					stmmac_set_dma_operation_mode(priv,
2076 								      tc,
2077 								      tc,
2078 								      chan);
2079 				else
2080 					stmmac_set_dma_operation_mode(priv,
2081 								    tc,
2082 								    SF_DMA_MODE,
2083 								    chan);
2084 				priv->xstats.threshold = tc;
2085 			}
2086 		} else if (unlikely(status[chan] == tx_hard_error)) {
2087 			stmmac_tx_err(priv, chan);
2088 		}
2089 	}
2090 }
2091 
2092 /**
2093  * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
2094  * @priv: driver private structure
2095  * Description: this masks the MMC irq because the counters are managed in SW.
2096  */
2097 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2098 {
2099 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2100 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2101 
2102 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2103 
2104 	if (priv->dma_cap.rmon) {
2105 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2106 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2107 	} else
2108 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2109 }
2110 
2111 /**
2112  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2113  * @priv: driver private structure
2114  * Description:
2115  *  new GMAC chip generations have a new register to indicate the
2116  *  presence of the optional features/functions.
2117  *  This can also be used to override the values passed through the
2118  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2119  */
2120 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2121 {
2122 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2123 }
2124 
2125 /**
2126  * stmmac_check_ether_addr - check if the MAC addr is valid
2127  * @priv: driver private structure
2128  * Description:
2129  * it verifies that the MAC address is valid; if it is not, a random
2130  * MAC address is generated
2131  */
2132 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2133 {
2134 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2135 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2136 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2137 			eth_hw_addr_random(priv->dev);
2138 		netdev_info(priv->dev, "device MAC address %pM\n",
2139 			    priv->dev->dev_addr);
2140 	}
2141 }
2142 
2143 /**
2144  * stmmac_init_dma_engine - DMA init.
2145  * @priv: driver private structure
2146  * Description:
2147  * It initializes the DMA by invoking the specific MAC/GMAC callback.
2148  * Some DMA parameters can be passed from the platform;
2149  * if they are not passed, a default is used for the MAC or GMAC.
2150  */
2151 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2152 {
2153 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2154 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2155 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2156 	struct stmmac_rx_queue *rx_q;
2157 	struct stmmac_tx_queue *tx_q;
2158 	u32 chan = 0;
2159 	int atds = 0;
2160 	int ret = 0;
2161 
2162 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2163 		dev_err(priv->device, "Invalid DMA configuration\n");
2164 		return -EINVAL;
2165 	}
2166 
2167 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2168 		atds = 1;
2169 
2170 	ret = stmmac_reset(priv, priv->ioaddr);
2171 	if (ret) {
2172 		dev_err(priv->device, "Failed to reset the dma\n");
2173 		return ret;
2174 	}
2175 
2176 	/* DMA RX Channel Configuration */
2177 	for (chan = 0; chan < rx_channels_count; chan++) {
2178 		rx_q = &priv->rx_queue[chan];
2179 
2180 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2181 				    rx_q->dma_rx_phy, chan);
2182 
2183 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2184 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2185 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2186 				       rx_q->rx_tail_addr, chan);
2187 	}
2188 
2189 	/* DMA TX Channel Configuration */
2190 	for (chan = 0; chan < tx_channels_count; chan++) {
2191 		tx_q = &priv->tx_queue[chan];
2192 
2193 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2194 				    tx_q->dma_tx_phy, chan);
2195 
2196 		tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2197 			    (DMA_TX_SIZE * sizeof(struct dma_desc));
2198 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2199 				       tx_q->tx_tail_addr, chan);
2200 	}
2201 
2202 	/* DMA CSR Channel configuration */
2203 	for (chan = 0; chan < dma_csr_ch; chan++)
2204 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2205 
2206 	/* DMA Configuration */
2207 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2208 
2209 	if (priv->plat->axi)
2210 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2211 
2212 	return ret;
2213 }
2214 
2215 /**
2216  * stmmac_tx_timer - mitigation sw timer for tx.
2217  * @t: pointer to the timer_list structure
2218  * Description:
2219  * This is the timer handler that directly invokes stmmac_tx_clean.
2220  */
2221 static void stmmac_tx_timer(struct timer_list *t)
2222 {
2223 	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2224 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2225 	u32 queue;
2226 
2227 	/* let's scan all the tx queues */
2228 	for (queue = 0; queue < tx_queues_count; queue++)
2229 		stmmac_tx_clean(priv, queue);
2230 }
2231 
2232 /**
2233  * stmmac_init_tx_coalesce - init tx mitigation options.
2234  * @priv: driver private structure
2235  * Description:
2236  * This inits the transmit coalesce parameters: i.e. timer rate,
2237  * timer handler and default threshold used for enabling the
2238  * interrupt on completion bit.
2239  */
2240 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2241 {
2242 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2243 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2244 	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2245 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2246 	add_timer(&priv->txtimer);
2247 }
2248 
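/**
 * stmmac_set_rings_length - set the TX and RX DMA ring lengths
 * @priv: driver private structure
 * Description: it programs the ring length register for each TX and RX
 * DMA channel.
 */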
2249 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2250 {
2251 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2252 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2253 	u32 chan;
2254 
2255 	/* set TX ring length */
2256 	for (chan = 0; chan < tx_channels_count; chan++)
2257 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2258 				(DMA_TX_SIZE - 1), chan);
2259 
2260 	/* set RX ring length */
2261 	for (chan = 0; chan < rx_channels_count; chan++)
2262 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2263 				(DMA_RX_SIZE - 1), chan);
2264 }
2265 
2266 /**
2267  *  stmmac_set_tx_queue_weight - Set TX queue weight
2268  *  @priv: driver private structure
2269  *  Description: It is used for setting the TX queue weights
2270  */
2271 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2272 {
2273 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2274 	u32 weight;
2275 	u32 queue;
2276 
2277 	for (queue = 0; queue < tx_queues_count; queue++) {
2278 		weight = priv->plat->tx_queues_cfg[queue].weight;
2279 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2280 	}
2281 }
2282 
2283 /**
2284  *  stmmac_configure_cbs - Configure CBS in TX queue
2285  *  @priv: driver private structure
2286  *  Description: It is used for configuring CBS in AVB TX queues
2287  */
2288 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2289 {
2290 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2291 	u32 mode_to_use;
2292 	u32 queue;
2293 
2294 	/* queue 0 is reserved for legacy traffic */
2295 	for (queue = 1; queue < tx_queues_count; queue++) {
2296 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2297 		if (mode_to_use == MTL_QUEUE_DCB)
2298 			continue;
2299 
2300 		stmmac_config_cbs(priv, priv->hw,
2301 				priv->plat->tx_queues_cfg[queue].send_slope,
2302 				priv->plat->tx_queues_cfg[queue].idle_slope,
2303 				priv->plat->tx_queues_cfg[queue].high_credit,
2304 				priv->plat->tx_queues_cfg[queue].low_credit,
2305 				queue);
2306 	}
2307 }
2308 
2309 /**
2310  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2311  *  @priv: driver private structure
2312  *  Description: It is used for mapping RX queues to RX dma channels
2313  */
2314 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2315 {
2316 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2317 	u32 queue;
2318 	u32 chan;
2319 
2320 	for (queue = 0; queue < rx_queues_count; queue++) {
2321 		chan = priv->plat->rx_queues_cfg[queue].chan;
2322 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2323 	}
2324 }
2325 
2326 /**
2327  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2328  *  @priv: driver private structure
2329  *  Description: It is used for configuring the RX Queue Priority
2330  */
2331 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2332 {
2333 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2334 	u32 queue;
2335 	u32 prio;
2336 
2337 	for (queue = 0; queue < rx_queues_count; queue++) {
2338 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2339 			continue;
2340 
2341 		prio = priv->plat->rx_queues_cfg[queue].prio;
2342 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2343 	}
2344 }
2345 
2346 /**
2347  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2348  *  @priv: driver private structure
2349  *  Description: It is used for configuring the TX Queue Priority
2350  */
2351 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2352 {
2353 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2354 	u32 queue;
2355 	u32 prio;
2356 
2357 	for (queue = 0; queue < tx_queues_count; queue++) {
2358 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2359 			continue;
2360 
2361 		prio = priv->plat->tx_queues_cfg[queue].prio;
2362 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2363 	}
2364 }
2365 
2366 /**
2367  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2368  *  @priv: driver private structure
2369  *  Description: It is used for configuring the RX queue routing
2370  */
2371 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2372 {
2373 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2374 	u32 queue;
2375 	u8 packet;
2376 
2377 	for (queue = 0; queue < rx_queues_count; queue++) {
2378 		/* no specific packet type routing specified for the queue */
2379 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2380 			continue;
2381 
2382 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2383 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2384 	}
2385 }
2386 
2387 /**
2388  *  stmmac_mtl_configuration - Configure MTL
2389  *  @priv: driver private structure
2390  *  Description: It is used for configuring the MTL
2391  */
2392 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2393 {
2394 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2395 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2396 
2397 	if (tx_queues_count > 1)
2398 		stmmac_set_tx_queue_weight(priv);
2399 
2400 	/* Configure MTL RX algorithms */
2401 	if (rx_queues_count > 1)
2402 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2403 				priv->plat->rx_sched_algorithm);
2404 
2405 	/* Configure MTL TX algorithms */
2406 	if (tx_queues_count > 1)
2407 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2408 				priv->plat->tx_sched_algorithm);
2409 
2410 	/* Configure CBS in AVB TX queues */
2411 	if (tx_queues_count > 1)
2412 		stmmac_configure_cbs(priv);
2413 
2414 	/* Map RX MTL to DMA channels */
2415 	stmmac_rx_queue_dma_chan_map(priv);
2416 
2417 	/* Enable MAC RX Queues */
2418 	stmmac_mac_enable_rx_queues(priv);
2419 
2420 	/* Set RX priorities */
2421 	if (rx_queues_count > 1)
2422 		stmmac_mac_config_rx_queues_prio(priv);
2423 
2424 	/* Set TX priorities */
2425 	if (tx_queues_count > 1)
2426 		stmmac_mac_config_tx_queues_prio(priv);
2427 
2428 	/* Set RX routing */
2429 	if (rx_queues_count > 1)
2430 		stmmac_mac_config_rx_queues_routing(priv);
2431 }
2432 
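/**
 * stmmac_safety_feat_configuration - configure the HW safety features
 * @priv: driver private structure
 * Description: it enables the HW safety features when the Automotive
 * Safety Package (asp) capability is reported by the HW.
 */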
2433 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2434 {
2435 	if (priv->dma_cap.asp) {
2436 		netdev_info(priv->dev, "Enabling Safety Features\n");
2437 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2438 	} else {
2439 		netdev_info(priv->dev, "No Safety Features support found\n");
2440 	}
2441 }
2442 
2443 /**
2444  * stmmac_hw_setup - setup mac in a usable state.
2445  *  @dev : pointer to the device structure.
2446  *  Description:
2447  *  this is the main function to setup the HW in a usable state: the
2448  *  dma engine is reset, the core registers are configured (e.g. AXI,
2449  *  checksum features, timers) and the DMA is made ready to start receiving
2450  *  and transmitting.
2451  *  Return value:
2452  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2453  *  file on failure.
2454  */
2455 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2456 {
2457 	struct stmmac_priv *priv = netdev_priv(dev);
2458 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2459 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2460 	u32 chan;
2461 	int ret;
2462 
2463 	/* DMA initialization and SW reset */
2464 	ret = stmmac_init_dma_engine(priv);
2465 	if (ret < 0) {
2466 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2467 			   __func__);
2468 		return ret;
2469 	}
2470 
2471 	/* Copy the MAC addr into the HW  */
2472 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2473 
2474 	/* PS and related bits will be programmed according to the speed */
2475 	if (priv->hw->pcs) {
2476 		int speed = priv->plat->mac_port_sel_speed;
2477 
2478 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2479 		    (speed == SPEED_1000)) {
2480 			priv->hw->ps = speed;
2481 		} else {
2482 			dev_warn(priv->device, "invalid port speed\n");
2483 			priv->hw->ps = 0;
2484 		}
2485 	}
2486 
2487 	/* Initialize the MAC Core */
2488 	stmmac_core_init(priv, priv->hw, dev);
2489 
2490 	/* Initialize MTL*/
2491 	stmmac_mtl_configuration(priv);
2492 
2493 	/* Initialize Safety Features */
2494 	stmmac_safety_feat_configuration(priv);
2495 
2496 	ret = stmmac_rx_ipc(priv, priv->hw);
2497 	if (!ret) {
2498 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2499 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2500 		priv->hw->rx_csum = 0;
2501 	}
2502 
2503 	/* Enable the MAC Rx/Tx */
2504 	stmmac_mac_set(priv, priv->ioaddr, true);
2505 
2506 	/* Set the HW DMA mode and the COE */
2507 	stmmac_dma_operation_mode(priv);
2508 
2509 	stmmac_mmc_setup(priv);
2510 
2511 	if (init_ptp) {
2512 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2513 		if (ret < 0)
2514 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2515 
2516 		ret = stmmac_init_ptp(priv);
2517 		if (ret == -EOPNOTSUPP)
2518 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2519 		else if (ret)
2520 			netdev_warn(priv->dev, "PTP init failed\n");
2521 	}
2522 
2523 #ifdef CONFIG_DEBUG_FS
2524 	ret = stmmac_init_fs(dev);
2525 	if (ret < 0)
2526 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2527 			    __func__);
2528 #endif
2529 	/* Start the ball rolling... */
2530 	stmmac_start_all_dma(priv);
2531 
2532 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2533 
2534 	if (priv->use_riwt) {
2535 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2536 		if (!ret)
2537 			priv->rx_riwt = MAX_DMA_RIWT;
2538 	}
2539 
2540 	if (priv->hw->pcs)
2541 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2542 
2543 	/* set TX and RX rings length */
2544 	stmmac_set_rings_length(priv);
2545 
2546 	/* Enable TSO */
2547 	if (priv->tso) {
2548 		for (chan = 0; chan < tx_cnt; chan++)
2549 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2550 	}
2551 
2552 	return 0;
2553 }
2554 
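/**
 * stmmac_hw_teardown - undo the HW setup done by stmmac_hw_setup
 * @dev: pointer to the device structure
 * Description: currently it only disables the PTP reference clock.
 */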
2555 static void stmmac_hw_teardown(struct net_device *dev)
2556 {
2557 	struct stmmac_priv *priv = netdev_priv(dev);
2558 
2559 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2560 }
2561 
2562 /**
2563  *  stmmac_open - open entry point of the driver
2564  *  @dev : pointer to the device structure.
2565  *  Description:
2566  *  This function is the open entry point of the driver.
2567  *  Return value:
2568  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2569  *  file on failure.
2570  */
2571 static int stmmac_open(struct net_device *dev)
2572 {
2573 	struct stmmac_priv *priv = netdev_priv(dev);
2574 	int ret;
2575 
2576 	stmmac_check_ether_addr(priv);
2577 
2578 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2579 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2580 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2581 		ret = stmmac_init_phy(dev);
2582 		if (ret) {
2583 			netdev_err(priv->dev,
2584 				   "%s: Cannot attach to PHY (error: %d)\n",
2585 				   __func__, ret);
2586 			return ret;
2587 		}
2588 	}
2589 
2590 	/* Extra statistics */
2591 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2592 	priv->xstats.threshold = tc;
2593 
2594 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2595 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2596 
2597 	ret = alloc_dma_desc_resources(priv);
2598 	if (ret < 0) {
2599 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2600 			   __func__);
2601 		goto dma_desc_error;
2602 	}
2603 
2604 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2605 	if (ret < 0) {
2606 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2607 			   __func__);
2608 		goto init_error;
2609 	}
2610 
2611 	ret = stmmac_hw_setup(dev, true);
2612 	if (ret < 0) {
2613 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2614 		goto init_error;
2615 	}
2616 
2617 	stmmac_init_tx_coalesce(priv);
2618 
2619 	if (dev->phydev)
2620 		phy_start(dev->phydev);
2621 
2622 	/* Request the IRQ lines */
2623 	ret = request_irq(dev->irq, stmmac_interrupt,
2624 			  IRQF_SHARED, dev->name, dev);
2625 	if (unlikely(ret < 0)) {
2626 		netdev_err(priv->dev,
2627 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2628 			   __func__, dev->irq, ret);
2629 		goto irq_error;
2630 	}
2631 
2632 	/* Request the Wake IRQ in case another line is used for WoL */
2633 	if (priv->wol_irq != dev->irq) {
2634 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2635 				  IRQF_SHARED, dev->name, dev);
2636 		if (unlikely(ret < 0)) {
2637 			netdev_err(priv->dev,
2638 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2639 				   __func__, priv->wol_irq, ret);
2640 			goto wolirq_error;
2641 		}
2642 	}
2643 
2644 	/* Request the LPI IRQ in case a separate line is used for it */
2645 	if (priv->lpi_irq > 0) {
2646 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2647 				  dev->name, dev);
2648 		if (unlikely(ret < 0)) {
2649 			netdev_err(priv->dev,
2650 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2651 				   __func__, priv->lpi_irq, ret);
2652 			goto lpiirq_error;
2653 		}
2654 	}
2655 
2656 	stmmac_enable_all_queues(priv);
2657 	stmmac_start_all_queues(priv);
2658 
2659 	return 0;
2660 
2661 lpiirq_error:
2662 	if (priv->wol_irq != dev->irq)
2663 		free_irq(priv->wol_irq, dev);
2664 wolirq_error:
2665 	free_irq(dev->irq, dev);
2666 irq_error:
2667 	if (dev->phydev)
2668 		phy_stop(dev->phydev);
2669 
2670 	del_timer_sync(&priv->txtimer);
2671 	stmmac_hw_teardown(dev);
2672 init_error:
2673 	free_dma_desc_resources(priv);
2674 dma_desc_error:
2675 	if (dev->phydev)
2676 		phy_disconnect(dev->phydev);
2677 
2678 	return ret;
2679 }
2680 
2681 /**
2682  *  stmmac_release - close entry point of the driver
2683  *  @dev : device pointer.
2684  *  Description:
2685  *  This is the stop entry point of the driver.
2686  */
2687 static int stmmac_release(struct net_device *dev)
2688 {
2689 	struct stmmac_priv *priv = netdev_priv(dev);
2690 
2691 	if (priv->eee_enabled)
2692 		del_timer_sync(&priv->eee_ctrl_timer);
2693 
2694 	/* Stop and disconnect the PHY */
2695 	if (dev->phydev) {
2696 		phy_stop(dev->phydev);
2697 		phy_disconnect(dev->phydev);
2698 	}
2699 
2700 	stmmac_stop_all_queues(priv);
2701 
2702 	stmmac_disable_all_queues(priv);
2703 
2704 	del_timer_sync(&priv->txtimer);
2705 
2706 	/* Free the IRQ lines */
2707 	free_irq(dev->irq, dev);
2708 	if (priv->wol_irq != dev->irq)
2709 		free_irq(priv->wol_irq, dev);
2710 	if (priv->lpi_irq > 0)
2711 		free_irq(priv->lpi_irq, dev);
2712 
2713 	/* Stop TX/RX DMA and clear the descriptors */
2714 	stmmac_stop_all_dma(priv);
2715 
2716 	/* Release and free the Rx/Tx resources */
2717 	free_dma_desc_resources(priv);
2718 
2719 	/* Disable the MAC Rx/Tx */
2720 	stmmac_mac_set(priv, priv->ioaddr, false);
2721 
2722 	netif_carrier_off(dev);
2723 
2724 #ifdef CONFIG_DEBUG_FS
2725 	stmmac_exit_fs(dev);
2726 #endif
2727 
2728 	stmmac_release_ptp(priv);
2729 
2730 	return 0;
2731 }
2732 
2733 /**
2734  *  stmmac_tso_allocator - fill the TX descriptors for a TSO payload
2735  *  @priv: driver private structure
2736  *  @des: buffer start address
2737  *  @total_len: total length to fill in descriptors
2738  *  @last_segment: condition for the last descriptor
2739  *  @queue: TX queue index
2740  *  Description:
2741  *  This function fills the descriptors and requests new descriptors
2742  *  according to the buffer length to fill
2743  */
2744 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2745 				 int total_len, bool last_segment, u32 queue)
2746 {
2747 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2748 	struct dma_desc *desc;
2749 	u32 buff_size;
2750 	int tmp_len;
2751 
2752 	tmp_len = total_len;
2753 
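	/* Split the payload into TSO_MAX_BUFF_SIZE chunks, taking one
	 * descriptor from the ring for each chunk.
	 */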
2754 	while (tmp_len > 0) {
2755 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2756 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2757 		desc = tx_q->dma_tx + tx_q->cur_tx;
2758 
2759 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2760 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2761 			    TSO_MAX_BUFF_SIZE : tmp_len;
2762 
2763 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2764 				0, 1,
2765 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2766 				0, 0);
2767 
2768 		tmp_len -= TSO_MAX_BUFF_SIZE;
2769 	}
2770 }
2771 
2772 /**
2773  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2774  *  @skb : the socket buffer
2775  *  @dev : device pointer
2776  *  Description: this is the transmit function that is called on TSO frames
2777  *  (support available on GMAC4 and newer chips).
2778  *  Diagram below shows the ring programming in case of TSO frames:
2779  *
2780  *  First Descriptor
2781  *   --------
2782  *   | DES0 |---> buffer1 = L2/L3/L4 header
2783  *   | DES1 |---> TCP Payload (can continue on next descr...)
2784  *   | DES2 |---> buffer 1 and 2 len
2785  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2786  *   --------
2787  *	|
2788  *     ...
2789  *	|
2790  *   --------
2791  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2792  *   | DES1 | --|
2793  *   | DES2 | --> buffer 1 and 2 len
2794  *   | DES3 |
2795  *   --------
2796  *
2797  * The mss is fixed when tso is enabled, so the TDES3 ctx field does not need to be programmed for each frame.
2798  */
2799 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2800 {
2801 	struct dma_desc *desc, *first, *mss_desc = NULL;
2802 	struct stmmac_priv *priv = netdev_priv(dev);
2803 	int nfrags = skb_shinfo(skb)->nr_frags;
2804 	u32 queue = skb_get_queue_mapping(skb);
2805 	unsigned int first_entry, des;
2806 	struct stmmac_tx_queue *tx_q;
2807 	int tmp_pay_len = 0;
2808 	u32 pay_len, mss;
2809 	u8 proto_hdr_len;
2810 	int i;
2811 
2812 	tx_q = &priv->tx_queue[queue];
2813 
2814 	/* Compute header lengths */
2815 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2816 
2817 	/* Desc availability based on threshold should be safe enough */
2818 	if (unlikely(stmmac_tx_avail(priv, queue) <
2819 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2820 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2821 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2822 								queue));
2823 			/* This is a hard error, log it. */
2824 			netdev_err(priv->dev,
2825 				   "%s: Tx Ring full when queue awake\n",
2826 				   __func__);
2827 		}
2828 		return NETDEV_TX_BUSY;
2829 	}
2830 
2831 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2832 
2833 	mss = skb_shinfo(skb)->gso_size;
2834 
2835 	/* set new MSS value if needed */
2836 	if (mss != tx_q->mss) {
2837 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2838 		stmmac_set_mss(priv, mss_desc, mss);
2839 		tx_q->mss = mss;
2840 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2841 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2842 	}
2843 
2844 	if (netif_msg_tx_queued(priv)) {
2845 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2846 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2847 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2848 			skb->data_len);
2849 	}
2850 
2851 	first_entry = tx_q->cur_tx;
2852 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2853 
2854 	desc = tx_q->dma_tx + first_entry;
2855 	first = desc;
2856 
2857 	/* first descriptor: fill Headers on Buf1 */
2858 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2859 			     DMA_TO_DEVICE);
2860 	if (dma_mapping_error(priv->device, des))
2861 		goto dma_map_err;
2862 
2863 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2864 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2865 
2866 	first->des0 = cpu_to_le32(des);
2867 
2868 	/* Fill start of payload in buff2 of first descriptor */
2869 	if (pay_len)
2870 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2871 
2872 	/* If needed take extra descriptors to fill the remaining payload */
2873 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2874 
2875 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2876 
2877 	/* Prepare fragments */
2878 	for (i = 0; i < nfrags; i++) {
2879 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2880 
2881 		des = skb_frag_dma_map(priv->device, frag, 0,
2882 				       skb_frag_size(frag),
2883 				       DMA_TO_DEVICE);
2884 		if (dma_mapping_error(priv->device, des))
2885 			goto dma_map_err;
2886 
2887 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2888 				     (i == nfrags - 1), queue);
2889 
2890 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2891 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2892 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2893 	}
2894 
2895 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2896 
2897 	/* Only the last descriptor gets to point to the skb. */
2898 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2899 
2900 	/* We've used all descriptors we need for this skb, however,
2901 	 * advance cur_tx so that it references a fresh descriptor.
2902 	 * ndo_start_xmit will fill this descriptor the next time it's
2903 	 * called and stmmac_tx_clean may clean up to this descriptor.
2904 	 */
2905 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2906 
2907 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2908 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2909 			  __func__);
2910 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2911 	}
2912 
2913 	dev->stats.tx_bytes += skb->len;
2914 	priv->xstats.tx_tso_frames++;
2915 	priv->xstats.tx_tso_nfrags += nfrags;
2916 
2917 	/* Manage tx mitigation */
2918 	priv->tx_count_frames += nfrags + 1;
2919 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2920 		mod_timer(&priv->txtimer,
2921 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2922 	} else {
2923 		priv->tx_count_frames = 0;
2924 		stmmac_set_tx_ic(priv, desc);
2925 		priv->xstats.tx_set_ic_bit++;
2926 	}
2927 
2928 	skb_tx_timestamp(skb);
2929 
2930 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2931 		     priv->hwts_tx_en)) {
2932 		/* declare that device is doing timestamping */
2933 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2934 		stmmac_enable_tx_timestamp(priv, first);
2935 	}
2936 
2937 	/* Complete the first descriptor before granting the DMA */
2938 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2939 			proto_hdr_len,
2940 			pay_len,
2941 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2942 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2943 
2944 	/* If context desc is used to change MSS */
2945 	if (mss_desc) {
2946 		/* Make sure that first descriptor has been completely
2947 		 * written, including its own bit. This is because MSS is
2948 		 * actually before first descriptor, so we need to make
2949 		 * sure that MSS's own bit is the last thing written.
2950 		 */
2951 		dma_wmb();
2952 		stmmac_set_tx_owner(priv, mss_desc);
2953 	}
2954 
2955 	/* The own bit must be the latest setting done when preparing the
2956 	 * descriptor and then a barrier is needed to make sure that
2957 	 * all is coherent before granting the DMA engine.
2958 	 */
2959 	wmb();
2960 
2961 	if (netif_msg_pktdata(priv)) {
2962 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2963 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2964 			tx_q->cur_tx, first, nfrags);
2965 
2966 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2967 
2968 		pr_info(">>> frame to be transmitted: ");
2969 		print_pkt(skb->data, skb_headlen(skb));
2970 	}
2971 
2972 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2973 
2974 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2975 
2976 	return NETDEV_TX_OK;
2977 
2978 dma_map_err:
2979 	dev_err(priv->device, "Tx dma map failed\n");
2980 	dev_kfree_skb(skb);
2981 	priv->dev->stats.tx_dropped++;
2982 	return NETDEV_TX_OK;
2983 }
2984 
2985 /**
2986  *  stmmac_xmit - Tx entry point of the driver
2987  *  @skb : the socket buffer
2988  *  @dev : device pointer
2989  *  Description : this is the tx entry point of the driver.
2990  *  It programs the chain or the ring and supports oversized frames
2991  *  and SG feature.
2992  */
2993 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2994 {
2995 	struct stmmac_priv *priv = netdev_priv(dev);
2996 	unsigned int nopaged_len = skb_headlen(skb);
2997 	int i, csum_insertion = 0, is_jumbo = 0;
2998 	u32 queue = skb_get_queue_mapping(skb);
2999 	int nfrags = skb_shinfo(skb)->nr_frags;
3000 	int entry;
3001 	unsigned int first_entry;
3002 	struct dma_desc *desc, *first;
3003 	struct stmmac_tx_queue *tx_q;
3004 	unsigned int enh_desc;
3005 	unsigned int des;
3006 
3007 	tx_q = &priv->tx_queue[queue];
3008 
3009 	/* Manage oversized TCP frames for GMAC4 device */
3010 	if (skb_is_gso(skb) && priv->tso) {
3011 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3012 			return stmmac_tso_xmit(skb, dev);
3013 	}
3014 
3015 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3016 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3017 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3018 								queue));
3019 			/* This is a hard error, log it. */
3020 			netdev_err(priv->dev,
3021 				   "%s: Tx Ring full when queue awake\n",
3022 				   __func__);
3023 		}
3024 		return NETDEV_TX_BUSY;
3025 	}
3026 
3027 	if (priv->tx_path_in_lpi_mode)
3028 		stmmac_disable_eee_mode(priv);
3029 
3030 	entry = tx_q->cur_tx;
3031 	first_entry = entry;
3032 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3033 
3034 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3035 
3036 	if (likely(priv->extend_desc))
3037 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3038 	else
3039 		desc = tx_q->dma_tx + entry;
3040 
3041 	first = desc;
3042 
3043 	enh_desc = priv->plat->enh_desc;
3044 	/* To program the descriptors according to the size of the frame */
3045 	if (enh_desc)
3046 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3047 
3048 	if (unlikely(is_jumbo)) {
3049 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3050 		if (unlikely(entry < 0) && (entry != -EINVAL))
3051 			goto dma_map_err;
3052 	}
3053 
3054 	for (i = 0; i < nfrags; i++) {
3055 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3056 		int len = skb_frag_size(frag);
3057 		bool last_segment = (i == (nfrags - 1));
3058 
3059 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3060 		WARN_ON(tx_q->tx_skbuff[entry]);
3061 
3062 		if (likely(priv->extend_desc))
3063 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3064 		else
3065 			desc = tx_q->dma_tx + entry;
3066 
3067 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3068 				       DMA_TO_DEVICE);
3069 		if (dma_mapping_error(priv->device, des))
3070 			goto dma_map_err; /* should reuse desc w/o issues */
3071 
3072 		tx_q->tx_skbuff_dma[entry].buf = des;
3073 
3074 		stmmac_set_desc_addr(priv, desc, des);
3075 
3076 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3077 		tx_q->tx_skbuff_dma[entry].len = len;
3078 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3079 
3080 		/* Prepare the descriptor and set the own bit too */
3081 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3082 				priv->mode, 1, last_segment, skb->len);
3083 	}
3084 
3085 	/* Only the last descriptor gets to point to the skb. */
3086 	tx_q->tx_skbuff[entry] = skb;
3087 
3088 	/* We've used all descriptors we need for this skb, however,
3089 	 * advance cur_tx so that it references a fresh descriptor.
3090 	 * ndo_start_xmit will fill this descriptor the next time it's
3091 	 * called and stmmac_tx_clean may clean up to this descriptor.
3092 	 */
3093 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3094 	tx_q->cur_tx = entry;
3095 
3096 	if (netif_msg_pktdata(priv)) {
3097 		void *tx_head;
3098 
3099 		netdev_dbg(priv->dev,
3100 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3101 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3102 			   entry, first, nfrags);
3103 
3104 		if (priv->extend_desc)
3105 			tx_head = (void *)tx_q->dma_etx;
3106 		else
3107 			tx_head = (void *)tx_q->dma_tx;
3108 
3109 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3110 
3111 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3112 		print_pkt(skb->data, skb->len);
3113 	}
3114 
3115 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3116 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3117 			  __func__);
3118 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3119 	}
3120 
3121 	dev->stats.tx_bytes += skb->len;
3122 
3123 	/* According to the coalesce parameter the IC bit for the latest
3124 	 * segment is reset and the timer re-started to clean the tx status.
3125 	 * This approach takes care of the fragments: desc is the first
3126 	 * element in case of no SG.
3127 	 */
3128 	priv->tx_count_frames += nfrags + 1;
3129 	if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
3130 	    !priv->tx_timer_armed) {
3131 		mod_timer(&priv->txtimer,
3132 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3133 		priv->tx_timer_armed = true;
3134 	} else {
3135 		priv->tx_count_frames = 0;
3136 		stmmac_set_tx_ic(priv, desc);
3137 		priv->xstats.tx_set_ic_bit++;
3138 		priv->tx_timer_armed = false;
3139 	}
3140 
3141 	skb_tx_timestamp(skb);
3142 
3143 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3144 	 * problems because all the descriptors are actually ready to be
3145 	 * passed to the DMA engine.
3146 	 */
3147 	if (likely(!is_jumbo)) {
3148 		bool last_segment = (nfrags == 0);
3149 
3150 		des = dma_map_single(priv->device, skb->data,
3151 				     nopaged_len, DMA_TO_DEVICE);
3152 		if (dma_mapping_error(priv->device, des))
3153 			goto dma_map_err;
3154 
3155 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3156 
3157 		stmmac_set_desc_addr(priv, first, des);
3158 
3159 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3160 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3161 
3162 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3163 			     priv->hwts_tx_en)) {
3164 			/* declare that device is doing timestamping */
3165 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3166 			stmmac_enable_tx_timestamp(priv, first);
3167 		}
3168 
3169 		/* Prepare the first descriptor setting the OWN bit too */
3170 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3171 				csum_insertion, priv->mode, 1, last_segment,
3172 				skb->len);
3173 
3174 		/* The own bit must be the latest setting done when preparing the
3175 		 * descriptor and then a barrier is needed to make sure that
3176 		 * all is coherent before granting the DMA engine.
3177 		 */
3178 		wmb();
3179 	}
3180 
3181 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3182 
3183 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
3184 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3185 
3186 	return NETDEV_TX_OK;
3187 
3188 dma_map_err:
3189 	netdev_err(priv->dev, "Tx DMA map failed\n");
3190 	dev_kfree_skb(skb);
3191 	priv->dev->stats.tx_dropped++;
3192 	return NETDEV_TX_OK;
3193 }
3194 
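/**
 * stmmac_rx_vlan - strip the VLAN tag from a received frame
 * @dev: device pointer
 * @skb: the socket buffer
 * Description: if VLAN RX offload is enabled for the tag protocol, the
 * VLAN header is removed from the packet data and the tag is passed to
 * the stack through the hwaccel helper.
 */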
3195 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3196 {
3197 	struct vlan_ethhdr *veth;
3198 	__be16 vlan_proto;
3199 	u16 vlanid;
3200 
3201 	veth = (struct vlan_ethhdr *)skb->data;
3202 	vlan_proto = veth->h_vlan_proto;
3203 
3204 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3205 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3206 	    (vlan_proto == htons(ETH_P_8021AD) &&
3207 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3208 		/* pop the vlan tag */
3209 		vlanid = ntohs(veth->h_vlan_TCI);
3210 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3211 		skb_pull(skb, VLAN_HLEN);
3212 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3213 	}
3214 }
3215 
3216 
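/* Return 1 when the zero-copy threshold has been reached on this RX queue,
 * which makes stmmac_rx() prefer copying the received frame into a freshly
 * allocated skb instead of handing off the pre-mapped buffer.
 */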
3217 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3218 {
3219 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3220 		return 0;
3221 
3222 	return 1;
3223 }
3224 
3225 /**
3226  * stmmac_rx_refill - refill used skb preallocated buffers
3227  * @priv: driver private structure
3228  * @queue: RX queue index
3229  * Description: this reallocates the skb buffers used by the zero-copy
3230  * reception process.
3231  */
3232 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3233 {
3234 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3235 	int dirty = stmmac_rx_dirty(priv, queue);
3236 	unsigned int entry = rx_q->dirty_rx;
3237 
3238 	int bfsize = priv->dma_buf_sz;
3239 
3240 	while (dirty-- > 0) {
3241 		struct dma_desc *p;
3242 
3243 		if (priv->extend_desc)
3244 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3245 		else
3246 			p = rx_q->dma_rx + entry;
3247 
3248 		if (likely(!rx_q->rx_skbuff[entry])) {
3249 			struct sk_buff *skb;
3250 
3251 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3252 			if (unlikely(!skb)) {
3253 				/* so for a while no zero-copy! */
3254 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3255 				if (unlikely(net_ratelimit()))
3256 					dev_err(priv->device,
3257 						"fail to alloc skb entry %d\n",
3258 						entry);
3259 				break;
3260 			}
3261 
3262 			rx_q->rx_skbuff[entry] = skb;
3263 			rx_q->rx_skbuff_dma[entry] =
3264 			    dma_map_single(priv->device, skb->data, bfsize,
3265 					   DMA_FROM_DEVICE);
3266 			if (dma_mapping_error(priv->device,
3267 					      rx_q->rx_skbuff_dma[entry])) {
3268 				netdev_err(priv->dev, "Rx DMA map failed\n");
3269 				dev_kfree_skb(skb);
3270 				break;
3271 			}
3272 
3273 			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3274 			stmmac_refill_desc3(priv, rx_q, p);
3275 
3276 			if (rx_q->rx_zeroc_thresh > 0)
3277 				rx_q->rx_zeroc_thresh--;
3278 
3279 			netif_dbg(priv, rx_status, priv->dev,
3280 				  "refill entry #%d\n", entry);
3281 		}
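		/* Ensure the new buffer address is visible to the device
		 * before the descriptor is handed back to the DMA engine
		 * (OWN bit set below).
		 */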
3282 		dma_wmb();
3283 
3284 		stmmac_set_rx_owner(priv, p, priv->use_riwt);
3285 
3286 		dma_wmb();
3287 
3288 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3289 	}
3290 	rx_q->dirty_rx = entry;
3291 }
3292 
3293 /**
3294  * stmmac_rx - manage the receive process
3295  * @priv: driver private structure
3296  * @limit: napi budget
3297  * @queue: RX queue index.
3298  * Description: this is the function called by the napi poll method.
3299  * It gets all the frames inside the ring.
3300  */
3301 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3302 {
3303 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3304 	unsigned int entry = rx_q->cur_rx;
3305 	int coe = priv->hw->rx_csum;
3306 	unsigned int next_entry;
3307 	unsigned int count = 0;
3308 
3309 	if (netif_msg_rx_status(priv)) {
3310 		void *rx_head;
3311 
3312 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3313 		if (priv->extend_desc)
3314 			rx_head = (void *)rx_q->dma_erx;
3315 		else
3316 			rx_head = (void *)rx_q->dma_rx;
3317 
3318 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3319 	}
3320 	while (count < limit) {
3321 		int status;
3322 		struct dma_desc *p;
3323 		struct dma_desc *np;
3324 
3325 		if (priv->extend_desc)
3326 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3327 		else
3328 			p = rx_q->dma_rx + entry;
3329 
3330 		/* read the status of the incoming frame */
3331 		status = stmmac_rx_status(priv, &priv->dev->stats,
3332 				&priv->xstats, p);
3333 		/* check if managed by the DMA otherwise go ahead */
3334 		if (unlikely(status & dma_own))
3335 			break;
3336 
3337 		count++;
3338 
3339 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3340 		next_entry = rx_q->cur_rx;
3341 
3342 		if (priv->extend_desc)
3343 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3344 		else
3345 			np = rx_q->dma_rx + next_entry;
3346 
3347 		prefetch(np);
3348 
3349 		if (priv->extend_desc)
3350 			stmmac_rx_extended_status(priv, &priv->dev->stats,
3351 					&priv->xstats, rx_q->dma_erx + entry);
3352 		if (unlikely(status == discard_frame)) {
3353 			priv->dev->stats.rx_errors++;
3354 			if (priv->hwts_rx_en && !priv->extend_desc) {
3355 				/* DESC2 & DESC3 will be overwritten by device
3356 				 * with timestamp value, hence reinitialize
3357 				 * them in stmmac_rx_refill() function so that
3358 				 * device can reuse it.
3359 				 */
3360 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3361 				rx_q->rx_skbuff[entry] = NULL;
3362 				dma_unmap_single(priv->device,
3363 						 rx_q->rx_skbuff_dma[entry],
3364 						 priv->dma_buf_sz,
3365 						 DMA_FROM_DEVICE);
3366 			}
3367 		} else {
3368 			struct sk_buff *skb;
3369 			int frame_len;
3370 			unsigned int des;
3371 
3372 			stmmac_get_desc_addr(priv, p, &des);
3373 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3374 
3375 			/*  If frame length is greater than skb buffer size
3376 			 *  (preallocated during init) then the packet is
3377 			 *  ignored
3378 			 */
3379 			if (frame_len > priv->dma_buf_sz) {
3380 				netdev_err(priv->dev,
3381 					   "len %d larger than size (%d)\n",
3382 					   frame_len, priv->dma_buf_sz);
3383 				priv->dev->stats.rx_length_errors++;
3384 				break;
3385 			}
3386 
3387 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3388 			 * Type frames (LLC/LLC-SNAP)
3389 			 *
3390 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3391 			 * feature is always disabled and packets need to be
3392 			 * stripped manually.
3393 			 */
3394 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3395 			    unlikely(status != llc_snap))
3396 				frame_len -= ETH_FCS_LEN;
3397 
3398 			if (netif_msg_rx_status(priv)) {
3399 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3400 					   p, entry, des);
3401 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3402 					   frame_len, status);
3403 			}
3404 
3405 			/* Zero-copy is always used for all sizes in case of
3406 			 * GMAC4 because it always needs to refill the used
3407 			 * descriptors.
3408 			 */
3409 			if (unlikely(!priv->plat->has_gmac4 &&
3410 				     ((frame_len < priv->rx_copybreak) ||
3411 				     stmmac_rx_threshold_count(rx_q)))) {
3412 				skb = netdev_alloc_skb_ip_align(priv->dev,
3413 								frame_len);
3414 				if (unlikely(!skb)) {
3415 					if (net_ratelimit())
3416 						dev_warn(priv->device,
3417 							 "packet dropped\n");
3418 					priv->dev->stats.rx_dropped++;
3419 					break;
3420 				}
3421 
3422 				dma_sync_single_for_cpu(priv->device,
3423 							rx_q->rx_skbuff_dma
3424 							[entry], frame_len,
3425 							DMA_FROM_DEVICE);
3426 				skb_copy_to_linear_data(skb,
3427 							rx_q->
3428 							rx_skbuff[entry]->data,
3429 							frame_len);
3430 
3431 				skb_put(skb, frame_len);
3432 				dma_sync_single_for_device(priv->device,
3433 							   rx_q->rx_skbuff_dma
3434 							   [entry], frame_len,
3435 							   DMA_FROM_DEVICE);
3436 			} else {
3437 				skb = rx_q->rx_skbuff[entry];
3438 				if (unlikely(!skb)) {
3439 					netdev_err(priv->dev,
3440 						   "%s: Inconsistent Rx chain\n",
3441 						   priv->dev->name);
3442 					priv->dev->stats.rx_dropped++;
3443 					break;
3444 				}
3445 				prefetch(skb->data - NET_IP_ALIGN);
3446 				rx_q->rx_skbuff[entry] = NULL;
3447 				rx_q->rx_zeroc_thresh++;
3448 
3449 				skb_put(skb, frame_len);
3450 				dma_unmap_single(priv->device,
3451 						 rx_q->rx_skbuff_dma[entry],
3452 						 priv->dma_buf_sz,
3453 						 DMA_FROM_DEVICE);
3454 			}
3455 
3456 			if (netif_msg_pktdata(priv)) {
3457 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3458 					   frame_len);
3459 				print_pkt(skb->data, frame_len);
3460 			}
3461 
3462 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3463 
3464 			stmmac_rx_vlan(priv->dev, skb);
3465 
3466 			skb->protocol = eth_type_trans(skb, priv->dev);
3467 
3468 			if (unlikely(!coe))
3469 				skb_checksum_none_assert(skb);
3470 			else
3471 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3472 
3473 			napi_gro_receive(&rx_q->napi, skb);
3474 
3475 			priv->dev->stats.rx_packets++;
3476 			priv->dev->stats.rx_bytes += frame_len;
3477 		}
3478 		entry = next_entry;
3479 	}
3480 
3481 	stmmac_rx_refill(priv, queue);
3482 
3483 	priv->xstats.rx_pkt_n += count;
3484 
3485 	return count;
3486 }
3487 
3488 /**
3489  *  stmmac_poll - stmmac poll method (NAPI)
3490  *  @napi : pointer to the napi structure.
3491  *  @budget : maximum number of packets that the current CPU can receive from
3492  *	      all interfaces.
3493  *  Description :
3494  *  To look at the incoming frames and clear the tx resources.
3495  */
3496 static int stmmac_poll(struct napi_struct *napi, int budget)
3497 {
3498 	struct stmmac_rx_queue *rx_q =
3499 		container_of(napi, struct stmmac_rx_queue, napi);
3500 	struct stmmac_priv *priv = rx_q->priv_data;
3501 	u32 tx_count = priv->plat->tx_queues_to_use;
3502 	u32 chan = rx_q->queue_index;
3503 	int work_done = 0;
3504 	u32 queue;
3505 
3506 	priv->xstats.napi_poll++;
3507 
3508 	/* check all the queues */
3509 	for (queue = 0; queue < tx_count; queue++)
3510 		stmmac_tx_clean(priv, queue);
3511 
3512 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3513 	if (work_done < budget) {
3514 		napi_complete_done(napi, work_done);
3515 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3516 	}
3517 	return work_done;
3518 }
3519 
3520 /**
3521  *  stmmac_tx_timeout
3522  *  @dev : Pointer to net device structure
3523  *  Description: this function is called when a packet transmission fails to
3524  *   complete within a reasonable time. The driver will mark the error in the
3525  *   netdev structure and arrange for the device to be reset to a sane state
3526  *   in order to transmit a new packet.
3527  */
3528 static void stmmac_tx_timeout(struct net_device *dev)
3529 {
3530 	struct stmmac_priv *priv = netdev_priv(dev);
3531 
3532 	stmmac_global_err(priv);
3533 }
3534 
3535 /**
3536  *  stmmac_set_rx_mode - entry point for multicast addressing
3537  *  @dev : pointer to the device structure
3538  *  Description:
3539  *  This function is a driver entry point which gets called by the kernel
3540  *  whenever multicast addresses must be enabled/disabled.
3541  *  Return value:
3542  *  void.
3543  */
3544 static void stmmac_set_rx_mode(struct net_device *dev)
3545 {
3546 	struct stmmac_priv *priv = netdev_priv(dev);
3547 
3548 	stmmac_set_filter(priv, priv->hw, dev);
3549 }
3550 
3551 /**
3552  *  stmmac_change_mtu - entry point to change MTU size for the device.
3553  *  @dev : device pointer.
3554  *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transmission Unit (MTU) is used by the network
 *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success or a negative errno value on failure.
3561  */
3562 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3563 {
3564 	struct stmmac_priv *priv = netdev_priv(dev);
3565 
3566 	if (netif_running(dev)) {
3567 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3568 		return -EBUSY;
3569 	}
3570 
3571 	dev->mtu = new_mtu;
3572 
3573 	netdev_update_features(dev);
3574 
3575 	return 0;
3576 }
3577 
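/* Adjust the requested netdev features to what the HW and platform actually
 * support: RX/TX checksum offload, the buggy-jumbo COE workaround and TSO.
 */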
3578 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3579 					     netdev_features_t features)
3580 {
3581 	struct stmmac_priv *priv = netdev_priv(dev);
3582 
3583 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3584 		features &= ~NETIF_F_RXCSUM;
3585 
3586 	if (!priv->plat->tx_coe)
3587 		features &= ~NETIF_F_CSUM_MASK;
3588 
	/* Some GMAC devices have bugged Jumbo frame support and need
	 * to have the Tx COE disabled for oversized frames (due to
	 * limited buffer sizes). In this case we disable the TX csum
	 * insertion in the TDES and do not use SF.
	 */
3594 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3595 		features &= ~NETIF_F_CSUM_MASK;
3596 
3597 	/* Disable tso if asked by ethtool */
3598 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3599 		if (features & NETIF_F_TSO)
3600 			priv->tso = true;
3601 		else
3602 			priv->tso = false;
3603 	}
3604 
3605 	return features;
3606 }
3607 
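/* Enable or disable the RX checksum offload engine according to the
 * requested NETIF_F_RXCSUM feature flag.
 */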
3608 static int stmmac_set_features(struct net_device *netdev,
3609 			       netdev_features_t features)
3610 {
3611 	struct stmmac_priv *priv = netdev_priv(netdev);
3612 
	/* Keep the COE Type in case RX checksum is supported */
3614 	if (features & NETIF_F_RXCSUM)
3615 		priv->hw->rx_csum = priv->plat->rx_coe;
3616 	else
3617 		priv->hw->rx_csum = 0;
	/* No check is needed because rx_coe has already been set and it
	 * will be fixed in case of an issue.
	 */
3621 	stmmac_rx_ipc(priv, priv->hw);
3622 
3623 	return 0;
3624 }
3625 
3626 /**
3627  *  stmmac_interrupt - main ISR
3628  *  @irq: interrupt number.
3629  *  @dev_id: to pass the net device pointer.
3630  *  Description: this is the main driver interrupt service routine.
3631  *  It can call:
3632  *  o DMA service routine (to manage incoming frame reception and transmission
3633  *    status)
3634  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3635  *    interrupts.
3636  */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv;
	u32 rx_cnt, tx_cnt, queues_count;
	u32 queue;

	/* Check the dev pointer before dereferencing it via netdev_priv() */
	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	priv = netdev_priv(dev);
	rx_cnt = priv->plat->rx_queues_to_use;
	tx_cnt = priv->plat->tx_queues_to_use;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);
3655 
3656 	/* Check if adapter is up */
3657 	if (test_bit(STMMAC_DOWN, &priv->state))
3658 		return IRQ_HANDLED;
3659 	/* Check if a fatal error happened */
3660 	if (stmmac_safety_feat_interrupt(priv))
3661 		return IRQ_HANDLED;
3662 
	/* Handle the GMAC core's own interrupts */
3664 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3665 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3666 		int mtl_status;
3667 
3668 		if (unlikely(status)) {
3669 			/* For LPI we need to save the tx status */
3670 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3671 				priv->tx_path_in_lpi_mode = true;
3672 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3673 				priv->tx_path_in_lpi_mode = false;
3674 		}
3675 
3676 		for (queue = 0; queue < queues_count; queue++) {
3677 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3678 
3679 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3680 								queue);
3681 			if (mtl_status != -EINVAL)
3682 				status |= mtl_status;
3683 
3684 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3685 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3686 						       rx_q->rx_tail_addr,
3687 						       queue);
3688 		}
3689 
3690 		/* PCS link status */
3691 		if (priv->hw->pcs) {
3692 			if (priv->xstats.pcs_link)
3693 				netif_carrier_on(dev);
3694 			else
3695 				netif_carrier_off(dev);
3696 		}
3697 	}
3698 
	/* Handle the DMA interrupts */
3700 	stmmac_dma_interrupt(priv);
3701 
3702 	return IRQ_HANDLED;
3703 }
3704 
3705 #ifdef CONFIG_NET_POLL_CONTROLLER
3706 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3707  * to allow network I/O with interrupts disabled.
3708  */
3709 static void stmmac_poll_controller(struct net_device *dev)
3710 {
3711 	disable_irq(dev->irq);
3712 	stmmac_interrupt(dev->irq, dev);
3713 	enable_irq(dev->irq);
3714 }
3715 #endif
3716 
3717 /**
3718  *  stmmac_ioctl - Entry point for the Ioctl
3719  *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure that can contain a pointer to
3721  *  a proprietary structure used to pass information to the driver.
3722  *  @cmd: IOCTL command
3723  *  Description:
3724  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3725  */
3726 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3727 {
3728 	int ret = -EOPNOTSUPP;
3729 
3730 	if (!netif_running(dev))
3731 		return -EINVAL;
3732 
3733 	switch (cmd) {
3734 	case SIOCGMIIPHY:
3735 	case SIOCGMIIREG:
3736 	case SIOCSMIIREG:
3737 		if (!dev->phydev)
3738 			return -EINVAL;
3739 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3740 		break;
3741 	case SIOCSHWTSTAMP:
3742 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3743 		break;
3744 	default:
3745 		break;
3746 	}
3747 
3748 	return ret;
3749 }
3750 
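/* tc block callback: all queues are paused while the (currently cls_u32 only)
 * offload is programmed into the hardware, then re-enabled.
 */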
3751 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3752 				    void *cb_priv)
3753 {
3754 	struct stmmac_priv *priv = cb_priv;
3755 	int ret = -EOPNOTSUPP;
3756 
3757 	stmmac_disable_all_queues(priv);
3758 
3759 	switch (type) {
3760 	case TC_SETUP_CLSU32:
3761 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3762 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3763 		break;
3764 	default:
3765 		break;
3766 	}
3767 
3768 	stmmac_enable_all_queues(priv);
3769 	return ret;
3770 }
3771 
3772 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3773 				 struct tc_block_offload *f)
3774 {
3775 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3776 		return -EOPNOTSUPP;
3777 
3778 	switch (f->command) {
3779 	case TC_BLOCK_BIND:
3780 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3781 				priv, priv);
3782 	case TC_BLOCK_UNBIND:
3783 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3784 		return 0;
3785 	default:
3786 		return -EOPNOTSUPP;
3787 	}
3788 }
3789 
3790 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3791 			   void *type_data)
3792 {
3793 	struct stmmac_priv *priv = netdev_priv(ndev);
3794 
3795 	switch (type) {
3796 	case TC_SETUP_BLOCK:
3797 		return stmmac_setup_tc_block(priv, type_data);
3798 	default:
3799 		return -EOPNOTSUPP;
3800 	}
3801 }
3802 
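/* Set a new MAC address: validate it with eth_mac_addr() and program it into
 * the HW unicast MAC address register 0.
 */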
3803 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3804 {
3805 	struct stmmac_priv *priv = netdev_priv(ndev);
3806 	int ret = 0;
3807 
3808 	ret = eth_mac_addr(ndev, addr);
3809 	if (ret)
3810 		return ret;
3811 
3812 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3813 
3814 	return ret;
3815 }
3816 
3817 #ifdef CONFIG_DEBUG_FS
3818 static struct dentry *stmmac_fs_dir;
3819 
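/* Dump a descriptor ring (basic or extended layout) to a seq_file, one
 * "index [phys addr]: des0 des1 des2 des3" line per descriptor.
 */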
3820 static void sysfs_display_ring(void *head, int size, int extend_desc,
3821 			       struct seq_file *seq)
3822 {
3823 	int i;
3824 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3825 	struct dma_desc *p = (struct dma_desc *)head;
3826 
3827 	for (i = 0; i < size; i++) {
3828 		if (extend_desc) {
3829 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3830 				   i, (unsigned int)virt_to_phys(ep),
3831 				   le32_to_cpu(ep->basic.des0),
3832 				   le32_to_cpu(ep->basic.des1),
3833 				   le32_to_cpu(ep->basic.des2),
3834 				   le32_to_cpu(ep->basic.des3));
3835 			ep++;
3836 		} else {
3837 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3838 				   i, (unsigned int)virt_to_phys(p),
3839 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3840 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3841 			p++;
3842 		}
		seq_putc(seq, '\n');
3844 	}
3845 }
3846 
3847 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3848 {
3849 	struct net_device *dev = seq->private;
3850 	struct stmmac_priv *priv = netdev_priv(dev);
3851 	u32 rx_count = priv->plat->rx_queues_to_use;
3852 	u32 tx_count = priv->plat->tx_queues_to_use;
3853 	u32 queue;
3854 
3855 	for (queue = 0; queue < rx_count; queue++) {
3856 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3857 
3858 		seq_printf(seq, "RX Queue %d:\n", queue);
3859 
3860 		if (priv->extend_desc) {
3861 			seq_printf(seq, "Extended descriptor ring:\n");
3862 			sysfs_display_ring((void *)rx_q->dma_erx,
3863 					   DMA_RX_SIZE, 1, seq);
3864 		} else {
3865 			seq_printf(seq, "Descriptor ring:\n");
3866 			sysfs_display_ring((void *)rx_q->dma_rx,
3867 					   DMA_RX_SIZE, 0, seq);
3868 		}
3869 	}
3870 
3871 	for (queue = 0; queue < tx_count; queue++) {
3872 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3873 
3874 		seq_printf(seq, "TX Queue %d:\n", queue);
3875 
3876 		if (priv->extend_desc) {
3877 			seq_printf(seq, "Extended descriptor ring:\n");
3878 			sysfs_display_ring((void *)tx_q->dma_etx,
3879 					   DMA_TX_SIZE, 1, seq);
3880 		} else {
3881 			seq_printf(seq, "Descriptor ring:\n");
3882 			sysfs_display_ring((void *)tx_q->dma_tx,
3883 					   DMA_TX_SIZE, 0, seq);
3884 		}
3885 	}
3886 
3887 	return 0;
3888 }
3889 
3890 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3891 {
3892 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3893 }
3894 
3895 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3896 
3897 static const struct file_operations stmmac_rings_status_fops = {
3898 	.owner = THIS_MODULE,
3899 	.open = stmmac_sysfs_ring_open,
3900 	.read = seq_read,
3901 	.llseek = seq_lseek,
3902 	.release = single_release,
3903 };
3904 
3905 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3906 {
3907 	struct net_device *dev = seq->private;
3908 	struct stmmac_priv *priv = netdev_priv(dev);
3909 
3910 	if (!priv->hw_cap_support) {
3911 		seq_printf(seq, "DMA HW features not supported\n");
3912 		return 0;
3913 	}
3914 
3915 	seq_printf(seq, "==============================\n");
3916 	seq_printf(seq, "\tDMA HW features\n");
3917 	seq_printf(seq, "==============================\n");
3918 
3919 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3920 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3921 	seq_printf(seq, "\t1000 Mbps: %s\n",
3922 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3923 	seq_printf(seq, "\tHalf duplex: %s\n",
3924 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3925 	seq_printf(seq, "\tHash Filter: %s\n",
3926 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3927 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3928 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3929 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3930 		   (priv->dma_cap.pcs) ? "Y" : "N");
3931 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3932 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3933 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3934 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3935 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3936 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3937 	seq_printf(seq, "\tRMON module: %s\n",
3938 		   (priv->dma_cap.rmon) ? "Y" : "N");
3939 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3940 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3941 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3942 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3943 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3944 		   (priv->dma_cap.eee) ? "Y" : "N");
3945 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3946 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3947 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3948 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3949 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3950 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3951 	} else {
3952 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3953 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3954 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3955 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3956 	}
3957 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3958 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3959 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3960 		   priv->dma_cap.number_rx_channel);
3961 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3962 		   priv->dma_cap.number_tx_channel);
3963 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3964 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3965 
3966 	return 0;
3967 }
3968 
3969 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3970 {
3971 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3972 }
3973 
3974 static const struct file_operations stmmac_dma_cap_fops = {
3975 	.owner = THIS_MODULE,
3976 	.open = stmmac_sysfs_dma_cap_open,
3977 	.read = seq_read,
3978 	.llseek = seq_lseek,
3979 	.release = single_release,
3980 };
3981 
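/* Create the per-netdev debugfs entries: "descriptors_status" (RX/TX ring
 * dump) and "dma_cap" (DMA HW features).
 */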
3982 static int stmmac_init_fs(struct net_device *dev)
3983 {
3984 	struct stmmac_priv *priv = netdev_priv(dev);
3985 
3986 	/* Create per netdev entries */
3987 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3988 
3989 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3990 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3991 
3992 		return -ENOMEM;
3993 	}
3994 
3995 	/* Entry to report DMA RX/TX rings */
3996 	priv->dbgfs_rings_status =
3997 		debugfs_create_file("descriptors_status", 0444,
3998 				    priv->dbgfs_dir, dev,
3999 				    &stmmac_rings_status_fops);
4000 
4001 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4002 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4003 		debugfs_remove_recursive(priv->dbgfs_dir);
4004 
4005 		return -ENOMEM;
4006 	}
4007 
4008 	/* Entry to report the DMA HW features */
4009 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4010 						  priv->dbgfs_dir,
4011 						  dev, &stmmac_dma_cap_fops);
4012 
4013 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4014 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4015 		debugfs_remove_recursive(priv->dbgfs_dir);
4016 
4017 		return -ENOMEM;
4018 	}
4019 
4020 	return 0;
4021 }
4022 
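/* Remove the per-netdev debugfs directory and all of its entries */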
4023 static void stmmac_exit_fs(struct net_device *dev)
4024 {
4025 	struct stmmac_priv *priv = netdev_priv(dev);
4026 
4027 	debugfs_remove_recursive(priv->dbgfs_dir);
4028 }
4029 #endif /* CONFIG_DEBUG_FS */
4030 
4031 static const struct net_device_ops stmmac_netdev_ops = {
4032 	.ndo_open = stmmac_open,
4033 	.ndo_start_xmit = stmmac_xmit,
4034 	.ndo_stop = stmmac_release,
4035 	.ndo_change_mtu = stmmac_change_mtu,
4036 	.ndo_fix_features = stmmac_fix_features,
4037 	.ndo_set_features = stmmac_set_features,
4038 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4039 	.ndo_tx_timeout = stmmac_tx_timeout,
4040 	.ndo_do_ioctl = stmmac_ioctl,
4041 	.ndo_setup_tc = stmmac_setup_tc,
4042 #ifdef CONFIG_NET_POLL_CONTROLLER
4043 	.ndo_poll_controller = stmmac_poll_controller,
4044 #endif
4045 	.ndo_set_mac_address = stmmac_set_mac_address,
4046 };
4047 
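/* Reset subtask, run from the service workqueue: if a reset was requested and
 * the interface is not already down, close and re-open the device under rtnl
 * to bring the hardware back to a sane state.
 */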
4048 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4049 {
4050 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4051 		return;
4052 	if (test_bit(STMMAC_DOWN, &priv->state))
4053 		return;
4054 
4055 	netdev_err(priv->dev, "Reset adapter.\n");
4056 
4057 	rtnl_lock();
4058 	netif_trans_update(priv->dev);
4059 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4060 		usleep_range(1000, 2000);
4061 
4062 	set_bit(STMMAC_DOWN, &priv->state);
4063 	dev_close(priv->dev);
4064 	dev_open(priv->dev);
4065 	clear_bit(STMMAC_DOWN, &priv->state);
4066 	clear_bit(STMMAC_RESETING, &priv->state);
4067 	rtnl_unlock();
4068 }
4069 
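/* Deferred work handler; it currently only runs the reset subtask */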
4070 static void stmmac_service_task(struct work_struct *work)
4071 {
4072 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4073 			service_task);
4074 
4075 	stmmac_reset_subtask(priv);
4076 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4077 }
4078 
4079 /**
4080  *  stmmac_hw_init - Init the MAC device
4081  *  @priv: driver private structure
 *  Description: this function configures the MAC device according to
 *  some platform parameters or the HW capability register. It prepares the
 *  driver to use either ring or chain mode and to set up either enhanced or
 *  normal descriptors.
4086  */
4087 static int stmmac_hw_init(struct stmmac_priv *priv)
4088 {
4089 	int ret;
4090 
	/* dwmac-sun8i only works in chain mode */
4092 	if (priv->plat->has_sun8i)
4093 		chain_mode = 1;
4094 	priv->chain_mode = chain_mode;
4095 
4096 	/* Initialize HW Interface */
4097 	ret = stmmac_hwif_init(priv);
4098 	if (ret)
4099 		return ret;
4100 
	/* Get the HW capabilities (GMAC cores newer than 3.50a) */
4102 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4103 	if (priv->hw_cap_support) {
4104 		dev_info(priv->device, "DMA HW capability register supported\n");
4105 
		/* We can override some gmac/dma configuration fields
		 * (e.g. enh_desc, tx_coe) that are passed through the
		 * platform with the values from the HW capability
		 * register (if supported).
		 */
4111 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4112 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4113 		priv->hw->pmt = priv->plat->pmt;
4114 
4115 		/* TXCOE doesn't work in thresh DMA mode */
4116 		if (priv->plat->force_thresh_dma_mode)
4117 			priv->plat->tx_coe = 0;
4118 		else
4119 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4120 
4121 		/* In case of GMAC4 rx_coe is from HW cap register. */
4122 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4123 
4124 		if (priv->dma_cap.rx_coe_type2)
4125 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4126 		else if (priv->dma_cap.rx_coe_type1)
4127 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4128 
4129 	} else {
4130 		dev_info(priv->device, "No HW DMA feature register supported\n");
4131 	}
4132 
4133 	if (priv->plat->rx_coe) {
4134 		priv->hw->rx_csum = priv->plat->rx_coe;
4135 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4136 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4137 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4138 	}
4139 	if (priv->plat->tx_coe)
4140 		dev_info(priv->device, "TX Checksum insertion supported\n");
4141 
4142 	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On LAN supported\n");
4144 		device_set_wakeup_capable(priv->device, 1);
4145 	}
4146 
4147 	if (priv->dma_cap.tsoen)
4148 		dev_info(priv->device, "TSO supported\n");
4149 
4150 	/* Run HW quirks, if any */
4151 	if (priv->hwif_quirks) {
4152 		ret = priv->hwif_quirks(priv);
4153 		if (ret)
4154 			return ret;
4155 	}
4156 
4157 	return 0;
4158 }
4159 
4160 /**
4161  * stmmac_dvr_probe
4162  * @device: device pointer
4163  * @plat_dat: platform data pointer
4164  * @res: stmmac resource pointer
 * Description: this is the main probe function; it calls alloc_etherdev and
 * allocates and initializes the private structure.
4167  * Return:
4168  * returns 0 on success, otherwise errno.
4169  */
4170 int stmmac_dvr_probe(struct device *device,
4171 		     struct plat_stmmacenet_data *plat_dat,
4172 		     struct stmmac_resources *res)
4173 {
4174 	struct net_device *ndev = NULL;
4175 	struct stmmac_priv *priv;
4176 	int ret = 0;
4177 	u32 queue;
4178 
4179 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4180 				  MTL_MAX_TX_QUEUES,
4181 				  MTL_MAX_RX_QUEUES);
4182 	if (!ndev)
4183 		return -ENOMEM;
4184 
4185 	SET_NETDEV_DEV(ndev, device);
4186 
4187 	priv = netdev_priv(ndev);
4188 	priv->device = device;
4189 	priv->dev = ndev;
4190 
4191 	stmmac_set_ethtool_ops(ndev);
4192 	priv->pause = pause;
4193 	priv->plat = plat_dat;
4194 	priv->ioaddr = res->addr;
4195 	priv->dev->base_addr = (unsigned long)res->addr;
4196 
4197 	priv->dev->irq = res->irq;
4198 	priv->wol_irq = res->wol_irq;
4199 	priv->lpi_irq = res->lpi_irq;
4200 
4201 	if (res->mac)
4202 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4203 
4204 	dev_set_drvdata(device, priv->dev);
4205 
4206 	/* Verify driver arguments */
4207 	stmmac_verify_args();
4208 
4209 	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		ret = -ENOMEM;
		goto error_wq;
	}
4215 
4216 	INIT_WORK(&priv->service_task, stmmac_service_task);
4217 
4218 	/* Override with kernel parameters if supplied XXX CRS XXX
4219 	 * this needs to have multiple instances
4220 	 */
4221 	if ((phyaddr >= 0) && (phyaddr <= 31))
4222 		priv->plat->phy_addr = phyaddr;
4223 
4224 	if (priv->plat->stmmac_rst) {
4225 		ret = reset_control_assert(priv->plat->stmmac_rst);
4226 		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers provide only a reset callback
		 * instead of the assert + deassert callback pair.
		 */
4230 		if (ret == -ENOTSUPP)
4231 			reset_control_reset(priv->plat->stmmac_rst);
4232 	}
4233 
4234 	/* Init MAC and get the capabilities */
4235 	ret = stmmac_hw_init(priv);
4236 	if (ret)
4237 		goto error_hw_init;
4238 
4239 	/* Configure real RX and TX queues */
4240 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4241 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4242 
4243 	ndev->netdev_ops = &stmmac_netdev_ops;
4244 
4245 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4246 			    NETIF_F_RXCSUM;
4247 
4248 	ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;
4252 
4253 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4254 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4255 		priv->tso = true;
4256 		dev_info(priv->device, "TSO feature enabled\n");
4257 	}
4258 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4259 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4260 #ifdef STMMAC_VLAN_TAG_USED
4261 	/* Both mac100 and gmac support receive VLAN tag detection */
4262 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4263 #endif
4264 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4265 
4266 	/* MTU range: 46 - hw-specific max */
4267 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4268 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4269 		ndev->max_mtu = JUMBO_LEN;
4270 	else
4271 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
	 */
4275 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4276 	    (priv->plat->maxmtu >= ndev->min_mtu))
4277 		ndev->max_mtu = priv->plat->maxmtu;
4278 	else if (priv->plat->maxmtu < ndev->min_mtu)
4279 		dev_warn(priv->device,
4280 			 "%s: warning: maxmtu having invalid value (%d)\n",
4281 			 __func__, priv->plat->maxmtu);
4282 
4283 	if (flow_ctrl)
4284 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4285 
	/* Rx Watchdog is available in the COREs newer than 3.40.
	 * In some cases, for example on bugged HW, this feature
	 * has to be disabled; this can be done by passing the
	 * riwt_off field from the platform.
	 */
4291 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4292 		priv->use_riwt = 1;
4293 		dev_info(priv->device,
4294 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4295 	}
4296 
4297 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4298 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4299 
4300 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4301 			       (8 * priv->plat->rx_queues_to_use));
4302 	}
4303 
4304 	mutex_init(&priv->lock);
4305 
	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Otherwise, the driver
	 * will try to set the MDC clock dynamically according to the
	 * actual csr clock input.
	 */
4312 	if (!priv->plat->clk_csr)
4313 		stmmac_clk_csr_set(priv);
4314 	else
4315 		priv->clk_csr = priv->plat->clk_csr;
4316 
4317 	stmmac_check_pcs_mode(priv);
4318 
4319 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4320 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4321 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4322 		/* MDIO bus Registration */
4323 		ret = stmmac_mdio_register(ndev);
4324 		if (ret < 0) {
4325 			dev_err(priv->device,
4326 				"%s: MDIO bus (id: %d) registration failed",
4327 				__func__, priv->plat->bus_id);
4328 			goto error_mdio_register;
4329 		}
4330 	}
4331 
4332 	ret = register_netdev(ndev);
4333 	if (ret) {
4334 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4335 			__func__, ret);
4336 		goto error_netdev_register;
4337 	}
4338 
4339 	return ret;
4340 
4341 error_netdev_register:
4342 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4343 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4344 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4345 		stmmac_mdio_unregister(ndev);
4346 error_mdio_register:
4347 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4348 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4349 
4350 		netif_napi_del(&rx_q->napi);
4351 	}
4352 error_hw_init:
4353 	destroy_workqueue(priv->wq);
4354 error_wq:
4355 	free_netdev(ndev);
4356 
4357 	return ret;
4358 }
4359 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4360 
4361 /**
4362  * stmmac_dvr_remove
4363  * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
 * changes the link status and releases the DMA descriptor rings.
4366  */
4367 int stmmac_dvr_remove(struct device *dev)
4368 {
4369 	struct net_device *ndev = dev_get_drvdata(dev);
4370 	struct stmmac_priv *priv = netdev_priv(ndev);
4371 
4372 	netdev_info(priv->dev, "%s: removing driver", __func__);
4373 
4374 	stmmac_stop_all_dma(priv);
4375 
4376 	stmmac_mac_set(priv, priv->ioaddr, false);
4377 	netif_carrier_off(ndev);
4378 	unregister_netdev(ndev);
4379 	if (priv->plat->stmmac_rst)
4380 		reset_control_assert(priv->plat->stmmac_rst);
4381 	clk_disable_unprepare(priv->plat->pclk);
4382 	clk_disable_unprepare(priv->plat->stmmac_clk);
4383 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4384 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4385 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4386 		stmmac_mdio_unregister(ndev);
4387 	destroy_workqueue(priv->wq);
4388 	mutex_destroy(&priv->lock);
4389 	free_netdev(ndev);
4390 
4391 	return 0;
4392 }
4393 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4394 
4395 /**
4396  * stmmac_suspend - suspend callback
4397  * @dev: device pointer
 * Description: this is the function to suspend the device; it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL) and clean up driver resources.
4401  */
4402 int stmmac_suspend(struct device *dev)
4403 {
4404 	struct net_device *ndev = dev_get_drvdata(dev);
4405 	struct stmmac_priv *priv = netdev_priv(ndev);
4406 
4407 	if (!ndev || !netif_running(ndev))
4408 		return 0;
4409 
4410 	if (ndev->phydev)
4411 		phy_stop(ndev->phydev);
4412 
4413 	mutex_lock(&priv->lock);
4414 
4415 	netif_device_detach(ndev);
4416 	stmmac_stop_all_queues(priv);
4417 
4418 	stmmac_disable_all_queues(priv);
4419 
4420 	/* Stop TX/RX DMA */
4421 	stmmac_stop_all_dma(priv);
4422 
4423 	/* Enable Power down mode by programming the PMT regs */
4424 	if (device_may_wakeup(priv->device)) {
4425 		stmmac_pmt(priv, priv->hw, priv->wolopts);
4426 		priv->irq_wake = 1;
4427 	} else {
4428 		stmmac_mac_set(priv, priv->ioaddr, false);
4429 		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable the clocks since wake-up (WoL) is not used */
4431 		clk_disable(priv->plat->pclk);
4432 		clk_disable(priv->plat->stmmac_clk);
4433 	}
4434 	mutex_unlock(&priv->lock);
4435 
4436 	priv->oldlink = false;
4437 	priv->speed = SPEED_UNKNOWN;
4438 	priv->oldduplex = DUPLEX_UNKNOWN;
4439 	return 0;
4440 }
4441 EXPORT_SYMBOL_GPL(stmmac_suspend);
4442 
4443 /**
4444  * stmmac_reset_queues_param - reset queue parameters
 * @priv: driver private structure
4446  */
4447 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4448 {
4449 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4450 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4451 	u32 queue;
4452 
4453 	for (queue = 0; queue < rx_cnt; queue++) {
4454 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4455 
4456 		rx_q->cur_rx = 0;
4457 		rx_q->dirty_rx = 0;
4458 	}
4459 
4460 	for (queue = 0; queue < tx_cnt; queue++) {
4461 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4462 
4463 		tx_q->cur_tx = 0;
4464 		tx_q->dirty_tx = 0;
4465 		tx_q->mss = 0;
4466 	}
4467 }
4468 
4469 /**
4470  * stmmac_resume - resume callback
4471  * @dev: device pointer
 * Description: when resuming, this function is invoked to set up the DMA and
 * CORE in a usable state.
4474  */
4475 int stmmac_resume(struct device *dev)
4476 {
4477 	struct net_device *ndev = dev_get_drvdata(dev);
4478 	struct stmmac_priv *priv = netdev_priv(ndev);
4479 
4480 	if (!netif_running(ndev))
4481 		return 0;
4482 
	/* The Power Down bit, in the PMT register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from other devices (e.g. serial console).
	 */
4489 	if (device_may_wakeup(priv->device)) {
4490 		mutex_lock(&priv->lock);
4491 		stmmac_pmt(priv, priv->hw, 0);
4492 		mutex_unlock(&priv->lock);
4493 		priv->irq_wake = 0;
4494 	} else {
4495 		pinctrl_pm_select_default_state(priv->device);
		/* enable the clocks previously disabled */
4497 		clk_enable(priv->plat->stmmac_clk);
4498 		clk_enable(priv->plat->pclk);
4499 		/* reset the phy so that it's ready */
4500 		if (priv->mii)
4501 			stmmac_mdio_reset(priv->mii);
4502 	}
4503 
4504 	netif_device_attach(ndev);
4505 
4506 	mutex_lock(&priv->lock);
4507 
4508 	stmmac_reset_queues_param(priv);
4509 
4510 	stmmac_clear_descriptors(priv);
4511 
4512 	stmmac_hw_setup(ndev, false);
4513 	stmmac_init_tx_coalesce(priv);
4514 	stmmac_set_rx_mode(ndev);
4515 
4516 	stmmac_enable_all_queues(priv);
4517 
4518 	stmmac_start_all_queues(priv);
4519 
4520 	mutex_unlock(&priv->lock);
4521 
4522 	if (ndev->phydev)
4523 		phy_start(ndev->phydev);
4524 
4525 	return 0;
4526 }
4527 EXPORT_SYMBOL_GPL(stmmac_resume);
4528 
4529 #ifndef MODULE
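/* Parse the built-in "stmmaceth=" kernel command line option: a
 * comma-separated list of "name:value" pairs, for example (illustrative
 * values only) stmmaceth=debug:16,eee_timer:2000.
 */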
4530 static int __init stmmac_cmdline_opt(char *str)
4531 {
4532 	char *opt;
4533 
4534 	if (!str || !*str)
4535 		return -EINVAL;
4536 	while ((opt = strsep(&str, ",")) != NULL) {
4537 		if (!strncmp(opt, "debug:", 6)) {
4538 			if (kstrtoint(opt + 6, 0, &debug))
4539 				goto err;
4540 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4541 			if (kstrtoint(opt + 8, 0, &phyaddr))
4542 				goto err;
4543 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4544 			if (kstrtoint(opt + 7, 0, &buf_sz))
4545 				goto err;
4546 		} else if (!strncmp(opt, "tc:", 3)) {
4547 			if (kstrtoint(opt + 3, 0, &tc))
4548 				goto err;
4549 		} else if (!strncmp(opt, "watchdog:", 9)) {
4550 			if (kstrtoint(opt + 9, 0, &watchdog))
4551 				goto err;
4552 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4553 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4554 				goto err;
4555 		} else if (!strncmp(opt, "pause:", 6)) {
4556 			if (kstrtoint(opt + 6, 0, &pause))
4557 				goto err;
4558 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4559 			if (kstrtoint(opt + 10, 0, &eee_timer))
4560 				goto err;
4561 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4562 			if (kstrtoint(opt + 11, 0, &chain_mode))
4563 				goto err;
4564 		}
4565 	}
4566 	return 0;
4567 
4568 err:
4569 	pr_err("%s: ERROR broken module parameter conversion", __func__);
4570 	return -EINVAL;
4571 }
4572 
4573 __setup("stmmaceth=", stmmac_cmdline_opt);
4574 #endif /* MODULE */
4575 
4576 static int __init stmmac_init(void)
4577 {
4578 #ifdef CONFIG_DEBUG_FS
4579 	/* Create debugfs main directory if it doesn't exist yet */
4580 	if (!stmmac_fs_dir) {
4581 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4582 
4583 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4584 			pr_err("ERROR %s, debugfs create directory failed\n",
4585 			       STMMAC_RESOURCE_NAME);
4586 
4587 			return -ENOMEM;
4588 		}
4589 	}
4590 #endif
4591 
4592 	return 0;
4593 }
4594 
4595 static void __exit stmmac_exit(void)
4596 {
4597 #ifdef CONFIG_DEBUG_FS
4598 	debugfs_remove_recursive(stmmac_fs_dir);
4599 #endif
4600 }
4601 
4602 module_init(stmmac_init)
4603 module_exit(stmmac_exit)
4604 
4605 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4606 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4607 MODULE_LICENSE("GPL");
4608