xref: /openbmc/linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision dd2934a95701576203b2f61e8ded4e4a2f9183ea)
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "dwxgmac2.h"
55 #include "hwif.h"
56 
57 #define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
58 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
59 
60 /* Module parameters */
61 #define TX_TIMEO	5000
62 static int watchdog = TX_TIMEO;
63 module_param(watchdog, int, 0644);
64 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
65 
66 static int debug = -1;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
69 
70 static int phyaddr = -1;
71 module_param(phyaddr, int, 0444);
72 MODULE_PARM_DESC(phyaddr, "Physical device address");
73 
74 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
75 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
76 
77 static int flow_ctrl = FLOW_OFF;
78 module_param(flow_ctrl, int, 0644);
79 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
80 
81 static int pause = PAUSE_TIME;
82 module_param(pause, int, 0644);
83 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
84 
85 #define TC_DEFAULT 64
86 static int tc = TC_DEFAULT;
87 module_param(tc, int, 0644);
88 MODULE_PARM_DESC(tc, "DMA threshold control value");
89 
90 #define	DEFAULT_BUFSIZE	1536
91 static int buf_sz = DEFAULT_BUFSIZE;
92 module_param(buf_sz, int, 0644);
93 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
94 
95 #define	STMMAC_RX_COPYBREAK	256
96 
97 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
98 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
99 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
100 
101 #define STMMAC_DEFAULT_LPI_TIMER	1000
102 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103 module_param(eee_timer, int, 0644);
104 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
105 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
106 
107 /* By default the driver will use the ring mode to manage tx and rx descriptors,
108  * but allows the user to force the use of the chain instead of the ring
109  */
110 static unsigned int chain_mode;
111 module_param(chain_mode, int, 0444);
112 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
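/* Usage sketch (illustrative, not part of the original source; it assumes the
 * driver is built as stmmac.ko): the module parameters above can be set at
 * load time, and the 0644 ones can also be changed at run time through sysfs:
 *
 *	modprobe stmmac eee_timer=2000 buf_sz=4096
 *	echo 8000 > /sys/module/stmmac/parameters/watchdog
 *
 * With a built-in driver the same values can be passed on the kernel command
 * line, e.g. stmmac.chain_mode=1; parameters registered with 0444 permissions
 * (phyaddr, chain_mode) are read-only at run time.
 */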
113 
114 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
115 
116 #ifdef CONFIG_DEBUG_FS
117 static int stmmac_init_fs(struct net_device *dev);
118 static void stmmac_exit_fs(struct net_device *dev);
119 #endif
120 
121 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
122 
123 /**
124  * stmmac_verify_args - verify the driver parameters.
125  * Description: it checks the driver parameters and sets a default in case of
126  * errors.
127  */
128 static void stmmac_verify_args(void)
129 {
130 	if (unlikely(watchdog < 0))
131 		watchdog = TX_TIMEO;
132 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
133 		buf_sz = DEFAULT_BUFSIZE;
134 	if (unlikely(flow_ctrl > 1))
135 		flow_ctrl = FLOW_AUTO;
136 	else if (likely(flow_ctrl < 0))
137 		flow_ctrl = FLOW_OFF;
138 	if (unlikely((pause < 0) || (pause > 0xffff)))
139 		pause = PAUSE_TIME;
140 	if (eee_timer < 0)
141 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
142 }
143 
144 /**
145  * stmmac_disable_all_queues - Disable all queues
146  * @priv: driver private structure
147  */
148 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
149 {
150 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
151 	u32 queue;
152 
153 	for (queue = 0; queue < rx_queues_cnt; queue++) {
154 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
155 
156 		napi_disable(&rx_q->napi);
157 	}
158 }
159 
160 /**
161  * stmmac_enable_all_queues - Enable all queues
162  * @priv: driver private structure
163  */
164 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
165 {
166 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
167 	u32 queue;
168 
169 	for (queue = 0; queue < rx_queues_cnt; queue++) {
170 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
171 
172 		napi_enable(&rx_q->napi);
173 	}
174 }
175 
176 /**
177  * stmmac_stop_all_queues - Stop all queues
178  * @priv: driver private structure
179  */
180 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
181 {
182 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
183 	u32 queue;
184 
185 	for (queue = 0; queue < tx_queues_cnt; queue++)
186 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
187 }
188 
189 /**
190  * stmmac_start_all_queues - Start all queues
191  * @priv: driver private structure
192  */
193 static void stmmac_start_all_queues(struct stmmac_priv *priv)
194 {
195 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
196 	u32 queue;
197 
198 	for (queue = 0; queue < tx_queues_cnt; queue++)
199 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
200 }
201 
202 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
203 {
204 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
205 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
206 		queue_work(priv->wq, &priv->service_task);
207 }
208 
209 static void stmmac_global_err(struct stmmac_priv *priv)
210 {
211 	netif_carrier_off(priv->dev);
212 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
213 	stmmac_service_event_schedule(priv);
214 }
215 
216 /**
217  * stmmac_clk_csr_set - dynamically set the MDC clock
218  * @priv: driver private structure
219  * Description: this is to dynamically set the MDC clock according to the csr
220  * clock input.
221  * Note:
222  *	If a specific clk_csr value is passed from the platform
223  *	this means that the CSR Clock Range selection cannot be
224  *	changed at run-time and it is fixed (as reported in the driver
225  *	documentation). Otherwise, the driver will try to set the MDC
226  *	clock dynamically according to the actual clock input.
227  */
228 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
229 {
230 	u32 clk_rate;
231 
232 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
233 
234 	/* The platform-provided default clk_csr is assumed to be valid
235 	 * for all cases except the ones mentioned below.
236 	 * For values higher than the IEEE 802.3 specified frequency
237 	 * we cannot estimate the proper divider, because the frequency
238 	 * of clk_csr_i is not known, so we do not change the default
239 	 * divider.
240 	 */
241 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
242 		if (clk_rate < CSR_F_35M)
243 			priv->clk_csr = STMMAC_CSR_20_35M;
244 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
245 			priv->clk_csr = STMMAC_CSR_35_60M;
246 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
247 			priv->clk_csr = STMMAC_CSR_60_100M;
248 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
249 			priv->clk_csr = STMMAC_CSR_100_150M;
250 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
251 			priv->clk_csr = STMMAC_CSR_150_250M;
252 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
253 			priv->clk_csr = STMMAC_CSR_250_300M;
254 	}
255 
256 	if (priv->plat->has_sun8i) {
257 		if (clk_rate > 160000000)
258 			priv->clk_csr = 0x03;
259 		else if (clk_rate > 80000000)
260 			priv->clk_csr = 0x02;
261 		else if (clk_rate > 40000000)
262 			priv->clk_csr = 0x01;
263 		else
264 			priv->clk_csr = 0;
265 	}
266 
267 	if (priv->plat->has_xgmac) {
268 		if (clk_rate > 400000000)
269 			priv->clk_csr = 0x5;
270 		else if (clk_rate > 350000000)
271 			priv->clk_csr = 0x4;
272 		else if (clk_rate > 300000000)
273 			priv->clk_csr = 0x3;
274 		else if (clk_rate > 250000000)
275 			priv->clk_csr = 0x2;
276 		else if (clk_rate > 150000000)
277 			priv->clk_csr = 0x1;
278 		else
279 			priv->clk_csr = 0x0;
280 	}
281 }
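/* Worked example (illustrative values only): with a generic core and a CSR
 * clock of 75 MHz the ladder above selects STMMAC_CSR_60_100M; on a sun8i
 * platform the same 75 MHz rate gives clk_csr = 0x01 (the "> 40 MHz" bucket),
 * while 100 MHz would give 0x02.
 */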
282 
283 static void print_pkt(unsigned char *buf, int len)
284 {
285 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
286 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
287 }
288 
289 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
290 {
291 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
292 	u32 avail;
293 
294 	if (tx_q->dirty_tx > tx_q->cur_tx)
295 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
296 	else
297 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
298 
299 	return avail;
300 }
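/* Worked example (hypothetical values, assuming DMA_TX_SIZE = 512): with
 * cur_tx = 510 and dirty_tx = 5 the producer has wrapped, so
 * avail = 512 - 510 + 5 - 1 = 6 free descriptors. The "- 1" keeps one slot
 * unused so that a completely full ring can be told apart from an empty one.
 */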
301 
302 /**
303  * stmmac_rx_dirty - Get RX queue dirty
304  * @priv: driver private structure
305  * @queue: RX queue index
306  */
307 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
308 {
309 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
310 	u32 dirty;
311 
312 	if (rx_q->dirty_rx <= rx_q->cur_rx)
313 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
314 	else
315 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
316 
317 	return dirty;
318 }
319 
320 /**
321  * stmmac_hw_fix_mac_speed - callback for speed selection
322  * @priv: driver private structure
323  * Description: on some platforms (e.g. ST), some HW system configuration
324  * registers have to be set according to the link speed negotiated.
325  */
326 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
327 {
328 	struct net_device *ndev = priv->dev;
329 	struct phy_device *phydev = ndev->phydev;
330 
331 	if (likely(priv->plat->fix_mac_speed))
332 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
333 }
334 
335 /**
336  * stmmac_enable_eee_mode - check and enter LPI mode
337  * @priv: driver private structure
338  * Description: this function checks that all TX queues are idle and, if so,
339  * puts the MAC in LPI mode when EEE is enabled.
340  */
341 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
342 {
343 	u32 tx_cnt = priv->plat->tx_queues_to_use;
344 	u32 queue;
345 
346 	/* check if all TX queues have the work finished */
347 	for (queue = 0; queue < tx_cnt; queue++) {
348 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
349 
350 		if (tx_q->dirty_tx != tx_q->cur_tx)
351 			return; /* still unfinished work */
352 	}
353 
354 	/* Check and enter in LPI mode */
355 	if (!priv->tx_path_in_lpi_mode)
356 		stmmac_set_eee_mode(priv, priv->hw,
357 				priv->plat->en_tx_lpi_clockgating);
358 }
359 
360 /**
361  * stmmac_disable_eee_mode - disable and exit from LPI mode
362  * @priv: driver private structure
363  * Description: this function exits and disables EEE when the TX path is in
364  * the LPI state. It is called from the xmit path.
365  */
366 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
367 {
368 	stmmac_reset_eee_mode(priv, priv->hw);
369 	del_timer_sync(&priv->eee_ctrl_timer);
370 	priv->tx_path_in_lpi_mode = false;
371 }
372 
373 /**
374  * stmmac_eee_ctrl_timer - EEE TX SW timer.
375  * @t: timer_list pointer
376  * Description:
377  *  if there is no data transfer and we are not already in the LPI state,
378  *  then the MAC transmitter can be moved to the LPI state.
379  */
380 static void stmmac_eee_ctrl_timer(struct timer_list *t)
381 {
382 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
383 
384 	stmmac_enable_eee_mode(priv);
385 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
386 }
387 
388 /**
389  * stmmac_eee_init - init EEE
390  * @priv: driver private structure
391  * Description:
392  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
393  *  can also manage EEE, this function enables the LPI state and starts the
394  *  related timer.
395  */
396 bool stmmac_eee_init(struct stmmac_priv *priv)
397 {
398 	struct net_device *ndev = priv->dev;
399 	int interface = priv->plat->interface;
400 	bool ret = false;
401 
402 	if ((interface != PHY_INTERFACE_MODE_MII) &&
403 	    (interface != PHY_INTERFACE_MODE_GMII) &&
404 	    !phy_interface_mode_is_rgmii(interface))
405 		goto out;
406 
407 	/* Using PCS we cannot deal with the PHY registers at this stage,
408 	 * so we do not support extra features like EEE.
409 	 */
410 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
411 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
412 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
413 		goto out;
414 
415 	/* MAC core supports the EEE feature. */
416 	if (priv->dma_cap.eee) {
417 		int tx_lpi_timer = priv->tx_lpi_timer;
418 
419 		/* Check if the PHY supports EEE */
420 		if (phy_init_eee(ndev->phydev, 1)) {
421 			/* Handle the case where EEE can no longer be supported
422 			 * at run time (for example because the link partner
423 			 * capabilities have changed).
424 			 * In that case the driver disables its own timers.
425 			 */
426 			mutex_lock(&priv->lock);
427 			if (priv->eee_active) {
428 				netdev_dbg(priv->dev, "disable EEE\n");
429 				del_timer_sync(&priv->eee_ctrl_timer);
430 				stmmac_set_eee_timer(priv, priv->hw, 0,
431 						tx_lpi_timer);
432 			}
433 			priv->eee_active = 0;
434 			mutex_unlock(&priv->lock);
435 			goto out;
436 		}
437 		/* Activate the EEE and start timers */
438 		mutex_lock(&priv->lock);
439 		if (!priv->eee_active) {
440 			priv->eee_active = 1;
441 			timer_setup(&priv->eee_ctrl_timer,
442 				    stmmac_eee_ctrl_timer, 0);
443 			mod_timer(&priv->eee_ctrl_timer,
444 				  STMMAC_LPI_T(eee_timer));
445 
446 			stmmac_set_eee_timer(priv, priv->hw,
447 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
448 		}
449 		/* Set HW EEE according to the speed */
450 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
451 
452 		ret = true;
453 		mutex_unlock(&priv->lock);
454 
455 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
456 	}
457 out:
458 	return ret;
459 }
460 
461 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
462  * @priv: driver private structure
463  * @p : descriptor pointer
464  * @skb : the socket buffer
465  * Description:
466  * This function reads the timestamp from the descriptor, performs some
467  * sanity checks and then passes it to the stack.
468  */
469 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
470 				   struct dma_desc *p, struct sk_buff *skb)
471 {
472 	struct skb_shared_hwtstamps shhwtstamp;
473 	u64 ns;
474 
475 	if (!priv->hwts_tx_en)
476 		return;
477 
478 	/* exit if skb doesn't support hw tstamp */
479 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
480 		return;
481 
482 	/* check tx tstamp status */
483 	if (stmmac_get_tx_timestamp_status(priv, p)) {
484 		/* get the valid tstamp */
485 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
486 
487 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
488 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
489 
490 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
491 		/* pass tstamp to stack */
492 		skb_tstamp_tx(skb, &shhwtstamp);
493 	}
494 
495 	return;
496 }
497 
498 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
499  * @priv: driver private structure
500  * @p : descriptor pointer
501  * @np : next descriptor pointer
502  * @skb : the socket buffer
503  * Description:
504  * This function reads the received packet's timestamp from the descriptor
505  * and passes it to the stack. It also performs some sanity checks.
506  */
507 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
508 				   struct dma_desc *np, struct sk_buff *skb)
509 {
510 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
511 	struct dma_desc *desc = p;
512 	u64 ns;
513 
514 	if (!priv->hwts_rx_en)
515 		return;
516 	/* For GMAC4, the valid timestamp is from CTX next desc. */
517 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
518 		desc = np;
519 
520 	/* Check if timestamp is available */
521 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
522 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
523 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
524 		shhwtstamp = skb_hwtstamps(skb);
525 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
526 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
527 	} else  {
528 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
529 	}
530 }
531 
532 /**
533  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
534  *  @dev: device pointer.
535  *  @ifr: An IOCTL specific structure, that can contain a pointer to
536  *  a proprietary structure used to pass information to the driver.
537  *  Description:
538  *  This function configures the MAC to enable/disable both outgoing(TX)
539  *  and incoming(RX) packets time stamping based on user input.
540  *  Return Value:
541  *  0 on success and an appropriate negative error code on failure.
542  */
543 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
544 {
545 	struct stmmac_priv *priv = netdev_priv(dev);
546 	struct hwtstamp_config config;
547 	struct timespec64 now;
548 	u64 temp = 0;
549 	u32 ptp_v2 = 0;
550 	u32 tstamp_all = 0;
551 	u32 ptp_over_ipv4_udp = 0;
552 	u32 ptp_over_ipv6_udp = 0;
553 	u32 ptp_over_ethernet = 0;
554 	u32 snap_type_sel = 0;
555 	u32 ts_master_en = 0;
556 	u32 ts_event_en = 0;
557 	u32 value = 0;
558 	u32 sec_inc;
559 	bool xmac;
560 
561 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
562 
563 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
564 		netdev_alert(priv->dev, "No support for HW time stamping\n");
565 		priv->hwts_tx_en = 0;
566 		priv->hwts_rx_en = 0;
567 
568 		return -EOPNOTSUPP;
569 	}
570 
571 	if (copy_from_user(&config, ifr->ifr_data,
572 			   sizeof(struct hwtstamp_config)))
573 		return -EFAULT;
574 
575 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
576 		   __func__, config.flags, config.tx_type, config.rx_filter);
577 
578 	/* reserved for future extensions */
579 	if (config.flags)
580 		return -EINVAL;
581 
582 	if (config.tx_type != HWTSTAMP_TX_OFF &&
583 	    config.tx_type != HWTSTAMP_TX_ON)
584 		return -ERANGE;
585 
586 	if (priv->adv_ts) {
587 		switch (config.rx_filter) {
588 		case HWTSTAMP_FILTER_NONE:
589 			/* time stamp no incoming packet at all */
590 			config.rx_filter = HWTSTAMP_FILTER_NONE;
591 			break;
592 
593 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
594 			/* PTP v1, UDP, any kind of event packet */
595 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
596 			/* take time stamp for all event messages */
597 			if (xmac)
598 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
599 			else
600 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
601 
602 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
603 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
604 			break;
605 
606 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
607 			/* PTP v1, UDP, Sync packet */
608 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
609 			/* take time stamp for SYNC messages only */
610 			ts_event_en = PTP_TCR_TSEVNTENA;
611 
612 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
613 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
614 			break;
615 
616 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
617 			/* PTP v1, UDP, Delay_req packet */
618 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
619 			/* take time stamp for Delay_Req messages only */
620 			ts_master_en = PTP_TCR_TSMSTRENA;
621 			ts_event_en = PTP_TCR_TSEVNTENA;
622 
623 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
624 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
625 			break;
626 
627 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
628 			/* PTP v2, UDP, any kind of event packet */
629 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
630 			ptp_v2 = PTP_TCR_TSVER2ENA;
631 			/* take time stamp for all event messages */
632 			if (xmac)
633 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
634 			else
635 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
636 
637 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
638 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
639 			break;
640 
641 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
642 			/* PTP v2, UDP, Sync packet */
643 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
644 			ptp_v2 = PTP_TCR_TSVER2ENA;
645 			/* take time stamp for SYNC messages only */
646 			ts_event_en = PTP_TCR_TSEVNTENA;
647 
648 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
649 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
650 			break;
651 
652 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
653 			/* PTP v2, UDP, Delay_req packet */
654 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
655 			ptp_v2 = PTP_TCR_TSVER2ENA;
656 			/* take time stamp for Delay_Req messages only */
657 			ts_master_en = PTP_TCR_TSMSTRENA;
658 			ts_event_en = PTP_TCR_TSEVNTENA;
659 
660 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662 			break;
663 
664 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
665 			/* PTP v2/802.AS1 any layer, any kind of event packet */
666 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
667 			ptp_v2 = PTP_TCR_TSVER2ENA;
668 			/* take time stamp for all event messages */
669 			if (xmac)
670 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
671 			else
672 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
673 
674 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
675 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
676 			ptp_over_ethernet = PTP_TCR_TSIPENA;
677 			break;
678 
679 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
680 			/* PTP v2/802.AS1, any layer, Sync packet */
681 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
682 			ptp_v2 = PTP_TCR_TSVER2ENA;
683 			/* take time stamp for SYNC messages only */
684 			ts_event_en = PTP_TCR_TSEVNTENA;
685 
686 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
687 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
688 			ptp_over_ethernet = PTP_TCR_TSIPENA;
689 			break;
690 
691 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
692 			/* PTP v2/802.AS1, any layer, Delay_req packet */
693 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
694 			ptp_v2 = PTP_TCR_TSVER2ENA;
695 			/* take time stamp for Delay_Req messages only */
696 			ts_master_en = PTP_TCR_TSMSTRENA;
697 			ts_event_en = PTP_TCR_TSEVNTENA;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			ptp_over_ethernet = PTP_TCR_TSIPENA;
702 			break;
703 
704 		case HWTSTAMP_FILTER_NTP_ALL:
705 		case HWTSTAMP_FILTER_ALL:
706 			/* time stamp any incoming packet */
707 			config.rx_filter = HWTSTAMP_FILTER_ALL;
708 			tstamp_all = PTP_TCR_TSENALL;
709 			break;
710 
711 		default:
712 			return -ERANGE;
713 		}
714 	} else {
715 		switch (config.rx_filter) {
716 		case HWTSTAMP_FILTER_NONE:
717 			config.rx_filter = HWTSTAMP_FILTER_NONE;
718 			break;
719 		default:
720 			/* PTP v1, UDP, any kind of event packet */
721 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
722 			break;
723 		}
724 	}
725 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
726 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
727 
728 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
729 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
730 	else {
731 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
732 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
733 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
734 			 ts_master_en | snap_type_sel);
735 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
736 
737 		/* program Sub Second Increment reg */
738 		stmmac_config_sub_second_increment(priv,
739 				priv->ptpaddr, priv->plat->clk_ptp_rate,
740 				xmac, &sec_inc);
741 		temp = div_u64(1000000000ULL, sec_inc);
742 
743 		/* Store sub second increment and flags for later use */
744 		priv->sub_second_inc = sec_inc;
745 		priv->systime_flags = value;
746 
747 		/* Calculate the default addend value:
748 		 * addend = (2^32 * freq_out) / clk_ptp_rate
749 		 * where freq_out = 10^9 / sec_inc is the required number of
750 		 * sub-second-register increments per second.
751 		 */
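		/* Worked example (hypothetical rates): with sec_inc = 20 ns
		 * and clk_ptp_rate = 100 MHz, temp = 10^9 / 20 = 50,000,000
		 * and addend = (50,000,000 << 32) / 100,000,000 = 2^31
		 * (0x80000000), i.e. the accumulator overflows once every two
		 * reference clock cycles and the sub-second register advances
		 * by 20 ns every 20 ns of PTP clock time.
		 */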
752 		temp = (u64)(temp << 32);
753 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
754 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
755 
756 		/* initialize system time */
757 		ktime_get_real_ts64(&now);
758 
759 		/* lower 32 bits of tv_sec are safe until y2106 */
760 		stmmac_init_systime(priv, priv->ptpaddr,
761 				(u32)now.tv_sec, now.tv_nsec);
762 	}
763 
764 	return copy_to_user(ifr->ifr_data, &config,
765 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
766 }
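/* For reference, a minimal userspace sketch (not part of this driver) showing
 * how the handler above is reached via the SIOCSHWTSTAMP ioctl; the interface
 * name and the chosen filter are arbitrary examples, and the usual
 * <linux/net_tstamp.h>, <linux/sockios.h> and <net/if.h> includes are assumed:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */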
767 
768 /**
769  * stmmac_init_ptp - init PTP
770  * @priv: driver private structure
771  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
772  * This is done by looking at the HW cap. register.
773  * This function also registers the ptp driver.
774  */
775 static int stmmac_init_ptp(struct stmmac_priv *priv)
776 {
777 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
778 
779 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
780 		return -EOPNOTSUPP;
781 
782 	priv->adv_ts = 0;
783 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
784 	if (xmac && priv->dma_cap.atime_stamp)
785 		priv->adv_ts = 1;
786 	/* Dwmac 3.x core with extend_desc can support adv_ts */
787 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
788 		priv->adv_ts = 1;
789 
790 	if (priv->dma_cap.time_stamp)
791 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
792 
793 	if (priv->adv_ts)
794 		netdev_info(priv->dev,
795 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
796 
797 	priv->hwts_tx_en = 0;
798 	priv->hwts_rx_en = 0;
799 
800 	stmmac_ptp_register(priv);
801 
802 	return 0;
803 }
804 
805 static void stmmac_release_ptp(struct stmmac_priv *priv)
806 {
807 	if (priv->plat->clk_ptp_ref)
808 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
809 	stmmac_ptp_unregister(priv);
810 }
811 
812 /**
813  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
814  *  @priv: driver private structure
815  *  Description: It is used for configuring the flow control in all queues
816  */
817 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
818 {
819 	u32 tx_cnt = priv->plat->tx_queues_to_use;
820 
821 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
822 			priv->pause, tx_cnt);
823 }
824 
825 /**
826  * stmmac_adjust_link - adjusts the link parameters
827  * @dev: net device structure
828  * Description: this is the helper called by the physical abstraction layer
829  * drivers to communicate the phy link status. According to the speed and
830  * duplex this driver can invoke registered glue-logic as well.
831  * It also invokes the EEE initialization because the link may be switched
832  * to a different network (that is EEE capable).
833  */
834 static void stmmac_adjust_link(struct net_device *dev)
835 {
836 	struct stmmac_priv *priv = netdev_priv(dev);
837 	struct phy_device *phydev = dev->phydev;
838 	bool new_state = false;
839 
840 	if (!phydev)
841 		return;
842 
843 	mutex_lock(&priv->lock);
844 
845 	if (phydev->link) {
846 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
847 
848 		/* Now we make sure that we can be in full duplex mode.
849 		 * If not, we operate in half-duplex mode. */
850 		if (phydev->duplex != priv->oldduplex) {
851 			new_state = true;
852 			if (!phydev->duplex)
853 				ctrl &= ~priv->hw->link.duplex;
854 			else
855 				ctrl |= priv->hw->link.duplex;
856 			priv->oldduplex = phydev->duplex;
857 		}
858 		/* Flow Control operation */
859 		if (phydev->pause)
860 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
861 
862 		if (phydev->speed != priv->speed) {
863 			new_state = true;
864 			ctrl &= ~priv->hw->link.speed_mask;
865 			switch (phydev->speed) {
866 			case SPEED_1000:
867 				ctrl |= priv->hw->link.speed1000;
868 				break;
869 			case SPEED_100:
870 				ctrl |= priv->hw->link.speed100;
871 				break;
872 			case SPEED_10:
873 				ctrl |= priv->hw->link.speed10;
874 				break;
875 			default:
876 				netif_warn(priv, link, priv->dev,
877 					   "broken speed: %d\n", phydev->speed);
878 				phydev->speed = SPEED_UNKNOWN;
879 				break;
880 			}
881 			if (phydev->speed != SPEED_UNKNOWN)
882 				stmmac_hw_fix_mac_speed(priv);
883 			priv->speed = phydev->speed;
884 		}
885 
886 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
887 
888 		if (!priv->oldlink) {
889 			new_state = true;
890 			priv->oldlink = true;
891 		}
892 	} else if (priv->oldlink) {
893 		new_state = true;
894 		priv->oldlink = false;
895 		priv->speed = SPEED_UNKNOWN;
896 		priv->oldduplex = DUPLEX_UNKNOWN;
897 	}
898 
899 	if (new_state && netif_msg_link(priv))
900 		phy_print_status(phydev);
901 
902 	mutex_unlock(&priv->lock);
903 
904 	if (phydev->is_pseudo_fixed_link)
905 		/* Stop the PHY layer from calling the adjust_link hook in
906 		 * case a switch is attached to the stmmac driver.
907 		 */
908 		phydev->irq = PHY_IGNORE_INTERRUPT;
909 	else
910 		/* At this stage, init the EEE if supported.
911 		 * Never called in case of fixed_link.
912 		 */
913 		priv->eee_enabled = stmmac_eee_init(priv);
914 }
915 
916 /**
917  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
918  * @priv: driver private structure
919  * Description: this is to verify if the HW supports the Physical Coding
920  * Sublayer (PCS) interface, which can be used when the MAC is
921  * configured for the TBI, RTBI, or SGMII PHY interface.
922  */
923 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
924 {
925 	int interface = priv->plat->interface;
926 
927 	if (priv->dma_cap.pcs) {
928 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
929 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
930 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
931 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
932 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
933 			priv->hw->pcs = STMMAC_PCS_RGMII;
934 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
935 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
936 			priv->hw->pcs = STMMAC_PCS_SGMII;
937 		}
938 	}
939 }
940 
941 /**
942  * stmmac_init_phy - PHY initialization
943  * @dev: net device structure
944  * Description: it initializes the driver's PHY state, and attaches the PHY
945  * to the mac driver.
946  *  Return value:
947  *  0 on success
948  */
949 static int stmmac_init_phy(struct net_device *dev)
950 {
951 	struct stmmac_priv *priv = netdev_priv(dev);
952 	u32 tx_cnt = priv->plat->tx_queues_to_use;
953 	struct phy_device *phydev;
954 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
955 	char bus_id[MII_BUS_ID_SIZE];
956 	int interface = priv->plat->interface;
957 	int max_speed = priv->plat->max_speed;
958 	priv->oldlink = false;
959 	priv->speed = SPEED_UNKNOWN;
960 	priv->oldduplex = DUPLEX_UNKNOWN;
961 
962 	if (priv->plat->phy_node) {
963 		phydev = of_phy_connect(dev, priv->plat->phy_node,
964 					&stmmac_adjust_link, 0, interface);
965 	} else {
966 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
967 			 priv->plat->bus_id);
968 
969 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
970 			 priv->plat->phy_addr);
971 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
972 			   phy_id_fmt);
973 
974 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
975 				     interface);
976 	}
977 
978 	if (IS_ERR_OR_NULL(phydev)) {
979 		netdev_err(priv->dev, "Could not attach to PHY\n");
980 		if (!phydev)
981 			return -ENODEV;
982 
983 		return PTR_ERR(phydev);
984 	}
985 
986 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
987 	if ((interface == PHY_INTERFACE_MODE_MII) ||
988 	    (interface == PHY_INTERFACE_MODE_RMII) ||
989 		(max_speed < 1000 && max_speed > 0))
990 		phy_set_max_speed(phydev, SPEED_100);
991 
992 	/*
993 	 * Half-duplex mode is not supported with multiqueue;
994 	 * half-duplex can only work with a single queue.
995 	 */
996 	if (tx_cnt > 1) {
997 		phy_remove_link_mode(phydev,
998 				     ETHTOOL_LINK_MODE_10baseT_Half_BIT);
999 		phy_remove_link_mode(phydev,
1000 				     ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1001 		phy_remove_link_mode(phydev,
1002 				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1003 	}
1004 
1005 	/*
1006 	 * Broken HW is sometimes missing the pull-up resistor on the
1007 	 * MDIO line, which results in reads to non-existent devices returning
1008 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
1009 	 * device as well.
1010 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
1011 	 */
1012 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
1013 		phy_disconnect(phydev);
1014 		return -ENODEV;
1015 	}
1016 
1017 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
1018 	 * subsequent PHY polling; make sure we force a link transition if
1019 	 * we have an UP/DOWN/UP transition
1020 	 */
1021 	if (phydev->is_pseudo_fixed_link)
1022 		phydev->irq = PHY_POLL;
1023 
1024 	phy_attached_info(phydev);
1025 	return 0;
1026 }
1027 
1028 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1029 {
1030 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1031 	void *head_rx;
1032 	u32 queue;
1033 
1034 	/* Display RX rings */
1035 	for (queue = 0; queue < rx_cnt; queue++) {
1036 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1037 
1038 		pr_info("\tRX Queue %u rings\n", queue);
1039 
1040 		if (priv->extend_desc)
1041 			head_rx = (void *)rx_q->dma_erx;
1042 		else
1043 			head_rx = (void *)rx_q->dma_rx;
1044 
1045 		/* Display RX ring */
1046 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1047 	}
1048 }
1049 
1050 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1051 {
1052 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1053 	void *head_tx;
1054 	u32 queue;
1055 
1056 	/* Display TX rings */
1057 	for (queue = 0; queue < tx_cnt; queue++) {
1058 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1059 
1060 		pr_info("\tTX Queue %d rings\n", queue);
1061 
1062 		if (priv->extend_desc)
1063 			head_tx = (void *)tx_q->dma_etx;
1064 		else
1065 			head_tx = (void *)tx_q->dma_tx;
1066 
1067 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1068 	}
1069 }
1070 
1071 static void stmmac_display_rings(struct stmmac_priv *priv)
1072 {
1073 	/* Display RX ring */
1074 	stmmac_display_rx_rings(priv);
1075 
1076 	/* Display TX ring */
1077 	stmmac_display_tx_rings(priv);
1078 }
1079 
1080 static int stmmac_set_bfsize(int mtu, int bufsize)
1081 {
1082 	int ret = bufsize;
1083 
1084 	if (mtu >= BUF_SIZE_4KiB)
1085 		ret = BUF_SIZE_8KiB;
1086 	else if (mtu >= BUF_SIZE_2KiB)
1087 		ret = BUF_SIZE_4KiB;
1088 	else if (mtu > DEFAULT_BUFSIZE)
1089 		ret = BUF_SIZE_2KiB;
1090 	else
1091 		ret = DEFAULT_BUFSIZE;
1092 
1093 	return ret;
1094 }
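/* Worked example: with the default 1536-byte bufsize, an MTU of 1500 keeps
 * DEFAULT_BUFSIZE, an MTU of 3000 selects BUF_SIZE_4KiB, and a 9000-byte
 * jumbo MTU selects BUF_SIZE_8KiB.
 */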
1095 
1096 /**
1097  * stmmac_clear_rx_descriptors - clear RX descriptors
1098  * @priv: driver private structure
1099  * @queue: RX queue index
1100  * Description: this function is called to clear the RX descriptors,
1101  * whether basic or extended descriptors are used.
1102  */
1103 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1104 {
1105 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1106 	int i;
1107 
1108 	/* Clear the RX descriptors */
1109 	for (i = 0; i < DMA_RX_SIZE; i++)
1110 		if (priv->extend_desc)
1111 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1112 					priv->use_riwt, priv->mode,
1113 					(i == DMA_RX_SIZE - 1));
1114 		else
1115 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1116 					priv->use_riwt, priv->mode,
1117 					(i == DMA_RX_SIZE - 1));
1118 }
1119 
1120 /**
1121  * stmmac_clear_tx_descriptors - clear tx descriptors
1122  * @priv: driver private structure
1123  * @queue: TX queue index.
1124  * Description: this function is called to clear the TX descriptors,
1125  * whether basic or extended descriptors are used.
1126  */
1127 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1128 {
1129 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1130 	int i;
1131 
1132 	/* Clear the TX descriptors */
1133 	for (i = 0; i < DMA_TX_SIZE; i++)
1134 		if (priv->extend_desc)
1135 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1136 					priv->mode, (i == DMA_TX_SIZE - 1));
1137 		else
1138 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1139 					priv->mode, (i == DMA_TX_SIZE - 1));
1140 }
1141 
1142 /**
1143  * stmmac_clear_descriptors - clear descriptors
1144  * @priv: driver private structure
1145  * Description: this function is called to clear the TX and RX descriptors,
1146  * whether basic or extended descriptors are used.
1147  */
1148 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1149 {
1150 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1151 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1152 	u32 queue;
1153 
1154 	/* Clear the RX descriptors */
1155 	for (queue = 0; queue < rx_queue_cnt; queue++)
1156 		stmmac_clear_rx_descriptors(priv, queue);
1157 
1158 	/* Clear the TX descriptors */
1159 	for (queue = 0; queue < tx_queue_cnt; queue++)
1160 		stmmac_clear_tx_descriptors(priv, queue);
1161 }
1162 
1163 /**
1164  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1165  * @priv: driver private structure
1166  * @p: descriptor pointer
1167  * @i: descriptor index
1168  * @flags: gfp flag
1169  * @queue: RX queue index
1170  * Description: this function is called to allocate a receive buffer, perform
1171  * the DMA mapping and init the descriptor.
1172  */
1173 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1174 				  int i, gfp_t flags, u32 queue)
1175 {
1176 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1177 	struct sk_buff *skb;
1178 
1179 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1180 	if (!skb) {
1181 		netdev_err(priv->dev,
1182 			   "%s: Rx init fails; skb is NULL\n", __func__);
1183 		return -ENOMEM;
1184 	}
1185 	rx_q->rx_skbuff[i] = skb;
1186 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1187 						priv->dma_buf_sz,
1188 						DMA_FROM_DEVICE);
1189 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1190 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1191 		dev_kfree_skb_any(skb);
1192 		return -EINVAL;
1193 	}
1194 
1195 	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1196 
1197 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1198 		stmmac_init_desc3(priv, p);
1199 
1200 	return 0;
1201 }
1202 
1203 /**
1204  * stmmac_free_rx_buffer - free RX dma buffers
1205  * @priv: private structure
1206  * @queue: RX queue index
1207  * @i: buffer index.
1208  */
1209 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1210 {
1211 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1212 
1213 	if (rx_q->rx_skbuff[i]) {
1214 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1215 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1216 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1217 	}
1218 	rx_q->rx_skbuff[i] = NULL;
1219 }
1220 
1221 /**
1222  * stmmac_free_tx_buffer - free TX dma buffers
1223  * @priv: private structure
1224  * @queue: TX queue index
1225  * @i: buffer index.
1226  */
1227 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1228 {
1229 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1230 
1231 	if (tx_q->tx_skbuff_dma[i].buf) {
1232 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1233 			dma_unmap_page(priv->device,
1234 				       tx_q->tx_skbuff_dma[i].buf,
1235 				       tx_q->tx_skbuff_dma[i].len,
1236 				       DMA_TO_DEVICE);
1237 		else
1238 			dma_unmap_single(priv->device,
1239 					 tx_q->tx_skbuff_dma[i].buf,
1240 					 tx_q->tx_skbuff_dma[i].len,
1241 					 DMA_TO_DEVICE);
1242 	}
1243 
1244 	if (tx_q->tx_skbuff[i]) {
1245 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1246 		tx_q->tx_skbuff[i] = NULL;
1247 		tx_q->tx_skbuff_dma[i].buf = 0;
1248 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1249 	}
1250 }
1251 
1252 /**
1253  * init_dma_rx_desc_rings - init the RX descriptor rings
1254  * @dev: net device structure
1255  * @flags: gfp flag.
1256  * Description: this function initializes the DMA RX descriptors
1257  * and allocates the socket buffers. It supports the chained and ring
1258  * modes.
1259  */
1260 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1261 {
1262 	struct stmmac_priv *priv = netdev_priv(dev);
1263 	u32 rx_count = priv->plat->rx_queues_to_use;
1264 	int ret = -ENOMEM;
1265 	int bfsize = 0;
1266 	int queue;
1267 	int i;
1268 
1269 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1270 	if (bfsize < 0)
1271 		bfsize = 0;
1272 
1273 	if (bfsize < BUF_SIZE_16KiB)
1274 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1275 
1276 	priv->dma_buf_sz = bfsize;
1277 
1278 	/* RX INITIALIZATION */
1279 	netif_dbg(priv, probe, priv->dev,
1280 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1281 
1282 	for (queue = 0; queue < rx_count; queue++) {
1283 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1284 
1285 		netif_dbg(priv, probe, priv->dev,
1286 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1287 			  (u32)rx_q->dma_rx_phy);
1288 
1289 		for (i = 0; i < DMA_RX_SIZE; i++) {
1290 			struct dma_desc *p;
1291 
1292 			if (priv->extend_desc)
1293 				p = &((rx_q->dma_erx + i)->basic);
1294 			else
1295 				p = rx_q->dma_rx + i;
1296 
1297 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1298 						     queue);
1299 			if (ret)
1300 				goto err_init_rx_buffers;
1301 
1302 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1303 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1304 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1305 		}
1306 
1307 		rx_q->cur_rx = 0;
1308 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1309 
1310 		stmmac_clear_rx_descriptors(priv, queue);
1311 
1312 		/* Setup the chained descriptor addresses */
1313 		if (priv->mode == STMMAC_CHAIN_MODE) {
1314 			if (priv->extend_desc)
1315 				stmmac_mode_init(priv, rx_q->dma_erx,
1316 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1317 			else
1318 				stmmac_mode_init(priv, rx_q->dma_rx,
1319 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1320 		}
1321 	}
1322 
1323 	buf_sz = bfsize;
1324 
1325 	return 0;
1326 
1327 err_init_rx_buffers:
1328 	while (queue >= 0) {
1329 		while (--i >= 0)
1330 			stmmac_free_rx_buffer(priv, queue, i);
1331 
1332 		if (queue == 0)
1333 			break;
1334 
1335 		i = DMA_RX_SIZE;
1336 		queue--;
1337 	}
1338 
1339 	return ret;
1340 }
1341 
1342 /**
1343  * init_dma_tx_desc_rings - init the TX descriptor rings
1344  * @dev: net device structure.
1345  * Description: this function initializes the DMA TX descriptors
1346  * and allocates the socket buffers. It supports the chained and ring
1347  * modes.
1348  */
1349 static int init_dma_tx_desc_rings(struct net_device *dev)
1350 {
1351 	struct stmmac_priv *priv = netdev_priv(dev);
1352 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1353 	u32 queue;
1354 	int i;
1355 
1356 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1357 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1358 
1359 		netif_dbg(priv, probe, priv->dev,
1360 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1361 			 (u32)tx_q->dma_tx_phy);
1362 
1363 		/* Setup the chained descriptor addresses */
1364 		if (priv->mode == STMMAC_CHAIN_MODE) {
1365 			if (priv->extend_desc)
1366 				stmmac_mode_init(priv, tx_q->dma_etx,
1367 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1368 			else
1369 				stmmac_mode_init(priv, tx_q->dma_tx,
1370 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1371 		}
1372 
1373 		for (i = 0; i < DMA_TX_SIZE; i++) {
1374 			struct dma_desc *p;
1375 			if (priv->extend_desc)
1376 				p = &((tx_q->dma_etx + i)->basic);
1377 			else
1378 				p = tx_q->dma_tx + i;
1379 
1380 			stmmac_clear_desc(priv, p);
1381 
1382 			tx_q->tx_skbuff_dma[i].buf = 0;
1383 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1384 			tx_q->tx_skbuff_dma[i].len = 0;
1385 			tx_q->tx_skbuff_dma[i].last_segment = false;
1386 			tx_q->tx_skbuff[i] = NULL;
1387 		}
1388 
1389 		tx_q->dirty_tx = 0;
1390 		tx_q->cur_tx = 0;
1391 		tx_q->mss = 0;
1392 
1393 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1394 	}
1395 
1396 	return 0;
1397 }
1398 
1399 /**
1400  * init_dma_desc_rings - init the RX/TX descriptor rings
1401  * @dev: net device structure
1402  * @flags: gfp flag.
1403  * Description: this function initializes the DMA RX/TX descriptors
1404  * and allocates the socket buffers. It supports the chained and ring
1405  * modes.
1406  */
1407 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1408 {
1409 	struct stmmac_priv *priv = netdev_priv(dev);
1410 	int ret;
1411 
1412 	ret = init_dma_rx_desc_rings(dev, flags);
1413 	if (ret)
1414 		return ret;
1415 
1416 	ret = init_dma_tx_desc_rings(dev);
1417 
1418 	stmmac_clear_descriptors(priv);
1419 
1420 	if (netif_msg_hw(priv))
1421 		stmmac_display_rings(priv);
1422 
1423 	return ret;
1424 }
1425 
1426 /**
1427  * dma_free_rx_skbufs - free RX dma buffers
1428  * @priv: private structure
1429  * @queue: RX queue index
1430  */
1431 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1432 {
1433 	int i;
1434 
1435 	for (i = 0; i < DMA_RX_SIZE; i++)
1436 		stmmac_free_rx_buffer(priv, queue, i);
1437 }
1438 
1439 /**
1440  * dma_free_tx_skbufs - free TX dma buffers
1441  * @priv: private structure
1442  * @queue: TX queue index
1443  */
1444 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1445 {
1446 	int i;
1447 
1448 	for (i = 0; i < DMA_TX_SIZE; i++)
1449 		stmmac_free_tx_buffer(priv, queue, i);
1450 }
1451 
1452 /**
1453  * free_dma_rx_desc_resources - free RX dma desc resources
1454  * @priv: private structure
1455  */
1456 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1457 {
1458 	u32 rx_count = priv->plat->rx_queues_to_use;
1459 	u32 queue;
1460 
1461 	/* Free RX queue resources */
1462 	for (queue = 0; queue < rx_count; queue++) {
1463 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1464 
1465 		/* Release the DMA RX socket buffers */
1466 		dma_free_rx_skbufs(priv, queue);
1467 
1468 		/* Free DMA regions of consistent memory previously allocated */
1469 		if (!priv->extend_desc)
1470 			dma_free_coherent(priv->device,
1471 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1472 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1473 		else
1474 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1475 					  sizeof(struct dma_extended_desc),
1476 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1477 
1478 		kfree(rx_q->rx_skbuff_dma);
1479 		kfree(rx_q->rx_skbuff);
1480 	}
1481 }
1482 
1483 /**
1484  * free_dma_tx_desc_resources - free TX dma desc resources
1485  * @priv: private structure
1486  */
1487 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1488 {
1489 	u32 tx_count = priv->plat->tx_queues_to_use;
1490 	u32 queue;
1491 
1492 	/* Free TX queue resources */
1493 	for (queue = 0; queue < tx_count; queue++) {
1494 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1495 
1496 		/* Release the DMA TX socket buffers */
1497 		dma_free_tx_skbufs(priv, queue);
1498 
1499 		/* Free DMA regions of consistent memory previously allocated */
1500 		if (!priv->extend_desc)
1501 			dma_free_coherent(priv->device,
1502 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1503 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1504 		else
1505 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1506 					  sizeof(struct dma_extended_desc),
1507 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1508 
1509 		kfree(tx_q->tx_skbuff_dma);
1510 		kfree(tx_q->tx_skbuff);
1511 	}
1512 }
1513 
1514 /**
1515  * alloc_dma_rx_desc_resources - alloc RX resources.
1516  * @priv: private structure
1517  * Description: according to which descriptor can be used (extended or basic)
1518  * this function allocates the resources for the RX path. For example, it
1519  * pre-allocates the RX socket buffers in order to allow the zero-copy
1520  * mechanism.
1521  */
1522 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1523 {
1524 	u32 rx_count = priv->plat->rx_queues_to_use;
1525 	int ret = -ENOMEM;
1526 	u32 queue;
1527 
1528 	/* RX queues buffers and DMA */
1529 	for (queue = 0; queue < rx_count; queue++) {
1530 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1531 
1532 		rx_q->queue_index = queue;
1533 		rx_q->priv_data = priv;
1534 
1535 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1536 						    sizeof(dma_addr_t),
1537 						    GFP_KERNEL);
1538 		if (!rx_q->rx_skbuff_dma)
1539 			goto err_dma;
1540 
1541 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1542 						sizeof(struct sk_buff *),
1543 						GFP_KERNEL);
1544 		if (!rx_q->rx_skbuff)
1545 			goto err_dma;
1546 
1547 		if (priv->extend_desc) {
1548 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1549 							    DMA_RX_SIZE *
1550 							    sizeof(struct
1551 							    dma_extended_desc),
1552 							    &rx_q->dma_rx_phy,
1553 							    GFP_KERNEL);
1554 			if (!rx_q->dma_erx)
1555 				goto err_dma;
1556 
1557 		} else {
1558 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1559 							   DMA_RX_SIZE *
1560 							   sizeof(struct
1561 							   dma_desc),
1562 							   &rx_q->dma_rx_phy,
1563 							   GFP_KERNEL);
1564 			if (!rx_q->dma_rx)
1565 				goto err_dma;
1566 		}
1567 	}
1568 
1569 	return 0;
1570 
1571 err_dma:
1572 	free_dma_rx_desc_resources(priv);
1573 
1574 	return ret;
1575 }
1576 
1577 /**
1578  * alloc_dma_tx_desc_resources - alloc TX resources.
1579  * @priv: private structure
1580  * Description: according to which descriptor can be used (extended or basic)
1581  * this function allocates the resources for the TX path: the TX descriptor
1582  * rings and the per-descriptor bookkeeping arrays for the queued socket
1583  * buffers and their DMA mappings.
1584  */
1585 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1586 {
1587 	u32 tx_count = priv->plat->tx_queues_to_use;
1588 	int ret = -ENOMEM;
1589 	u32 queue;
1590 
1591 	/* TX queues buffers and DMA */
1592 	for (queue = 0; queue < tx_count; queue++) {
1593 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1594 
1595 		tx_q->queue_index = queue;
1596 		tx_q->priv_data = priv;
1597 
1598 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1599 						    sizeof(*tx_q->tx_skbuff_dma),
1600 						    GFP_KERNEL);
1601 		if (!tx_q->tx_skbuff_dma)
1602 			goto err_dma;
1603 
1604 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1605 						sizeof(struct sk_buff *),
1606 						GFP_KERNEL);
1607 		if (!tx_q->tx_skbuff)
1608 			goto err_dma;
1609 
1610 		if (priv->extend_desc) {
1611 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1612 							    DMA_TX_SIZE *
1613 							    sizeof(struct
1614 							    dma_extended_desc),
1615 							    &tx_q->dma_tx_phy,
1616 							    GFP_KERNEL);
1617 			if (!tx_q->dma_etx)
1618 				goto err_dma;
1619 		} else {
1620 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1621 							   DMA_TX_SIZE *
1622 							   sizeof(struct
1623 								  dma_desc),
1624 							   &tx_q->dma_tx_phy,
1625 							   GFP_KERNEL);
1626 			if (!tx_q->dma_tx)
1627 				goto err_dma;
1628 		}
1629 	}
1630 
1631 	return 0;
1632 
1633 err_dma:
1634 	free_dma_tx_desc_resources(priv);
1635 
1636 	return ret;
1637 }
1638 
1639 /**
1640  * alloc_dma_desc_resources - alloc TX/RX resources.
1641  * @priv: private structure
1642  * Description: according to which descriptor can be used (extended or basic)
1643  * this function allocates the resources for TX and RX paths. In case of
1644  * reception, for example, it pre-allocates the RX socket buffers in order to
1645  * allow the zero-copy mechanism.
1646  */
1647 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1648 {
1649 	/* RX Allocation */
1650 	int ret = alloc_dma_rx_desc_resources(priv);
1651 
1652 	if (ret)
1653 		return ret;
1654 
1655 	ret = alloc_dma_tx_desc_resources(priv);
1656 
1657 	return ret;
1658 }
1659 
1660 /**
1661  * free_dma_desc_resources - free dma desc resources
1662  * @priv: private structure
1663  */
1664 static void free_dma_desc_resources(struct stmmac_priv *priv)
1665 {
1666 	/* Release the DMA RX socket buffers */
1667 	free_dma_rx_desc_resources(priv);
1668 
1669 	/* Release the DMA TX socket buffers */
1670 	free_dma_tx_desc_resources(priv);
1671 }
1672 
1673 /**
1674  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1675  *  @priv: driver private structure
1676  *  Description: It is used for enabling the rx queues in the MAC
1677  */
1678 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1679 {
1680 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1681 	int queue;
1682 	u8 mode;
1683 
1684 	for (queue = 0; queue < rx_queues_count; queue++) {
1685 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1686 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1687 	}
1688 }
1689 
1690 /**
1691  * stmmac_start_rx_dma - start RX DMA channel
1692  * @priv: driver private structure
1693  * @chan: RX channel index
1694  * Description:
1695  * This starts a RX DMA channel
1696  */
1697 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1698 {
1699 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1700 	stmmac_start_rx(priv, priv->ioaddr, chan);
1701 }
1702 
1703 /**
1704  * stmmac_start_tx_dma - start TX DMA channel
1705  * @priv: driver private structure
1706  * @chan: TX channel index
1707  * Description:
1708  * This starts a TX DMA channel
1709  */
1710 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1711 {
1712 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1713 	stmmac_start_tx(priv, priv->ioaddr, chan);
1714 }
1715 
1716 /**
1717  * stmmac_stop_rx_dma - stop RX DMA channel
1718  * @priv: driver private structure
1719  * @chan: RX channel index
1720  * Description:
1721  * This stops a RX DMA channel
1722  */
1723 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1724 {
1725 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1726 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1727 }
1728 
1729 /**
1730  * stmmac_stop_tx_dma - stop TX DMA channel
1731  * @priv: driver private structure
1732  * @chan: TX channel index
1733  * Description:
1734  * This stops a TX DMA channel
1735  */
1736 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1737 {
1738 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1739 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1740 }
1741 
1742 /**
1743  * stmmac_start_all_dma - start all RX and TX DMA channels
1744  * @priv: driver private structure
1745  * Description:
1746  * This starts all the RX and TX DMA channels
1747  */
1748 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1749 {
1750 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1751 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1752 	u32 chan = 0;
1753 
1754 	for (chan = 0; chan < rx_channels_count; chan++)
1755 		stmmac_start_rx_dma(priv, chan);
1756 
1757 	for (chan = 0; chan < tx_channels_count; chan++)
1758 		stmmac_start_tx_dma(priv, chan);
1759 }
1760 
1761 /**
1762  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1763  * @priv: driver private structure
1764  * Description:
1765  * This stops the RX and TX DMA channels
1766  */
1767 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1768 {
1769 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1770 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1771 	u32 chan = 0;
1772 
1773 	for (chan = 0; chan < rx_channels_count; chan++)
1774 		stmmac_stop_rx_dma(priv, chan);
1775 
1776 	for (chan = 0; chan < tx_channels_count; chan++)
1777 		stmmac_stop_tx_dma(priv, chan);
1778 }
1779 
1780 /**
1781  *  stmmac_dma_operation_mode - HW DMA operation mode
1782  *  @priv: driver private structure
1783  *  Description: it is used for configuring the DMA operation mode register in
1784  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1785  */
1786 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1787 {
1788 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1789 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1790 	int rxfifosz = priv->plat->rx_fifo_size;
1791 	int txfifosz = priv->plat->tx_fifo_size;
1792 	u32 txmode = 0;
1793 	u32 rxmode = 0;
1794 	u32 chan = 0;
1795 	u8 qmode = 0;
1796 
1797 	if (rxfifosz == 0)
1798 		rxfifosz = priv->dma_cap.rx_fifo_size;
1799 	if (txfifosz == 0)
1800 		txfifosz = priv->dma_cap.tx_fifo_size;
1801 
1802 	/* Adjust for real per queue fifo size */
1803 	rxfifosz /= rx_channels_count;
1804 	txfifosz /= tx_channels_count;
1805 
1806 	if (priv->plat->force_thresh_dma_mode) {
1807 		txmode = tc;
1808 		rxmode = tc;
1809 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1810 		/*
1811 		 * In case of GMAC, SF mode can be enabled
1812 		 * to perform the TX COE in HW. This depends on:
1813 		 * 1) TX COE is actually supported
1814 		 * 2) there is no buggy Jumbo frame support
1815 		 *    that requires not inserting the csum in the TDES.
1816 		 */
1817 		txmode = SF_DMA_MODE;
1818 		rxmode = SF_DMA_MODE;
1819 		priv->xstats.threshold = SF_DMA_MODE;
1820 	} else {
1821 		txmode = tc;
1822 		rxmode = SF_DMA_MODE;
1823 	}
1824 
1825 	/* configure all channels */
1826 	for (chan = 0; chan < rx_channels_count; chan++) {
1827 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1828 
1829 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1830 				rxfifosz, qmode);
1831 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1832 				chan);
1833 	}
1834 
1835 	for (chan = 0; chan < tx_channels_count; chan++) {
1836 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1837 
1838 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1839 				txfifosz, qmode);
1840 	}
1841 }
1842 
1843 /**
1844  * stmmac_tx_clean - to manage the transmission completion
1845  * @priv: driver private structure
1846  * @queue: TX queue index
1847  * Description: it reclaims the transmit resources after transmission completes.
1848  */
1849 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1850 {
1851 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1852 	unsigned int bytes_compl = 0, pkts_compl = 0;
1853 	unsigned int entry;
1854 
1855 	netif_tx_lock(priv->dev);
1856 
1857 	priv->xstats.tx_clean++;
1858 
1859 	entry = tx_q->dirty_tx;
1860 	while (entry != tx_q->cur_tx) {
1861 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1862 		struct dma_desc *p;
1863 		int status;
1864 
1865 		if (priv->extend_desc)
1866 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1867 		else
1868 			p = tx_q->dma_tx + entry;
1869 
1870 		status = stmmac_tx_status(priv, &priv->dev->stats,
1871 				&priv->xstats, p, priv->ioaddr);
1872 		/* Check if the descriptor is owned by the DMA */
1873 		if (unlikely(status & tx_dma_own))
1874 			break;
1875 
1876 		/* Make sure descriptor fields are read after reading
1877 		 * the own bit.
1878 		 */
1879 		dma_rmb();
1880 
1881 		/* Just consider the last segment and ...*/
1882 		if (likely(!(status & tx_not_ls))) {
1883 			/* ... verify the status error condition */
1884 			if (unlikely(status & tx_err)) {
1885 				priv->dev->stats.tx_errors++;
1886 			} else {
1887 				priv->dev->stats.tx_packets++;
1888 				priv->xstats.tx_pkt_n++;
1889 			}
1890 			stmmac_get_tx_hwtstamp(priv, p, skb);
1891 		}
1892 
1893 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1894 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1895 				dma_unmap_page(priv->device,
1896 					       tx_q->tx_skbuff_dma[entry].buf,
1897 					       tx_q->tx_skbuff_dma[entry].len,
1898 					       DMA_TO_DEVICE);
1899 			else
1900 				dma_unmap_single(priv->device,
1901 						 tx_q->tx_skbuff_dma[entry].buf,
1902 						 tx_q->tx_skbuff_dma[entry].len,
1903 						 DMA_TO_DEVICE);
1904 			tx_q->tx_skbuff_dma[entry].buf = 0;
1905 			tx_q->tx_skbuff_dma[entry].len = 0;
1906 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1907 		}
1908 
1909 		stmmac_clean_desc3(priv, tx_q, p);
1910 
1911 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1912 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1913 
1914 		if (likely(skb != NULL)) {
1915 			pkts_compl++;
1916 			bytes_compl += skb->len;
1917 			dev_consume_skb_any(skb);
1918 			tx_q->tx_skbuff[entry] = NULL;
1919 		}
1920 
1921 		stmmac_release_tx_desc(priv, p, priv->mode);
1922 
1923 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1924 	}
1925 	tx_q->dirty_tx = entry;
1926 
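	/* Report the completed packets and bytes to the BQL layer so that the
	 * TX queue byte limit can adapt to the completion rate.
	 */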
1927 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1928 				  pkts_compl, bytes_compl);
1929 
1930 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1931 								queue))) &&
1932 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1933 
1934 		netif_dbg(priv, tx_done, priv->dev,
1935 			  "%s: restart transmit\n", __func__);
1936 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1937 	}
1938 
1939 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1940 		stmmac_enable_eee_mode(priv);
1941 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1942 	}
1943 	netif_tx_unlock(priv->dev);
1944 }
1945 
1946 /**
1947  * stmmac_tx_err - to manage the tx error
1948  * @priv: driver private structure
1949  * @chan: channel index
1950  * Description: it cleans the descriptors and restarts the transmission
1951  * in case of transmission errors.
1952  */
1953 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1954 {
1955 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1956 	int i;
1957 
1958 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1959 
1960 	stmmac_stop_tx_dma(priv, chan);
1961 	dma_free_tx_skbufs(priv, chan);
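	/* Reinitialize every descriptor in the ring so the channel restarts
	 * from a clean state.
	 */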
1962 	for (i = 0; i < DMA_TX_SIZE; i++)
1963 		if (priv->extend_desc)
1964 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1965 					priv->mode, (i == DMA_TX_SIZE - 1));
1966 		else
1967 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1968 					priv->mode, (i == DMA_TX_SIZE - 1));
1969 	tx_q->dirty_tx = 0;
1970 	tx_q->cur_tx = 0;
1971 	tx_q->mss = 0;
1972 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1973 	stmmac_start_tx_dma(priv, chan);
1974 
1975 	priv->dev->stats.tx_errors++;
1976 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1977 }
1978 
1979 /**
1980  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1981  *  @priv: driver private structure
1982  *  @txmode: TX operating mode
1983  *  @rxmode: RX operating mode
1984  *  @chan: channel index
1985  *  Description: it is used for configuring the DMA operation mode at
1986  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1987  *  mode.
1988  */
1989 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1990 					  u32 rxmode, u32 chan)
1991 {
1992 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1993 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1994 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1995 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1996 	int rxfifosz = priv->plat->rx_fifo_size;
1997 	int txfifosz = priv->plat->tx_fifo_size;
1998 
1999 	if (rxfifosz == 0)
2000 		rxfifosz = priv->dma_cap.rx_fifo_size;
2001 	if (txfifosz == 0)
2002 		txfifosz = priv->dma_cap.tx_fifo_size;
2003 
2004 	/* Adjust for real per queue fifo size */
2005 	rxfifosz /= rx_channels_count;
2006 	txfifosz /= tx_channels_count;
2007 
2008 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2009 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2010 }
2011 
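/* Check the safety feature interrupt status and, if an error is reported,
 * trigger the driver global error handling.
 */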
2012 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2013 {
2014 	int ret;
2015 
2016 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2017 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2018 	if (ret && (ret != -EINVAL)) {
2019 		stmmac_global_err(priv);
2020 		return true;
2021 	}
2022 
2023 	return false;
2024 }
2025 
2026 /**
2027  * stmmac_dma_interrupt - DMA ISR
2028  * @priv: driver private structure
2029  * Description: this is the DMA ISR. It is called by the main ISR.
2030  * It calls the dwmac DMA routine and schedules the poll method when there
2031  * is work to be done.
2032  */
2033 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2034 {
2035 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2036 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2037 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2038 				tx_channel_count : rx_channel_count;
2039 	u32 chan;
2040 	bool poll_scheduled = false;
2041 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2042 
2043 	/* Make sure we never check beyond our status buffer. */
2044 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2045 		channels_to_check = ARRAY_SIZE(status);
2046 
2047 	/* Each DMA channel can be used for rx and tx simultaneously, yet
2048 	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2049 	 * stmmac_channel struct.
2050 	 * Because of this, stmmac_poll currently checks (and possibly wakes)
2051 	 * all tx queues rather than just a single tx queue.
2052 	 */
2053 	for (chan = 0; chan < channels_to_check; chan++)
2054 		status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2055 				&priv->xstats, chan);
2056 
2057 	for (chan = 0; chan < rx_channel_count; chan++) {
2058 		if (likely(status[chan] & handle_rx)) {
2059 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2060 
2061 			if (likely(napi_schedule_prep(&rx_q->napi))) {
2062 				stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2063 				__napi_schedule(&rx_q->napi);
2064 				poll_scheduled = true;
2065 			}
2066 		}
2067 	}
2068 
2069 	/* If we scheduled poll, we already know that tx queues will be checked.
2070 	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2071 	 * completed transmission and, if so, call stmmac_poll (once).
2072 	 */
2073 	if (!poll_scheduled) {
2074 		for (chan = 0; chan < tx_channel_count; chan++) {
2075 			if (status[chan] & handle_tx) {
2076 				/* It doesn't matter what rx queue we choose
2077 				 * here. We use 0 since it always exists.
2078 				 */
2079 				struct stmmac_rx_queue *rx_q =
2080 					&priv->rx_queue[0];
2081 
2082 				if (likely(napi_schedule_prep(&rx_q->napi))) {
2083 					stmmac_disable_dma_irq(priv,
2084 							priv->ioaddr, chan);
2085 					__napi_schedule(&rx_q->napi);
2086 				}
2087 				break;
2088 			}
2089 		}
2090 	}
2091 
2092 	for (chan = 0; chan < tx_channel_count; chan++) {
2093 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2094 			/* Try to bump up the dma threshold on this failure */
2095 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2096 			    (tc <= 256)) {
2097 				tc += 64;
2098 				if (priv->plat->force_thresh_dma_mode)
2099 					stmmac_set_dma_operation_mode(priv,
2100 								      tc,
2101 								      tc,
2102 								      chan);
2103 				else
2104 					stmmac_set_dma_operation_mode(priv,
2105 								    tc,
2106 								    SF_DMA_MODE,
2107 								    chan);
2108 				priv->xstats.threshold = tc;
2109 			}
2110 		} else if (unlikely(status[chan] == tx_hard_error)) {
2111 			stmmac_tx_err(priv, chan);
2112 		}
2113 	}
2114 }
2115 
2116 /**
2117  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2118  * @priv: driver private structure
2119  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2120  */
2121 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2122 {
2123 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2124 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2125 
2126 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2127 
2128 	if (priv->dma_cap.rmon) {
2129 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2130 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2131 	} else
2132 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2133 }
2134 
2135 /**
2136  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2137  * @priv: driver private structure
2138  * Description:
2139  *  newer GMAC chip generations have a register to indicate the
2140  *  presence of optional features/functions.
2141  *  This can also be used to override the value passed through the
2142  *  platform code, which is necessary for the old MAC10/100 and GMAC chips.
2143  */
2144 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2145 {
2146 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2147 }
2148 
2149 /**
2150  * stmmac_check_ether_addr - check if the MAC addr is valid
2151  * @priv: driver private structure
2152  * Description:
2153  * it verifies whether the MAC address is valid; if it is not, a random
2154  * MAC address is generated
2155  */
2156 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2157 {
2158 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2159 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2160 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2161 			eth_hw_addr_random(priv->dev);
2162 		netdev_info(priv->dev, "device MAC address %pM\n",
2163 			    priv->dev->dev_addr);
2164 	}
2165 }
2166 
2167 /**
2168  * stmmac_init_dma_engine - DMA init.
2169  * @priv: driver private structure
2170  * Description:
2171  * It initializes the DMA by invoking the specific MAC/GMAC callback.
2172  * Some DMA parameters can be passed from the platform;
2173  * if they are not passed, a default is used for the MAC or GMAC.
2174  */
2175 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2176 {
2177 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2178 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2179 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2180 	struct stmmac_rx_queue *rx_q;
2181 	struct stmmac_tx_queue *tx_q;
2182 	u32 chan = 0;
2183 	int atds = 0;
2184 	int ret = 0;
2185 
2186 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2187 		dev_err(priv->device, "Invalid DMA configuration\n");
2188 		return -EINVAL;
2189 	}
2190 
2191 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2192 		atds = 1;
2193 
2194 	ret = stmmac_reset(priv, priv->ioaddr);
2195 	if (ret) {
2196 		dev_err(priv->device, "Failed to reset the dma\n");
2197 		return ret;
2198 	}
2199 
2200 	/* DMA Configuration */
2201 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2202 
2203 	if (priv->plat->axi)
2204 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2205 
2206 	/* DMA RX Channel Configuration */
2207 	for (chan = 0; chan < rx_channels_count; chan++) {
2208 		rx_q = &priv->rx_queue[chan];
2209 
2210 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2211 				    rx_q->dma_rx_phy, chan);
2212 
2213 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2214 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2215 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2216 				       rx_q->rx_tail_addr, chan);
2217 	}
2218 
2219 	/* DMA TX Channel Configuration */
2220 	for (chan = 0; chan < tx_channels_count; chan++) {
2221 		tx_q = &priv->tx_queue[chan];
2222 
2223 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2224 				    tx_q->dma_tx_phy, chan);
2225 
2226 		tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2227 			    (DMA_TX_SIZE * sizeof(struct dma_desc));
2228 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2229 				       tx_q->tx_tail_addr, chan);
2230 	}
2231 
2232 	/* DMA CSR Channel configuration */
2233 	for (chan = 0; chan < dma_csr_ch; chan++)
2234 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2235 
2236 	return ret;
2237 }
2238 
2239 /**
2240  * stmmac_tx_timer - mitigation sw timer for tx.
2241  * @t: pointer to the timer_list structure
2242  * Description:
2243  * This is the timer handler used to directly invoke stmmac_tx_clean.
2244  */
2245 static void stmmac_tx_timer(struct timer_list *t)
2246 {
2247 	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2248 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2249 	u32 queue;
2250 
2251 	/* let's scan all the tx queues */
2252 	for (queue = 0; queue < tx_queues_count; queue++)
2253 		stmmac_tx_clean(priv, queue);
2254 }
2255 
2256 /**
2257  * stmmac_init_tx_coalesce - init tx mitigation options.
2258  * @priv: driver private structure
2259  * Description:
2260  * This initializes the transmit coalescing parameters, i.e. the timer rate,
2261  * the timer handler and the default threshold used for setting the
2262  * interrupt-on-completion bit.
2263  */
2264 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2265 {
2266 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2267 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2268 	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2269 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2270 	add_timer(&priv->txtimer);
2271 }
2272 
2273 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2274 {
2275 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2276 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2277 	u32 chan;
2278 
2279 	/* set TX ring length */
2280 	for (chan = 0; chan < tx_channels_count; chan++)
2281 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2282 				(DMA_TX_SIZE - 1), chan);
2283 
2284 	/* set RX ring length */
2285 	for (chan = 0; chan < rx_channels_count; chan++)
2286 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2287 				(DMA_RX_SIZE - 1), chan);
2288 }
2289 
2290 /**
2291  *  stmmac_set_tx_queue_weight - Set TX queue weight
2292  *  @priv: driver private structure
2293  *  Description: It is used for setting the TX queue weights
2294  */
2295 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2296 {
2297 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2298 	u32 weight;
2299 	u32 queue;
2300 
2301 	for (queue = 0; queue < tx_queues_count; queue++) {
2302 		weight = priv->plat->tx_queues_cfg[queue].weight;
2303 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2304 	}
2305 }
2306 
2307 /**
2308  *  stmmac_configure_cbs - Configure CBS in TX queue
2309  *  @priv: driver private structure
2310  *  Description: It is used for configuring CBS in AVB TX queues
2311  */
2312 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2313 {
2314 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2315 	u32 mode_to_use;
2316 	u32 queue;
2317 
2318 	/* queue 0 is reserved for legacy traffic */
2319 	for (queue = 1; queue < tx_queues_count; queue++) {
2320 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2321 		if (mode_to_use == MTL_QUEUE_DCB)
2322 			continue;
2323 
2324 		stmmac_config_cbs(priv, priv->hw,
2325 				priv->plat->tx_queues_cfg[queue].send_slope,
2326 				priv->plat->tx_queues_cfg[queue].idle_slope,
2327 				priv->plat->tx_queues_cfg[queue].high_credit,
2328 				priv->plat->tx_queues_cfg[queue].low_credit,
2329 				queue);
2330 	}
2331 }
2332 
2333 /**
2334  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2335  *  @priv: driver private structure
2336  *  Description: It is used for mapping RX queues to RX dma channels
2337  */
2338 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2339 {
2340 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2341 	u32 queue;
2342 	u32 chan;
2343 
2344 	for (queue = 0; queue < rx_queues_count; queue++) {
2345 		chan = priv->plat->rx_queues_cfg[queue].chan;
2346 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2347 	}
2348 }
2349 
2350 /**
2351  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2352  *  @priv: driver private structure
2353  *  Description: It is used for configuring the RX Queue Priority
2354  */
2355 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2356 {
2357 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2358 	u32 queue;
2359 	u32 prio;
2360 
2361 	for (queue = 0; queue < rx_queues_count; queue++) {
2362 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2363 			continue;
2364 
2365 		prio = priv->plat->rx_queues_cfg[queue].prio;
2366 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2367 	}
2368 }
2369 
2370 /**
2371  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2372  *  @priv: driver private structure
2373  *  Description: It is used for configuring the TX Queue Priority
2374  */
2375 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2376 {
2377 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2378 	u32 queue;
2379 	u32 prio;
2380 
2381 	for (queue = 0; queue < tx_queues_count; queue++) {
2382 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2383 			continue;
2384 
2385 		prio = priv->plat->tx_queues_cfg[queue].prio;
2386 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2387 	}
2388 }
2389 
2390 /**
2391  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2392  *  @priv: driver private structure
2393  *  Description: It is used for configuring the RX queue routing
2394  */
2395 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2396 {
2397 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2398 	u32 queue;
2399 	u8 packet;
2400 
2401 	for (queue = 0; queue < rx_queues_count; queue++) {
2402 		/* no specific packet type routing specified for the queue */
2403 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2404 			continue;
2405 
2406 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2407 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2408 	}
2409 }
2410 
2411 /**
2412  *  stmmac_mtl_configuration - Configure MTL
2413  *  @priv: driver private structure
2414  *  Description: It is used for configuring the MTL
2415  */
2416 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2417 {
2418 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2419 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2420 
2421 	if (tx_queues_count > 1)
2422 		stmmac_set_tx_queue_weight(priv);
2423 
2424 	/* Configure MTL RX algorithms */
2425 	if (rx_queues_count > 1)
2426 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2427 				priv->plat->rx_sched_algorithm);
2428 
2429 	/* Configure MTL TX algorithms */
2430 	if (tx_queues_count > 1)
2431 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2432 				priv->plat->tx_sched_algorithm);
2433 
2434 	/* Configure CBS in AVB TX queues */
2435 	if (tx_queues_count > 1)
2436 		stmmac_configure_cbs(priv);
2437 
2438 	/* Map RX MTL to DMA channels */
2439 	stmmac_rx_queue_dma_chan_map(priv);
2440 
2441 	/* Enable MAC RX Queues */
2442 	stmmac_mac_enable_rx_queues(priv);
2443 
2444 	/* Set RX priorities */
2445 	if (rx_queues_count > 1)
2446 		stmmac_mac_config_rx_queues_prio(priv);
2447 
2448 	/* Set TX priorities */
2449 	if (tx_queues_count > 1)
2450 		stmmac_mac_config_tx_queues_prio(priv);
2451 
2452 	/* Set RX routing */
2453 	if (rx_queues_count > 1)
2454 		stmmac_mac_config_rx_queues_routing(priv);
2455 }
2456 
2457 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2458 {
2459 	if (priv->dma_cap.asp) {
2460 		netdev_info(priv->dev, "Enabling Safety Features\n");
2461 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2462 	} else {
2463 		netdev_info(priv->dev, "No Safety Features support found\n");
2464 	}
2465 }
2466 
2467 /**
2468  * stmmac_hw_setup - setup mac in a usable state.
2469  *  @dev : pointer to the device structure.
 *  @init_ptp: initialize the PTP subsystem if set
2470  *  Description:
2471  *  this is the main function used to set up the HW in a usable state: the
2472  *  dma engine is reset, the core registers are configured (e.g. AXI,
2473  *  Checksum features, timers) and the DMA is made ready to start receiving
2474  *  and transmitting.
2475  *  Return value:
2476  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2477  *  file on failure.
2478  */
2479 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2480 {
2481 	struct stmmac_priv *priv = netdev_priv(dev);
2482 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2483 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2484 	u32 chan;
2485 	int ret;
2486 
2487 	/* DMA initialization and SW reset */
2488 	ret = stmmac_init_dma_engine(priv);
2489 	if (ret < 0) {
2490 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2491 			   __func__);
2492 		return ret;
2493 	}
2494 
2495 	/* Copy the MAC addr into the HW  */
2496 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2497 
2498 	/* PS and related bits will be programmed according to the speed */
2499 	if (priv->hw->pcs) {
2500 		int speed = priv->plat->mac_port_sel_speed;
2501 
2502 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2503 		    (speed == SPEED_1000)) {
2504 			priv->hw->ps = speed;
2505 		} else {
2506 			dev_warn(priv->device, "invalid port speed\n");
2507 			priv->hw->ps = 0;
2508 		}
2509 	}
2510 
2511 	/* Initialize the MAC Core */
2512 	stmmac_core_init(priv, priv->hw, dev);
2513 
2514 	/* Initialize MTL */
2515 	stmmac_mtl_configuration(priv);
2516 
2517 	/* Initialize Safety Features */
2518 	stmmac_safety_feat_configuration(priv);
2519 
2520 	ret = stmmac_rx_ipc(priv, priv->hw);
2521 	if (!ret) {
2522 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2523 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2524 		priv->hw->rx_csum = 0;
2525 	}
2526 
2527 	/* Enable the MAC Rx/Tx */
2528 	stmmac_mac_set(priv, priv->ioaddr, true);
2529 
2530 	/* Set the HW DMA mode and the COE */
2531 	stmmac_dma_operation_mode(priv);
2532 
2533 	stmmac_mmc_setup(priv);
2534 
2535 	if (init_ptp) {
2536 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2537 		if (ret < 0)
2538 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2539 
2540 		ret = stmmac_init_ptp(priv);
2541 		if (ret == -EOPNOTSUPP)
2542 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2543 		else if (ret)
2544 			netdev_warn(priv->dev, "PTP init failed\n");
2545 	}
2546 
2547 #ifdef CONFIG_DEBUG_FS
2548 	ret = stmmac_init_fs(dev);
2549 	if (ret < 0)
2550 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2551 			    __func__);
2552 #endif
2553 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2554 
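	/* When the RX interrupt watchdog is in use, program the maximum
	 * watchdog value to mitigate RX interrupts.
	 */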
2555 	if (priv->use_riwt) {
2556 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2557 		if (!ret)
2558 			priv->rx_riwt = MAX_DMA_RIWT;
2559 	}
2560 
2561 	if (priv->hw->pcs)
2562 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2563 
2564 	/* set TX and RX rings length */
2565 	stmmac_set_rings_length(priv);
2566 
2567 	/* Enable TSO */
2568 	if (priv->tso) {
2569 		for (chan = 0; chan < tx_cnt; chan++)
2570 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2571 	}
2572 
2573 	/* Start the ball rolling... */
2574 	stmmac_start_all_dma(priv);
2575 
2576 	return 0;
2577 }
2578 
2579 static void stmmac_hw_teardown(struct net_device *dev)
2580 {
2581 	struct stmmac_priv *priv = netdev_priv(dev);
2582 
2583 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2584 }
2585 
2586 /**
2587  *  stmmac_open - open entry point of the driver
2588  *  @dev : pointer to the device structure.
2589  *  Description:
2590  *  This function is the open entry point of the driver.
2591  *  Return value:
2592  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2593  *  file on failure.
2594  */
2595 static int stmmac_open(struct net_device *dev)
2596 {
2597 	struct stmmac_priv *priv = netdev_priv(dev);
2598 	int ret;
2599 
2600 	stmmac_check_ether_addr(priv);
2601 
2602 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2603 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2604 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2605 		ret = stmmac_init_phy(dev);
2606 		if (ret) {
2607 			netdev_err(priv->dev,
2608 				   "%s: Cannot attach to PHY (error: %d)\n",
2609 				   __func__, ret);
2610 			return ret;
2611 		}
2612 	}
2613 
2614 	/* Extra statistics */
2615 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2616 	priv->xstats.threshold = tc;
2617 
2618 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2619 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2620 
2621 	ret = alloc_dma_desc_resources(priv);
2622 	if (ret < 0) {
2623 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2624 			   __func__);
2625 		goto dma_desc_error;
2626 	}
2627 
2628 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2629 	if (ret < 0) {
2630 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2631 			   __func__);
2632 		goto init_error;
2633 	}
2634 
2635 	ret = stmmac_hw_setup(dev, true);
2636 	if (ret < 0) {
2637 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2638 		goto init_error;
2639 	}
2640 
2641 	stmmac_init_tx_coalesce(priv);
2642 
2643 	if (dev->phydev)
2644 		phy_start(dev->phydev);
2645 
2646 	/* Request the IRQ lines */
2647 	ret = request_irq(dev->irq, stmmac_interrupt,
2648 			  IRQF_SHARED, dev->name, dev);
2649 	if (unlikely(ret < 0)) {
2650 		netdev_err(priv->dev,
2651 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2652 			   __func__, dev->irq, ret);
2653 		goto irq_error;
2654 	}
2655 
2656 	/* Request the Wake IRQ in case another line is used for WoL */
2657 	if (priv->wol_irq != dev->irq) {
2658 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2659 				  IRQF_SHARED, dev->name, dev);
2660 		if (unlikely(ret < 0)) {
2661 			netdev_err(priv->dev,
2662 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2663 				   __func__, priv->wol_irq, ret);
2664 			goto wolirq_error;
2665 		}
2666 	}
2667 
2668 	/* Request the LPI IRQ when a dedicated interrupt line is used */
2669 	if (priv->lpi_irq > 0) {
2670 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2671 				  dev->name, dev);
2672 		if (unlikely(ret < 0)) {
2673 			netdev_err(priv->dev,
2674 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2675 				   __func__, priv->lpi_irq, ret);
2676 			goto lpiirq_error;
2677 		}
2678 	}
2679 
2680 	stmmac_enable_all_queues(priv);
2681 	stmmac_start_all_queues(priv);
2682 
2683 	return 0;
2684 
2685 lpiirq_error:
2686 	if (priv->wol_irq != dev->irq)
2687 		free_irq(priv->wol_irq, dev);
2688 wolirq_error:
2689 	free_irq(dev->irq, dev);
2690 irq_error:
2691 	if (dev->phydev)
2692 		phy_stop(dev->phydev);
2693 
2694 	del_timer_sync(&priv->txtimer);
2695 	stmmac_hw_teardown(dev);
2696 init_error:
2697 	free_dma_desc_resources(priv);
2698 dma_desc_error:
2699 	if (dev->phydev)
2700 		phy_disconnect(dev->phydev);
2701 
2702 	return ret;
2703 }
2704 
2705 /**
2706  *  stmmac_release - close entry point of the driver
2707  *  @dev : device pointer.
2708  *  Description:
2709  *  This is the stop entry point of the driver.
2710  */
2711 static int stmmac_release(struct net_device *dev)
2712 {
2713 	struct stmmac_priv *priv = netdev_priv(dev);
2714 
2715 	if (priv->eee_enabled)
2716 		del_timer_sync(&priv->eee_ctrl_timer);
2717 
2718 	/* Stop and disconnect the PHY */
2719 	if (dev->phydev) {
2720 		phy_stop(dev->phydev);
2721 		phy_disconnect(dev->phydev);
2722 	}
2723 
2724 	stmmac_stop_all_queues(priv);
2725 
2726 	stmmac_disable_all_queues(priv);
2727 
2728 	del_timer_sync(&priv->txtimer);
2729 
2730 	/* Free the IRQ lines */
2731 	free_irq(dev->irq, dev);
2732 	if (priv->wol_irq != dev->irq)
2733 		free_irq(priv->wol_irq, dev);
2734 	if (priv->lpi_irq > 0)
2735 		free_irq(priv->lpi_irq, dev);
2736 
2737 	/* Stop TX/RX DMA and clear the descriptors */
2738 	stmmac_stop_all_dma(priv);
2739 
2740 	/* Release and free the Rx/Tx resources */
2741 	free_dma_desc_resources(priv);
2742 
2743 	/* Disable the MAC Rx/Tx */
2744 	stmmac_mac_set(priv, priv->ioaddr, false);
2745 
2746 	netif_carrier_off(dev);
2747 
2748 #ifdef CONFIG_DEBUG_FS
2749 	stmmac_exit_fs(dev);
2750 #endif
2751 
2752 	stmmac_release_ptp(priv);
2753 
2754 	return 0;
2755 }
2756 
2757 /**
2758  *  stmmac_tso_allocator - allocate TSO TX descriptors for a payload buffer
2759  *  @priv: driver private structure
2760  *  @des: buffer start address
2761  *  @total_len: total length to fill in descriptors
2762  *  @last_segment: condition for the last descriptor
2763  *  @queue: TX queue index
2764  *  Description:
2765  *  This function fills the descriptors, requesting new ones according to
2766  *  the buffer length to fill
2767  */
2768 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2769 				 int total_len, bool last_segment, u32 queue)
2770 {
2771 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2772 	struct dma_desc *desc;
2773 	u32 buff_size;
2774 	int tmp_len;
2775 
2776 	tmp_len = total_len;
2777 
2778 	while (tmp_len > 0) {
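	/* Split the remaining payload into TSO_MAX_BUFF_SIZE chunks, one
	 * descriptor per chunk; the Last Segment flag is only set on the final
	 * chunk when requested by the caller.
	 */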
2779 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2780 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2781 		desc = tx_q->dma_tx + tx_q->cur_tx;
2782 
2783 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2784 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2785 			    TSO_MAX_BUFF_SIZE : tmp_len;
2786 
2787 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2788 				0, 1,
2789 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2790 				0, 0);
2791 
2792 		tmp_len -= TSO_MAX_BUFF_SIZE;
2793 	}
2794 }
2795 
2796 /**
2797  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2798  *  @skb : the socket buffer
2799  *  @dev : device pointer
2800  *  Description: this is the transmit function that is called on TSO frames
2801  *  (support available on GMAC4 and newer chips).
2802  *  The diagram below shows the ring programming in case of TSO frames:
2803  *
2804  *  First Descriptor
2805  *   --------
2806  *   | DES0 |---> buffer1 = L2/L3/L4 header
2807  *   | DES1 |---> TCP Payload (can continue on next descr...)
2808  *   | DES2 |---> buffer 1 and 2 len
2809  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2810  *   --------
2811  *	|
2812  *     ...
2813  *	|
2814  *   --------
2815  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2816  *   | DES1 | --|
2817  *   | DES2 | --> buffer 1 and 2 len
2818  *   | DES3 |
2819  *   --------
2820  *
2821  * MSS is fixed while TSO is enabled, so the TDES3 ctx field is only programmed when the MSS changes.
2822  */
2823 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2824 {
2825 	struct dma_desc *desc, *first, *mss_desc = NULL;
2826 	struct stmmac_priv *priv = netdev_priv(dev);
2827 	int nfrags = skb_shinfo(skb)->nr_frags;
2828 	u32 queue = skb_get_queue_mapping(skb);
2829 	unsigned int first_entry, des;
2830 	struct stmmac_tx_queue *tx_q;
2831 	int tmp_pay_len = 0;
2832 	u32 pay_len, mss;
2833 	u8 proto_hdr_len;
2834 	int i;
2835 
2836 	tx_q = &priv->tx_queue[queue];
2837 
2838 	/* Compute header lengths */
2839 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2840 
2841 	/* Desc availability based on threshold should be safe enough */
2842 	if (unlikely(stmmac_tx_avail(priv, queue) <
2843 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2844 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2845 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2846 								queue));
2847 			/* This is a hard error, log it. */
2848 			netdev_err(priv->dev,
2849 				   "%s: Tx Ring full when queue awake\n",
2850 				   __func__);
2851 		}
2852 		return NETDEV_TX_BUSY;
2853 	}
2854 
2855 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2856 
2857 	mss = skb_shinfo(skb)->gso_size;
2858 
2859 	/* set new MSS value if needed */
2860 	if (mss != tx_q->mss) {
2861 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2862 		stmmac_set_mss(priv, mss_desc, mss);
2863 		tx_q->mss = mss;
2864 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2865 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2866 	}
2867 
2868 	if (netif_msg_tx_queued(priv)) {
2869 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2870 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2871 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2872 			skb->data_len);
2873 	}
2874 
2875 	first_entry = tx_q->cur_tx;
2876 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2877 
2878 	desc = tx_q->dma_tx + first_entry;
2879 	first = desc;
2880 
2881 	/* first descriptor: fill Headers on Buf1 */
2882 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2883 			     DMA_TO_DEVICE);
2884 	if (dma_mapping_error(priv->device, des))
2885 		goto dma_map_err;
2886 
2887 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2888 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2889 
2890 	first->des0 = cpu_to_le32(des);
2891 
2892 	/* Fill start of payload in buff2 of first descriptor */
2893 	if (pay_len)
2894 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2895 
2896 	/* If needed take extra descriptors to fill the remaining payload */
2897 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2898 
2899 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2900 
2901 	/* Prepare fragments */
2902 	for (i = 0; i < nfrags; i++) {
2903 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2904 
2905 		des = skb_frag_dma_map(priv->device, frag, 0,
2906 				       skb_frag_size(frag),
2907 				       DMA_TO_DEVICE);
2908 		if (dma_mapping_error(priv->device, des))
2909 			goto dma_map_err;
2910 
2911 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2912 				     (i == nfrags - 1), queue);
2913 
2914 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2915 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2916 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2917 	}
2918 
2919 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2920 
2921 	/* Only the last descriptor gets to point to the skb. */
2922 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2923 
2924 	/* We've used all descriptors we need for this skb, however,
2925 	 * advance cur_tx so that it references a fresh descriptor.
2926 	 * ndo_start_xmit will fill this descriptor the next time it's
2927 	 * called and stmmac_tx_clean may clean up to this descriptor.
2928 	 */
2929 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2930 
2931 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2932 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2933 			  __func__);
2934 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2935 	}
2936 
2937 	dev->stats.tx_bytes += skb->len;
2938 	priv->xstats.tx_tso_frames++;
2939 	priv->xstats.tx_tso_nfrags += nfrags;
2940 
2941 	/* Manage tx mitigation */
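	/* The IC bit is set roughly once every tx_coal_frames frames; in
	 * between, the SW timer is re-armed so that completed descriptors are
	 * still reclaimed by stmmac_tx_clean.
	 */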
2942 	priv->tx_count_frames += nfrags + 1;
2943 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2944 		mod_timer(&priv->txtimer,
2945 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2946 	} else {
2947 		priv->tx_count_frames = 0;
2948 		stmmac_set_tx_ic(priv, desc);
2949 		priv->xstats.tx_set_ic_bit++;
2950 	}
2951 
2952 	skb_tx_timestamp(skb);
2953 
2954 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2955 		     priv->hwts_tx_en)) {
2956 		/* declare that device is doing timestamping */
2957 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2958 		stmmac_enable_tx_timestamp(priv, first);
2959 	}
2960 
2961 	/* Complete the first descriptor before granting the DMA */
2962 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2963 			proto_hdr_len,
2964 			pay_len,
2965 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2966 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2967 
2968 	/* If context desc is used to change MSS */
2969 	if (mss_desc) {
2970 		/* Make sure that the first descriptor has been completely
2971 		 * written, including its OWN bit. This is because the MSS
2972 		 * context descriptor sits before the first descriptor, so the
2973 		 * MSS descriptor's OWN bit must be the last thing written.
2974 		 */
2975 		dma_wmb();
2976 		stmmac_set_tx_owner(priv, mss_desc);
2977 	}
2978 
2979 	/* The own bit must be the last setting done when preparing the
2980 	 * descriptor, and a barrier is then needed to make sure that
2981 	 * everything is coherent before granting the DMA engine.
2982 	 */
2983 	wmb();
2984 
2985 	if (netif_msg_pktdata(priv)) {
2986 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2987 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2988 			tx_q->cur_tx, first, nfrags);
2989 
2990 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2991 
2992 		pr_info(">>> frame to be transmitted: ");
2993 		print_pkt(skb->data, skb_headlen(skb));
2994 	}
2995 
2996 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2997 
2998 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2999 
3000 	return NETDEV_TX_OK;
3001 
3002 dma_map_err:
3003 	dev_err(priv->device, "Tx dma map failed\n");
3004 	dev_kfree_skb(skb);
3005 	priv->dev->stats.tx_dropped++;
3006 	return NETDEV_TX_OK;
3007 }
3008 
3009 /**
3010  *  stmmac_xmit - Tx entry point of the driver
3011  *  @skb : the socket buffer
3012  *  @dev : device pointer
3013  *  Description : this is the tx entry point of the driver.
3014  *  It programs the chain or the ring and supports oversized frames
3015  *  and SG feature.
3016  */
3017 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3018 {
3019 	struct stmmac_priv *priv = netdev_priv(dev);
3020 	unsigned int nopaged_len = skb_headlen(skb);
3021 	int i, csum_insertion = 0, is_jumbo = 0;
3022 	u32 queue = skb_get_queue_mapping(skb);
3023 	int nfrags = skb_shinfo(skb)->nr_frags;
3024 	int entry;
3025 	unsigned int first_entry;
3026 	struct dma_desc *desc, *first;
3027 	struct stmmac_tx_queue *tx_q;
3028 	unsigned int enh_desc;
3029 	unsigned int des;
3030 
3031 	tx_q = &priv->tx_queue[queue];
3032 
3033 	/* Manage oversized TCP frames for GMAC4 device */
3034 	if (skb_is_gso(skb) && priv->tso) {
3035 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3036 			return stmmac_tso_xmit(skb, dev);
3037 	}
3038 
3039 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3040 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3041 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3042 								queue));
3043 			/* This is a hard error, log it. */
3044 			netdev_err(priv->dev,
3045 				   "%s: Tx Ring full when queue awake\n",
3046 				   __func__);
3047 		}
3048 		return NETDEV_TX_BUSY;
3049 	}
3050 
3051 	if (priv->tx_path_in_lpi_mode)
3052 		stmmac_disable_eee_mode(priv);
3053 
3054 	entry = tx_q->cur_tx;
3055 	first_entry = entry;
3056 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3057 
3058 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3059 
3060 	if (likely(priv->extend_desc))
3061 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3062 	else
3063 		desc = tx_q->dma_tx + entry;
3064 
3065 	first = desc;
3066 
3067 	enh_desc = priv->plat->enh_desc;
3068 	/* To program the descriptors according to the size of the frame */
3069 	if (enh_desc)
3070 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3071 
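	/* Jumbo frames are handled by the ring/chain specific helper, which
	 * programs the extra descriptors needed for the oversized payload.
	 */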
3072 	if (unlikely(is_jumbo)) {
3073 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3074 		if (unlikely(entry < 0) && (entry != -EINVAL))
3075 			goto dma_map_err;
3076 	}
3077 
3078 	for (i = 0; i < nfrags; i++) {
3079 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3080 		int len = skb_frag_size(frag);
3081 		bool last_segment = (i == (nfrags - 1));
3082 
3083 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3084 		WARN_ON(tx_q->tx_skbuff[entry]);
3085 
3086 		if (likely(priv->extend_desc))
3087 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3088 		else
3089 			desc = tx_q->dma_tx + entry;
3090 
3091 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3092 				       DMA_TO_DEVICE);
3093 		if (dma_mapping_error(priv->device, des))
3094 			goto dma_map_err; /* should reuse desc w/o issues */
3095 
3096 		tx_q->tx_skbuff_dma[entry].buf = des;
3097 
3098 		stmmac_set_desc_addr(priv, desc, des);
3099 
3100 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3101 		tx_q->tx_skbuff_dma[entry].len = len;
3102 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3103 
3104 		/* Prepare the descriptor and set the own bit too */
3105 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3106 				priv->mode, 1, last_segment, skb->len);
3107 	}
3108 
3109 	/* Only the last descriptor gets to point to the skb. */
3110 	tx_q->tx_skbuff[entry] = skb;
3111 
3112 	/* We've used all descriptors we need for this skb, however,
3113 	 * advance cur_tx so that it references a fresh descriptor.
3114 	 * ndo_start_xmit will fill this descriptor the next time it's
3115 	 * called and stmmac_tx_clean may clean up to this descriptor.
3116 	 */
3117 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3118 	tx_q->cur_tx = entry;
3119 
3120 	if (netif_msg_pktdata(priv)) {
3121 		void *tx_head;
3122 
3123 		netdev_dbg(priv->dev,
3124 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3125 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3126 			   entry, first, nfrags);
3127 
3128 		if (priv->extend_desc)
3129 			tx_head = (void *)tx_q->dma_etx;
3130 		else
3131 			tx_head = (void *)tx_q->dma_tx;
3132 
3133 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3134 
3135 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3136 		print_pkt(skb->data, skb->len);
3137 	}
3138 
3139 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3140 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3141 			  __func__);
3142 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3143 	}
3144 
3145 	dev->stats.tx_bytes += skb->len;
3146 
3147 	/* According to the coalesce parameter the IC bit for the latest
3148 	 * segment is reset and the timer re-started to clean the tx status.
3149 	 * This approach takes care of the fragments: desc is the first
3150 	 * element in case of no SG.
3151 	 */
3152 	priv->tx_count_frames += nfrags + 1;
3153 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3154 		mod_timer(&priv->txtimer,
3155 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3156 	} else {
3157 		priv->tx_count_frames = 0;
3158 		stmmac_set_tx_ic(priv, desc);
3159 		priv->xstats.tx_set_ic_bit++;
3160 	}
3161 
3162 	skb_tx_timestamp(skb);
3163 
3164 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3165 	 * problems because all the descriptors are actually ready to be
3166 	 * passed to the DMA engine.
3167 	 */
3168 	if (likely(!is_jumbo)) {
3169 		bool last_segment = (nfrags == 0);
3170 
3171 		des = dma_map_single(priv->device, skb->data,
3172 				     nopaged_len, DMA_TO_DEVICE);
3173 		if (dma_mapping_error(priv->device, des))
3174 			goto dma_map_err;
3175 
3176 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3177 
3178 		stmmac_set_desc_addr(priv, first, des);
3179 
3180 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3181 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3182 
3183 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3184 			     priv->hwts_tx_en)) {
3185 			/* declare that device is doing timestamping */
3186 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3187 			stmmac_enable_tx_timestamp(priv, first);
3188 		}
3189 
3190 		/* Prepare the first descriptor setting the OWN bit too */
3191 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3192 				csum_insertion, priv->mode, 1, last_segment,
3193 				skb->len);
3194 
3195 		/* The own bit must be the last setting done when preparing the
3196 		 * descriptor, and a barrier is then needed to make sure that
3197 		 * everything is coherent before granting the DMA engine.
3198 		 */
3199 		wmb();
3200 	}
3201 
3202 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3203 
3204 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
3205 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3206 
3207 	return NETDEV_TX_OK;
3208 
3209 dma_map_err:
3210 	netdev_err(priv->dev, "Tx DMA map failed\n");
3211 	dev_kfree_skb(skb);
3212 	priv->dev->stats.tx_dropped++;
3213 	return NETDEV_TX_OK;
3214 }
3215 
3216 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3217 {
3218 	struct vlan_ethhdr *veth;
3219 	__be16 vlan_proto;
3220 	u16 vlanid;
3221 
3222 	veth = (struct vlan_ethhdr *)skb->data;
3223 	vlan_proto = veth->h_vlan_proto;
3224 
3225 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3226 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3227 	    (vlan_proto == htons(ETH_P_8021AD) &&
3228 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3229 		/* pop the vlan tag */
3230 		vlanid = ntohs(veth->h_vlan_TCI);
3231 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3232 		skb_pull(skb, VLAN_HLEN);
3233 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3234 	}
3235 }
3236 
3237 
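/* Return 1 when recent skb allocation failures mean the RX path should
 * temporarily fall back to copying received frames instead of zero-copy.
 */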
3238 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3239 {
3240 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3241 		return 0;
3242 
3243 	return 1;
3244 }
3245 
3246 /**
3247  * stmmac_rx_refill - refill used skb preallocated buffers
3248  * @priv: driver private structure
3249  * @queue: RX queue index
3250  * Description : this reallocates the skbs for the reception process
3251  * that is based on zero-copy.
3252  */
3253 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3254 {
3255 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3256 	int dirty = stmmac_rx_dirty(priv, queue);
3257 	unsigned int entry = rx_q->dirty_rx;
3258 
3259 	int bfsize = priv->dma_buf_sz;
3260 
3261 	while (dirty-- > 0) {
3262 		struct dma_desc *p;
3263 
3264 		if (priv->extend_desc)
3265 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3266 		else
3267 			p = rx_q->dma_rx + entry;
3268 
3269 		if (likely(!rx_q->rx_skbuff[entry])) {
3270 			struct sk_buff *skb;
3271 
3272 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3273 			if (unlikely(!skb)) {
3274 				/* so for a while no zero-copy! */
3275 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3276 				if (unlikely(net_ratelimit()))
3277 					dev_err(priv->device,
3278 						"fail to alloc skb entry %d\n",
3279 						entry);
3280 				break;
3281 			}
3282 
3283 			rx_q->rx_skbuff[entry] = skb;
3284 			rx_q->rx_skbuff_dma[entry] =
3285 			    dma_map_single(priv->device, skb->data, bfsize,
3286 					   DMA_FROM_DEVICE);
3287 			if (dma_mapping_error(priv->device,
3288 					      rx_q->rx_skbuff_dma[entry])) {
3289 				netdev_err(priv->dev, "Rx DMA map failed\n");
3290 				dev_kfree_skb(skb);
3291 				break;
3292 			}
3293 
3294 			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3295 			stmmac_refill_desc3(priv, rx_q, p);
3296 
3297 			if (rx_q->rx_zeroc_thresh > 0)
3298 				rx_q->rx_zeroc_thresh--;
3299 
3300 			netif_dbg(priv, rx_status, priv->dev,
3301 				  "refill entry #%d\n", entry);
3302 		}
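		/* Ensure the buffer address and descriptor fields are visible
		 * before the OWN bit is handed back to the DMA.
		 */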
3303 		dma_wmb();
3304 
3305 		stmmac_set_rx_owner(priv, p, priv->use_riwt);
3306 
3307 		dma_wmb();
3308 
3309 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3310 	}
3311 	rx_q->dirty_rx = entry;
3312 }
3313 
3314 /**
3315  * stmmac_rx - manage the receive process
3316  * @priv: driver private structure
3317  * @limit: napi budget
3318  * @queue: RX queue index.
3319  * Description : this is the function called by the napi poll method.
3320  * It gets all the frames inside the ring.
3321  */
3322 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3323 {
3324 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3325 	unsigned int entry = rx_q->cur_rx;
3326 	int coe = priv->hw->rx_csum;
3327 	unsigned int next_entry;
3328 	unsigned int count = 0;
3329 	bool xmac;
3330 
3331 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3332 
3333 	if (netif_msg_rx_status(priv)) {
3334 		void *rx_head;
3335 
3336 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3337 		if (priv->extend_desc)
3338 			rx_head = (void *)rx_q->dma_erx;
3339 		else
3340 			rx_head = (void *)rx_q->dma_rx;
3341 
3342 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3343 	}
3344 	while (count < limit) {
3345 		int status;
3346 		struct dma_desc *p;
3347 		struct dma_desc *np;
3348 
3349 		if (priv->extend_desc)
3350 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3351 		else
3352 			p = rx_q->dma_rx + entry;
3353 
3354 		/* read the status of the incoming frame */
3355 		status = stmmac_rx_status(priv, &priv->dev->stats,
3356 				&priv->xstats, p);
3357 		/* check if managed by the DMA otherwise go ahead */
3358 		if (unlikely(status & dma_own))
3359 			break;
3360 
3361 		count++;
3362 
3363 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3364 		next_entry = rx_q->cur_rx;
3365 
3366 		if (priv->extend_desc)
3367 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3368 		else
3369 			np = rx_q->dma_rx + next_entry;
3370 
3371 		prefetch(np);
3372 
3373 		if (priv->extend_desc)
3374 			stmmac_rx_extended_status(priv, &priv->dev->stats,
3375 					&priv->xstats, rx_q->dma_erx + entry);
3376 		if (unlikely(status == discard_frame)) {
3377 			priv->dev->stats.rx_errors++;
3378 			if (priv->hwts_rx_en && !priv->extend_desc) {
3379 				/* DESC2 & DESC3 will be overwritten by device
3380 				 * with timestamp value, hence reinitialize
3381 				 * them in stmmac_rx_refill() function so that
3382 				 * device can reuse it.
3383 				 */
3384 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3385 				rx_q->rx_skbuff[entry] = NULL;
3386 				dma_unmap_single(priv->device,
3387 						 rx_q->rx_skbuff_dma[entry],
3388 						 priv->dma_buf_sz,
3389 						 DMA_FROM_DEVICE);
3390 			}
3391 		} else {
3392 			struct sk_buff *skb;
3393 			int frame_len;
3394 			unsigned int des;
3395 
3396 			stmmac_get_desc_addr(priv, p, &des);
3397 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3398 
3399 			/*  If frame length is greater than skb buffer size
3400 			 *  (preallocated during init) then the packet is
3401 			 *  ignored
3402 			 */
3403 			if (frame_len > priv->dma_buf_sz) {
3404 				netdev_err(priv->dev,
3405 					   "len %d larger than size (%d)\n",
3406 					   frame_len, priv->dma_buf_sz);
3407 				priv->dev->stats.rx_length_errors++;
3408 				break;
3409 			}
3410 
3411 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3412 			 * Type frames (LLC/LLC-SNAP)
3413 			 *
3414 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3415 			 * feature is always disabled and packets need to be
3416 			 * stripped manually.
3417 			 */
3418 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3419 			    unlikely(status != llc_snap))
3420 				frame_len -= ETH_FCS_LEN;
3421 
3422 			if (netif_msg_rx_status(priv)) {
3423 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3424 					   p, entry, des);
3425 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3426 					   frame_len, status);
3427 			}
3428 
3429 			/* Zero-copy is always used for all sizes in case of
3430 			 * GMAC4 because the used descriptors always need to
3431 			 * be refilled.
3432 			 */
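			/* For small frames (below rx_copybreak), or while skb
			 * allocation keeps failing, copy the frame into a new
			 * skb and leave the original RX buffer in place.
			 */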
3433 			if (unlikely(!xmac &&
3434 				     ((frame_len < priv->rx_copybreak) ||
3435 				     stmmac_rx_threshold_count(rx_q)))) {
3436 				skb = netdev_alloc_skb_ip_align(priv->dev,
3437 								frame_len);
3438 				if (unlikely(!skb)) {
3439 					if (net_ratelimit())
3440 						dev_warn(priv->device,
3441 							 "packet dropped\n");
3442 					priv->dev->stats.rx_dropped++;
3443 					break;
3444 				}
3445 
3446 				dma_sync_single_for_cpu(priv->device,
3447 							rx_q->rx_skbuff_dma
3448 							[entry], frame_len,
3449 							DMA_FROM_DEVICE);
3450 				skb_copy_to_linear_data(skb,
3451 							rx_q->
3452 							rx_skbuff[entry]->data,
3453 							frame_len);
3454 
3455 				skb_put(skb, frame_len);
3456 				dma_sync_single_for_device(priv->device,
3457 							   rx_q->rx_skbuff_dma
3458 							   [entry], frame_len,
3459 							   DMA_FROM_DEVICE);
3460 			} else {
3461 				skb = rx_q->rx_skbuff[entry];
3462 				if (unlikely(!skb)) {
3463 					netdev_err(priv->dev,
3464 						   "%s: Inconsistent Rx chain\n",
3465 						   priv->dev->name);
3466 					priv->dev->stats.rx_dropped++;
3467 					break;
3468 				}
3469 				prefetch(skb->data - NET_IP_ALIGN);
3470 				rx_q->rx_skbuff[entry] = NULL;
3471 				rx_q->rx_zeroc_thresh++;
3472 
3473 				skb_put(skb, frame_len);
3474 				dma_unmap_single(priv->device,
3475 						 rx_q->rx_skbuff_dma[entry],
3476 						 priv->dma_buf_sz,
3477 						 DMA_FROM_DEVICE);
3478 			}
3479 
3480 			if (netif_msg_pktdata(priv)) {
3481 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3482 					   frame_len);
3483 				print_pkt(skb->data, frame_len);
3484 			}
3485 
3486 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3487 
3488 			stmmac_rx_vlan(priv->dev, skb);
3489 
3490 			skb->protocol = eth_type_trans(skb, priv->dev);
3491 
3492 			if (unlikely(!coe))
3493 				skb_checksum_none_assert(skb);
3494 			else
3495 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3496 
3497 			napi_gro_receive(&rx_q->napi, skb);
3498 
3499 			priv->dev->stats.rx_packets++;
3500 			priv->dev->stats.rx_bytes += frame_len;
3501 		}
3502 		entry = next_entry;
3503 	}
3504 
3505 	stmmac_rx_refill(priv, queue);
3506 
3507 	priv->xstats.rx_pkt_n += count;
3508 
3509 	return count;
3510 }
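
/* A small illustrative note on the copy-break path above: on non-GMAC4/XGMAC
 * cores, frames shorter than priv->rx_copybreak (defaulting to
 * STMMAC_RX_COPYBREAK, 256 bytes) are copied into a freshly allocated skb so
 * the original DMA buffer can be reused in place, while larger frames take
 * the zero-copy path. If the rx-copybreak ethtool tunable is wired up in
 * stmmac_ethtool.c (not shown here), the threshold could be adjusted at run
 * time with something like:
 *
 *   ethtool --set-tunable eth0 rx-copybreak 512
 *
 * The command above is only a sketch; availability depends on the ethtool
 * ops actually compiled into this driver.
 */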
3511 
3512 /**
3513  *  stmmac_poll - stmmac poll method (NAPI)
3514  *  @napi : pointer to the napi structure.
3515  *  @budget : maximum number of packets that the current CPU can receive from
3516  *	      all interfaces.
3517  *  Description :
3518  *  Process the incoming frames and clean the TX resources.
3519  */
3520 static int stmmac_poll(struct napi_struct *napi, int budget)
3521 {
3522 	struct stmmac_rx_queue *rx_q =
3523 		container_of(napi, struct stmmac_rx_queue, napi);
3524 	struct stmmac_priv *priv = rx_q->priv_data;
3525 	u32 tx_count = priv->plat->tx_queues_to_use;
3526 	u32 chan = rx_q->queue_index;
3527 	int work_done = 0;
3528 	u32 queue;
3529 
3530 	priv->xstats.napi_poll++;
3531 
3532 	/* check all the queues */
3533 	for (queue = 0; queue < tx_count; queue++)
3534 		stmmac_tx_clean(priv, queue);
3535 
3536 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3537 	if (work_done < budget) {
3538 		napi_complete_done(napi, work_done);
3539 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3540 	}
3541 	return work_done;
3542 }
3543 
3544 /**
3545  *  stmmac_tx_timeout
3546  *  @dev : Pointer to net device structure
3547  *  Description: this function is called when a packet transmission fails to
3548  *   complete within a reasonable time. The driver will mark the error in the
3549  *   netdev structure and arrange for the device to be reset to a sane state
3550  *   in order to transmit a new packet.
3551  */
3552 static void stmmac_tx_timeout(struct net_device *dev)
3553 {
3554 	struct stmmac_priv *priv = netdev_priv(dev);
3555 
3556 	stmmac_global_err(priv);
3557 }
3558 
3559 /**
3560  *  stmmac_set_rx_mode - entry point for multicast addressing
3561  *  @dev : pointer to the device structure
3562  *  Description:
3563  *  This function is a driver entry point which gets called by the kernel
3564  *  whenever multicast addresses must be enabled/disabled.
3565  *  Return value:
3566  *  void.
3567  */
3568 static void stmmac_set_rx_mode(struct net_device *dev)
3569 {
3570 	struct stmmac_priv *priv = netdev_priv(dev);
3571 
3572 	stmmac_set_filter(priv, priv->hw, dev);
3573 }
3574 
3575 /**
3576  *  stmmac_change_mtu - entry point to change MTU size for the device.
3577  *  @dev : device pointer.
3578  *  @new_mtu : the new MTU size for the device.
3579  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3580  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3581  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3582  *  Return value:
3583  *  0 on success and an appropriate negative error code (as defined in
3584  *  errno.h) on failure.
3585  */
3586 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3587 {
3588 	struct stmmac_priv *priv = netdev_priv(dev);
3589 
3590 	if (netif_running(dev)) {
3591 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3592 		return -EBUSY;
3593 	}
3594 
3595 	dev->mtu = new_mtu;
3596 
3597 	netdev_update_features(dev);
3598 
3599 	return 0;
3600 }
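
/* Illustrative example: since stmmac_change_mtu() returns -EBUSY while the
 * interface is running, the MTU is typically changed with the link down,
 * e.g. (assuming the netdev is eth0):
 *
 *   ip link set dev eth0 down
 *   ip link set dev eth0 mtu 4000
 *   ip link set dev eth0 up
 *
 * The accepted range is bounded by ndev->min_mtu and ndev->max_mtu, which
 * are set in stmmac_dvr_probe() below.
 */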
3601 
3602 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3603 					     netdev_features_t features)
3604 {
3605 	struct stmmac_priv *priv = netdev_priv(dev);
3606 
3607 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3608 		features &= ~NETIF_F_RXCSUM;
3609 
3610 	if (!priv->plat->tx_coe)
3611 		features &= ~NETIF_F_CSUM_MASK;
3612 
3613 	/* Some GMAC devices have buggy Jumbo frame support that
3614 	 * requires Tx COE to be disabled for oversized frames
3615 	 * (due to limited buffer sizes). In this case we disable
3616 	 * TX csum insertion in the TDES and do not use SF.
3617 	 */
3618 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3619 		features &= ~NETIF_F_CSUM_MASK;
3620 
3621 	/* Disable tso if asked by ethtool */
3622 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3623 		if (features & NETIF_F_TSO)
3624 			priv->tso = true;
3625 		else
3626 			priv->tso = false;
3627 	}
3628 
3629 	return features;
3630 }
3631 
3632 static int stmmac_set_features(struct net_device *netdev,
3633 			       netdev_features_t features)
3634 {
3635 	struct stmmac_priv *priv = netdev_priv(netdev);
3636 
3637 	/* Keep the COE Type in case csum is supported */
3638 	if (features & NETIF_F_RXCSUM)
3639 		priv->hw->rx_csum = priv->plat->rx_coe;
3640 	else
3641 		priv->hw->rx_csum = 0;
3642 	/* No check needed because rx_coe has been set before and it will be
3643 	 * fixed in case of issue.
3644 	 */
3645 	stmmac_rx_ipc(priv, priv->hw);
3646 
3647 	return 0;
3648 }
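
/* Illustrative example: the fix_features/set_features pair above is what
 * runs when checksum offload or TSO is toggled from user space, e.g.
 * (assuming the netdev is eth0):
 *
 *   ethtool -K eth0 rx off tx off
 *   ethtool -K eth0 tso on
 *
 * stmmac_fix_features() masks out requests the platform cannot honour and
 * updates priv->tso, while stmmac_set_features() reprograms the RX IPC
 * engine through stmmac_rx_ipc().
 */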
3649 
3650 /**
3651  *  stmmac_interrupt - main ISR
3652  *  @irq: interrupt number.
3653  *  @dev_id: to pass the net device pointer.
3654  *  Description: this is the main driver interrupt service routine.
3655  *  It can call:
3656  *  o DMA service routine (to manage incoming frame reception and transmission
3657  *    status)
3658  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3659  *    interrupts.
3660  */
3661 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3662 {
3663 	struct net_device *dev = (struct net_device *)dev_id;
3664 	struct stmmac_priv *priv = netdev_priv(dev);
3665 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3666 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3667 	u32 queues_count;
3668 	u32 queue;
3669 	bool xmac;
3670 
3671 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3672 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3673 
3674 	if (priv->irq_wake)
3675 		pm_wakeup_event(priv->device, 0);
3676 
3677 	if (unlikely(!dev)) {
3678 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3679 		return IRQ_NONE;
3680 	}
3681 
3682 	/* Check if adapter is up */
3683 	if (test_bit(STMMAC_DOWN, &priv->state))
3684 		return IRQ_HANDLED;
3685 	/* Check if a fatal error happened */
3686 	if (stmmac_safety_feat_interrupt(priv))
3687 		return IRQ_HANDLED;
3688 
3689 	/* To handle GMAC own interrupts */
3690 	if ((priv->plat->has_gmac) || xmac) {
3691 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3692 		int mtl_status;
3693 
3694 		if (unlikely(status)) {
3695 			/* For LPI we need to save the tx status */
3696 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3697 				priv->tx_path_in_lpi_mode = true;
3698 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3699 				priv->tx_path_in_lpi_mode = false;
3700 		}
3701 
3702 		for (queue = 0; queue < queues_count; queue++) {
3703 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3704 
3705 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3706 								queue);
3707 			if (mtl_status != -EINVAL)
3708 				status |= mtl_status;
3709 
3710 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3711 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3712 						       rx_q->rx_tail_addr,
3713 						       queue);
3714 		}
3715 
3716 		/* PCS link status */
3717 		if (priv->hw->pcs) {
3718 			if (priv->xstats.pcs_link)
3719 				netif_carrier_on(dev);
3720 			else
3721 				netif_carrier_off(dev);
3722 		}
3723 	}
3724 
3725 	/* To handle DMA interrupts */
3726 	stmmac_dma_interrupt(priv);
3727 
3728 	return IRQ_HANDLED;
3729 }
3730 
3731 #ifdef CONFIG_NET_POLL_CONTROLLER
3732 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3733  * to allow network I/O with interrupts disabled.
3734  */
3735 static void stmmac_poll_controller(struct net_device *dev)
3736 {
3737 	disable_irq(dev->irq);
3738 	stmmac_interrupt(dev->irq, dev);
3739 	enable_irq(dev->irq);
3740 }
3741 #endif
3742 
3743 /**
3744  *  stmmac_ioctl - Entry point for the Ioctl
3745  *  @dev: Device pointer.
3746  *  @rq: An IOCTL-specific structure that can contain a pointer to
3747  *  a proprietary structure used to pass information to the driver.
3748  *  @cmd: IOCTL command
3749  *  Description:
3750  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3751  */
3752 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3753 {
3754 	int ret = -EOPNOTSUPP;
3755 
3756 	if (!netif_running(dev))
3757 		return -EINVAL;
3758 
3759 	switch (cmd) {
3760 	case SIOCGMIIPHY:
3761 	case SIOCGMIIREG:
3762 	case SIOCSMIIREG:
3763 		if (!dev->phydev)
3764 			return -EINVAL;
3765 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3766 		break;
3767 	case SIOCSHWTSTAMP:
3768 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3769 		break;
3770 	default:
3771 		break;
3772 	}
3773 
3774 	return ret;
3775 }
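
/* Illustrative user-space sketch (not part of the driver): the SIOCSHWTSTAMP
 * case handled above is normally driven through struct hwtstamp_config from
 * <linux/net_tstamp.h>, roughly as follows:
 *
 *   struct ifreq ifr = { 0 };
 *   struct hwtstamp_config cfg = { 0 };
 *
 *   cfg.tx_type = HWTSTAMP_TX_ON;
 *   cfg.rx_filter = HWTSTAMP_FILTER_ALL;
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);   // sock_fd: any AF_INET socket
 *
 * The MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) are simply forwarded
 * to phy_mii_ioctl().
 */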
3776 
3777 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3778 				    void *cb_priv)
3779 {
3780 	struct stmmac_priv *priv = cb_priv;
3781 	int ret = -EOPNOTSUPP;
3782 
3783 	stmmac_disable_all_queues(priv);
3784 
3785 	switch (type) {
3786 	case TC_SETUP_CLSU32:
3787 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3788 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3789 		break;
3790 	default:
3791 		break;
3792 	}
3793 
3794 	stmmac_enable_all_queues(priv);
3795 	return ret;
3796 }
3797 
3798 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3799 				 struct tc_block_offload *f)
3800 {
3801 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3802 		return -EOPNOTSUPP;
3803 
3804 	switch (f->command) {
3805 	case TC_BLOCK_BIND:
3806 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3807 				priv, priv, f->extack);
3808 	case TC_BLOCK_UNBIND:
3809 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3810 		return 0;
3811 	default:
3812 		return -EOPNOTSUPP;
3813 	}
3814 }
3815 
3816 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3817 			   void *type_data)
3818 {
3819 	struct stmmac_priv *priv = netdev_priv(ndev);
3820 
3821 	switch (type) {
3822 	case TC_SETUP_BLOCK:
3823 		return stmmac_setup_tc_block(priv, type_data);
3824 	case TC_SETUP_QDISC_CBS:
3825 		return stmmac_tc_setup_cbs(priv, priv, type_data);
3826 	default:
3827 		return -EOPNOTSUPP;
3828 	}
3829 }
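
/* Illustrative examples (handles and parameters are placeholders): the two
 * offloads wired up above are normally exercised with the tc tool, e.g.:
 *
 *   # CBS shaping on a TX queue (TC_SETUP_QDISC_CBS), under an mqprio root
 *   tc qdisc replace dev eth0 parent 100:2 cbs idleslope 20000 \
 *       sendslope -980000 hicredit 30 locredit -1470 offload 1
 *
 *   # u32 classifier on the ingress block (TC_SETUP_CLSU32)
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: protocol ip prio 1 u32 \
 *       match ip dst 192.168.0.2/32 action drop
 *
 * Whether a given filter is actually offloaded depends on
 * tc_cls_can_offload_and_chain0() and on stmmac_tc_setup_cls_u32().
 */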
3830 
3831 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3832 {
3833 	struct stmmac_priv *priv = netdev_priv(ndev);
3834 	int ret = 0;
3835 
3836 	ret = eth_mac_addr(ndev, addr);
3837 	if (ret)
3838 		return ret;
3839 
3840 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3841 
3842 	return ret;
3843 }
3844 
3845 #ifdef CONFIG_DEBUG_FS
3846 static struct dentry *stmmac_fs_dir;
3847 
3848 static void sysfs_display_ring(void *head, int size, int extend_desc,
3849 			       struct seq_file *seq)
3850 {
3851 	int i;
3852 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3853 	struct dma_desc *p = (struct dma_desc *)head;
3854 
3855 	for (i = 0; i < size; i++) {
3856 		if (extend_desc) {
3857 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3858 				   i, (unsigned int)virt_to_phys(ep),
3859 				   le32_to_cpu(ep->basic.des0),
3860 				   le32_to_cpu(ep->basic.des1),
3861 				   le32_to_cpu(ep->basic.des2),
3862 				   le32_to_cpu(ep->basic.des3));
3863 			ep++;
3864 		} else {
3865 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3866 				   i, (unsigned int)virt_to_phys(p),
3867 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3868 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3869 			p++;
3870 		}
3871 		seq_printf(seq, "\n");
3872 	}
3873 }
3874 
3875 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3876 {
3877 	struct net_device *dev = seq->private;
3878 	struct stmmac_priv *priv = netdev_priv(dev);
3879 	u32 rx_count = priv->plat->rx_queues_to_use;
3880 	u32 tx_count = priv->plat->tx_queues_to_use;
3881 	u32 queue;
3882 
3883 	for (queue = 0; queue < rx_count; queue++) {
3884 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3885 
3886 		seq_printf(seq, "RX Queue %d:\n", queue);
3887 
3888 		if (priv->extend_desc) {
3889 			seq_printf(seq, "Extended descriptor ring:\n");
3890 			sysfs_display_ring((void *)rx_q->dma_erx,
3891 					   DMA_RX_SIZE, 1, seq);
3892 		} else {
3893 			seq_printf(seq, "Descriptor ring:\n");
3894 			sysfs_display_ring((void *)rx_q->dma_rx,
3895 					   DMA_RX_SIZE, 0, seq);
3896 		}
3897 	}
3898 
3899 	for (queue = 0; queue < tx_count; queue++) {
3900 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3901 
3902 		seq_printf(seq, "TX Queue %d:\n", queue);
3903 
3904 		if (priv->extend_desc) {
3905 			seq_printf(seq, "Extended descriptor ring:\n");
3906 			sysfs_display_ring((void *)tx_q->dma_etx,
3907 					   DMA_TX_SIZE, 1, seq);
3908 		} else {
3909 			seq_printf(seq, "Descriptor ring:\n");
3910 			sysfs_display_ring((void *)tx_q->dma_tx,
3911 					   DMA_TX_SIZE, 0, seq);
3912 		}
3913 	}
3914 
3915 	return 0;
3916 }
3917 
3918 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3919 {
3920 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3921 }
3922 
3923 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3924 
3925 static const struct file_operations stmmac_rings_status_fops = {
3926 	.owner = THIS_MODULE,
3927 	.open = stmmac_sysfs_ring_open,
3928 	.read = seq_read,
3929 	.llseek = seq_lseek,
3930 	.release = single_release,
3931 };
3932 
3933 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3934 {
3935 	struct net_device *dev = seq->private;
3936 	struct stmmac_priv *priv = netdev_priv(dev);
3937 
3938 	if (!priv->hw_cap_support) {
3939 		seq_printf(seq, "DMA HW features not supported\n");
3940 		return 0;
3941 	}
3942 
3943 	seq_printf(seq, "==============================\n");
3944 	seq_printf(seq, "\tDMA HW features\n");
3945 	seq_printf(seq, "==============================\n");
3946 
3947 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3948 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3949 	seq_printf(seq, "\t1000 Mbps: %s\n",
3950 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3951 	seq_printf(seq, "\tHalf duplex: %s\n",
3952 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3953 	seq_printf(seq, "\tHash Filter: %s\n",
3954 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3955 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3956 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3957 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3958 		   (priv->dma_cap.pcs) ? "Y" : "N");
3959 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3960 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3961 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3962 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3963 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3964 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3965 	seq_printf(seq, "\tRMON module: %s\n",
3966 		   (priv->dma_cap.rmon) ? "Y" : "N");
3967 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3968 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3969 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3970 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3971 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3972 		   (priv->dma_cap.eee) ? "Y" : "N");
3973 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3974 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3975 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3976 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3977 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3978 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3979 	} else {
3980 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3981 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3982 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3983 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3984 	}
3985 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3986 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3987 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3988 		   priv->dma_cap.number_rx_channel);
3989 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3990 		   priv->dma_cap.number_tx_channel);
3991 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3992 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3993 
3994 	return 0;
3995 }
3996 
3997 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3998 {
3999 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
4000 }
4001 
4002 static const struct file_operations stmmac_dma_cap_fops = {
4003 	.owner = THIS_MODULE,
4004 	.open = stmmac_sysfs_dma_cap_open,
4005 	.read = seq_read,
4006 	.llseek = seq_lseek,
4007 	.release = single_release,
4008 };
4009 
4010 static int stmmac_init_fs(struct net_device *dev)
4011 {
4012 	struct stmmac_priv *priv = netdev_priv(dev);
4013 
4014 	/* Create per netdev entries */
4015 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4016 
4017 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4018 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4019 
4020 		return -ENOMEM;
4021 	}
4022 
4023 	/* Entry to report DMA RX/TX rings */
4024 	priv->dbgfs_rings_status =
4025 		debugfs_create_file("descriptors_status", 0444,
4026 				    priv->dbgfs_dir, dev,
4027 				    &stmmac_rings_status_fops);
4028 
4029 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4030 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4031 		debugfs_remove_recursive(priv->dbgfs_dir);
4032 
4033 		return -ENOMEM;
4034 	}
4035 
4036 	/* Entry to report the DMA HW features */
4037 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4038 						  priv->dbgfs_dir,
4039 						  dev, &stmmac_dma_cap_fops);
4040 
4041 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4042 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4043 		debugfs_remove_recursive(priv->dbgfs_dir);
4044 
4045 		return -ENOMEM;
4046 	}
4047 
4048 	return 0;
4049 }
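
/* Illustrative example: once the interface is up, the two files created
 * above can be read directly (assuming debugfs is mounted at
 * /sys/kernel/debug and the netdev is eth0):
 *
 *   cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *   cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */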
4050 
4051 static void stmmac_exit_fs(struct net_device *dev)
4052 {
4053 	struct stmmac_priv *priv = netdev_priv(dev);
4054 
4055 	debugfs_remove_recursive(priv->dbgfs_dir);
4056 }
4057 #endif /* CONFIG_DEBUG_FS */
4058 
4059 static const struct net_device_ops stmmac_netdev_ops = {
4060 	.ndo_open = stmmac_open,
4061 	.ndo_start_xmit = stmmac_xmit,
4062 	.ndo_stop = stmmac_release,
4063 	.ndo_change_mtu = stmmac_change_mtu,
4064 	.ndo_fix_features = stmmac_fix_features,
4065 	.ndo_set_features = stmmac_set_features,
4066 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4067 	.ndo_tx_timeout = stmmac_tx_timeout,
4068 	.ndo_do_ioctl = stmmac_ioctl,
4069 	.ndo_setup_tc = stmmac_setup_tc,
4070 #ifdef CONFIG_NET_POLL_CONTROLLER
4071 	.ndo_poll_controller = stmmac_poll_controller,
4072 #endif
4073 	.ndo_set_mac_address = stmmac_set_mac_address,
4074 };
4075 
4076 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4077 {
4078 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4079 		return;
4080 	if (test_bit(STMMAC_DOWN, &priv->state))
4081 		return;
4082 
4083 	netdev_err(priv->dev, "Reset adapter.\n");
4084 
4085 	rtnl_lock();
4086 	netif_trans_update(priv->dev);
4087 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4088 		usleep_range(1000, 2000);
4089 
4090 	set_bit(STMMAC_DOWN, &priv->state);
4091 	dev_close(priv->dev);
4092 	dev_open(priv->dev);
4093 	clear_bit(STMMAC_DOWN, &priv->state);
4094 	clear_bit(STMMAC_RESETING, &priv->state);
4095 	rtnl_unlock();
4096 }
4097 
4098 static void stmmac_service_task(struct work_struct *work)
4099 {
4100 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4101 			service_task);
4102 
4103 	stmmac_reset_subtask(priv);
4104 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4105 }
4106 
4107 /**
4108  *  stmmac_hw_init - Init the MAC device
4109  *  @priv: driver private structure
4110  *  Description: this function configures the MAC device according to
4111  *  some platform parameters or the HW capability register. It prepares the
4112  *  driver to use either ring or chain mode and to set up either enhanced or
4113  *  normal descriptors.
4114  */
4115 static int stmmac_hw_init(struct stmmac_priv *priv)
4116 {
4117 	int ret;
4118 
4119 	/* dwmac-sun8i only works in chain mode */
4120 	if (priv->plat->has_sun8i)
4121 		chain_mode = 1;
4122 	priv->chain_mode = chain_mode;
4123 
4124 	/* Initialize HW Interface */
4125 	ret = stmmac_hwif_init(priv);
4126 	if (ret)
4127 		return ret;
4128 
4129 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
4130 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4131 	if (priv->hw_cap_support) {
4132 		dev_info(priv->device, "DMA HW capability register supported\n");
4133 
4134 		/* We can override some gmac/dma configuration fields
4135 		 * (e.g. enh_desc, tx_coe) that are passed through the
4136 		 * platform data with the values from the HW capability
4137 		 * register (if supported).
4138 		 */
4139 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4140 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4141 		priv->hw->pmt = priv->plat->pmt;
4142 
4143 		/* TXCOE doesn't work in thresh DMA mode */
4144 		if (priv->plat->force_thresh_dma_mode)
4145 			priv->plat->tx_coe = 0;
4146 		else
4147 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4148 
4149 		/* In case of GMAC4 rx_coe is from HW cap register. */
4150 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4151 
4152 		if (priv->dma_cap.rx_coe_type2)
4153 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4154 		else if (priv->dma_cap.rx_coe_type1)
4155 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4156 
4157 	} else {
4158 		dev_info(priv->device, "No HW DMA feature register supported\n");
4159 	}
4160 
4161 	if (priv->plat->rx_coe) {
4162 		priv->hw->rx_csum = priv->plat->rx_coe;
4163 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4164 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4165 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4166 	}
4167 	if (priv->plat->tx_coe)
4168 		dev_info(priv->device, "TX Checksum insertion supported\n");
4169 
4170 	if (priv->plat->pmt) {
4171 		dev_info(priv->device, "Wake-Up On LAN supported\n");
4172 		device_set_wakeup_capable(priv->device, 1);
4173 	}
4174 
4175 	if (priv->dma_cap.tsoen)
4176 		dev_info(priv->device, "TSO supported\n");
4177 
4178 	/* Run HW quirks, if any */
4179 	if (priv->hwif_quirks) {
4180 		ret = priv->hwif_quirks(priv);
4181 		if (ret)
4182 			return ret;
4183 	}
4184 
4185 	return 0;
4186 }
4187 
4188 /**
4189  * stmmac_dvr_probe
4190  * @device: device pointer
4191  * @plat_dat: platform data pointer
4192  * @res: stmmac resource pointer
4193  * Description: this is the main probe function used to
4194  * call alloc_etherdev and allocate the private structure.
4195  * Return:
4196  * returns 0 on success, otherwise errno.
4197  */
4198 int stmmac_dvr_probe(struct device *device,
4199 		     struct plat_stmmacenet_data *plat_dat,
4200 		     struct stmmac_resources *res)
4201 {
4202 	struct net_device *ndev = NULL;
4203 	struct stmmac_priv *priv;
4204 	int ret = 0;
4205 	u32 queue;
4206 
4207 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4208 				  MTL_MAX_TX_QUEUES,
4209 				  MTL_MAX_RX_QUEUES);
4210 	if (!ndev)
4211 		return -ENOMEM;
4212 
4213 	SET_NETDEV_DEV(ndev, device);
4214 
4215 	priv = netdev_priv(ndev);
4216 	priv->device = device;
4217 	priv->dev = ndev;
4218 
4219 	stmmac_set_ethtool_ops(ndev);
4220 	priv->pause = pause;
4221 	priv->plat = plat_dat;
4222 	priv->ioaddr = res->addr;
4223 	priv->dev->base_addr = (unsigned long)res->addr;
4224 
4225 	priv->dev->irq = res->irq;
4226 	priv->wol_irq = res->wol_irq;
4227 	priv->lpi_irq = res->lpi_irq;
4228 
4229 	if (res->mac)
4230 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4231 
4232 	dev_set_drvdata(device, priv->dev);
4233 
4234 	/* Verify driver arguments */
4235 	stmmac_verify_args();
4236 
4237 	/* Allocate workqueue */
4238 	priv->wq = create_singlethread_workqueue("stmmac_wq");
4239 	if (!priv->wq) {
4240 		dev_err(priv->device, "failed to create workqueue\n");
4241 		goto error_wq;
4242 	}
4243 
4244 	INIT_WORK(&priv->service_task, stmmac_service_task);
4245 
4246 	/* Override with kernel parameters if supplied XXX CRS XXX
4247 	 * this needs to have multiple instances
4248 	 */
4249 	if ((phyaddr >= 0) && (phyaddr <= 31))
4250 		priv->plat->phy_addr = phyaddr;
4251 
4252 	if (priv->plat->stmmac_rst) {
4253 		ret = reset_control_assert(priv->plat->stmmac_rst);
4254 		reset_control_deassert(priv->plat->stmmac_rst);
4255 		/* Some reset controllers have only a reset callback instead of
4256 		 * an assert + deassert callback pair.
4257 		 */
4258 		if (ret == -ENOTSUPP)
4259 			reset_control_reset(priv->plat->stmmac_rst);
4260 	}
4261 
4262 	/* Init MAC and get the capabilities */
4263 	ret = stmmac_hw_init(priv);
4264 	if (ret)
4265 		goto error_hw_init;
4266 
4267 	/* Configure real RX and TX queues */
4268 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4269 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4270 
4271 	ndev->netdev_ops = &stmmac_netdev_ops;
4272 
4273 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4274 			    NETIF_F_RXCSUM;
4275 
4276 	ret = stmmac_tc_init(priv, priv);
4277 	if (!ret) {
4278 		ndev->hw_features |= NETIF_F_HW_TC;
4279 	}
4280 
4281 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4282 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4283 		priv->tso = true;
4284 		dev_info(priv->device, "TSO feature enabled\n");
4285 	}
4286 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4287 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4288 #ifdef STMMAC_VLAN_TAG_USED
4289 	/* Both mac100 and gmac support receive VLAN tag detection */
4290 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4291 #endif
4292 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4293 
4294 	/* MTU range: 46 - hw-specific max */
4295 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4296 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4297 		ndev->max_mtu = JUMBO_LEN;
4298 	else if (priv->plat->has_xgmac)
4299 		ndev->max_mtu = XGMAC_JUMBO_LEN;
4300 	else
4301 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4302 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4303 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4304 	 */
4305 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4306 	    (priv->plat->maxmtu >= ndev->min_mtu))
4307 		ndev->max_mtu = priv->plat->maxmtu;
4308 	else if (priv->plat->maxmtu < ndev->min_mtu)
4309 		dev_warn(priv->device,
4310 			 "%s: warning: maxmtu has an invalid value (%d)\n",
4311 			 __func__, priv->plat->maxmtu);
4312 
4313 	if (flow_ctrl)
4314 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4315 
4316 	/* Rx Watchdog is available in cores newer than 3.40.
4317 	 * In some cases, for example on buggy HW, this feature
4318 	 * has to be disabled; this can be done by passing the
4319 	 * riwt_off field from the platform.
4320 	 */
4321 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4322 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4323 		priv->use_riwt = 1;
4324 		dev_info(priv->device,
4325 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4326 	}
4327 
4328 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4329 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4330 
4331 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4332 			       (8 * priv->plat->rx_queues_to_use));
4333 	}
4334 
4335 	mutex_init(&priv->lock);
4336 
4337 	/* If a specific clk_csr value is passed from the platform,
4338 	 * the CSR Clock Range selection cannot be changed at run-time
4339 	 * and it is fixed. Otherwise the driver will try to set the
4340 	 * MDC clock dynamically according to the actual csr clock
4341 	 * input.
4342 	 */
4343 	if (!priv->plat->clk_csr)
4344 		stmmac_clk_csr_set(priv);
4345 	else
4346 		priv->clk_csr = priv->plat->clk_csr;
4347 
4348 	stmmac_check_pcs_mode(priv);
4349 
4350 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4351 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4352 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4353 		/* MDIO bus Registration */
4354 		ret = stmmac_mdio_register(ndev);
4355 		if (ret < 0) {
4356 			dev_err(priv->device,
4357 				"%s: MDIO bus (id: %d) registration failed",
4358 				__func__, priv->plat->bus_id);
4359 			goto error_mdio_register;
4360 		}
4361 	}
4362 
4363 	ret = register_netdev(ndev);
4364 	if (ret) {
4365 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4366 			__func__, ret);
4367 		goto error_netdev_register;
4368 	}
4369 
4370 	return ret;
4371 
4372 error_netdev_register:
4373 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4374 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4375 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4376 		stmmac_mdio_unregister(ndev);
4377 error_mdio_register:
4378 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4379 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4380 
4381 		netif_napi_del(&rx_q->napi);
4382 	}
4383 error_hw_init:
4384 	destroy_workqueue(priv->wq);
4385 error_wq:
4386 	free_netdev(ndev);
4387 
4388 	return ret;
4389 }
4390 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4391 
4392 /**
4393  * stmmac_dvr_remove
4394  * @dev: device pointer
4395  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4396  * changes the link status and releases the DMA descriptor rings.
4397  */
4398 int stmmac_dvr_remove(struct device *dev)
4399 {
4400 	struct net_device *ndev = dev_get_drvdata(dev);
4401 	struct stmmac_priv *priv = netdev_priv(ndev);
4402 
4403 	netdev_info(priv->dev, "%s: removing driver", __func__);
4404 
4405 	stmmac_stop_all_dma(priv);
4406 
4407 	stmmac_mac_set(priv, priv->ioaddr, false);
4408 	netif_carrier_off(ndev);
4409 	unregister_netdev(ndev);
4410 	if (priv->plat->stmmac_rst)
4411 		reset_control_assert(priv->plat->stmmac_rst);
4412 	clk_disable_unprepare(priv->plat->pclk);
4413 	clk_disable_unprepare(priv->plat->stmmac_clk);
4414 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4415 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4416 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4417 		stmmac_mdio_unregister(ndev);
4418 	destroy_workqueue(priv->wq);
4419 	mutex_destroy(&priv->lock);
4420 	free_netdev(ndev);
4421 
4422 	return 0;
4423 }
4424 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4425 
4426 /**
4427  * stmmac_suspend - suspend callback
4428  * @dev: device pointer
4429  * Description: this function suspends the device; it is called by the
4430  * platform driver to stop the network queues, program the PMT register
4431  * (for WoL), and clean up and release the driver resources.
4432  */
4433 int stmmac_suspend(struct device *dev)
4434 {
4435 	struct net_device *ndev = dev_get_drvdata(dev);
4436 	struct stmmac_priv *priv = netdev_priv(ndev);
4437 
4438 	if (!ndev || !netif_running(ndev))
4439 		return 0;
4440 
4441 	if (ndev->phydev)
4442 		phy_stop(ndev->phydev);
4443 
4444 	mutex_lock(&priv->lock);
4445 
4446 	netif_device_detach(ndev);
4447 	stmmac_stop_all_queues(priv);
4448 
4449 	stmmac_disable_all_queues(priv);
4450 
4451 	/* Stop TX/RX DMA */
4452 	stmmac_stop_all_dma(priv);
4453 
4454 	/* Enable Power down mode by programming the PMT regs */
4455 	if (device_may_wakeup(priv->device)) {
4456 		stmmac_pmt(priv, priv->hw, priv->wolopts);
4457 		priv->irq_wake = 1;
4458 	} else {
4459 		stmmac_mac_set(priv, priv->ioaddr, false);
4460 		pinctrl_pm_select_sleep_state(priv->device);
4461 		/* Disable the clocks when PMT wake-up is not used */
4462 		clk_disable(priv->plat->pclk);
4463 		clk_disable(priv->plat->stmmac_clk);
4464 	}
4465 	mutex_unlock(&priv->lock);
4466 
4467 	priv->oldlink = false;
4468 	priv->speed = SPEED_UNKNOWN;
4469 	priv->oldduplex = DUPLEX_UNKNOWN;
4470 	return 0;
4471 }
4472 EXPORT_SYMBOL_GPL(stmmac_suspend);
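
/* Illustrative example: the device_may_wakeup() branch in stmmac_suspend()
 * is taken when Wake-on-LAN has been enabled beforehand, typically with:
 *
 *   ethtool -s eth0 wol g
 *
 * which ends up in priv->wolopts and is programmed through stmmac_pmt();
 * otherwise the MAC is switched off and the clocks are gated.
 */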
4473 
4474 /**
4475  * stmmac_reset_queues_param - reset queue parameters
4476  * @dev: device pointer
4477  * @priv: driver private structure
4478 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4479 {
4480 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4481 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4482 	u32 queue;
4483 
4484 	for (queue = 0; queue < rx_cnt; queue++) {
4485 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4486 
4487 		rx_q->cur_rx = 0;
4488 		rx_q->dirty_rx = 0;
4489 	}
4490 
4491 	for (queue = 0; queue < tx_cnt; queue++) {
4492 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4493 
4494 		tx_q->cur_tx = 0;
4495 		tx_q->dirty_tx = 0;
4496 		tx_q->mss = 0;
4497 	}
4498 }
4499 
4500 /**
4501  * stmmac_resume - resume callback
4502  * @dev: device pointer
4503  * Description: on resume this function is invoked to set up the DMA and CORE
4504  * in a usable state.
4505  */
4506 int stmmac_resume(struct device *dev)
4507 {
4508 	struct net_device *ndev = dev_get_drvdata(dev);
4509 	struct stmmac_priv *priv = netdev_priv(ndev);
4510 
4511 	if (!netif_running(ndev))
4512 		return 0;
4513 
4514 	/* The Power Down bit in the PMT register is cleared
4515 	 * automatically as soon as a magic packet or a Wake-up frame
4516 	 * is received. Nevertheless, it's better to clear this bit
4517 	 * manually because it can cause problems when resuming
4518 	 * from other wake-up devices (e.g. serial console).
4519 	 */
4520 	if (device_may_wakeup(priv->device)) {
4521 		mutex_lock(&priv->lock);
4522 		stmmac_pmt(priv, priv->hw, 0);
4523 		mutex_unlock(&priv->lock);
4524 		priv->irq_wake = 0;
4525 	} else {
4526 		pinctrl_pm_select_default_state(priv->device);
4527 		/* enable the clocks previously disabled */
4528 		clk_enable(priv->plat->stmmac_clk);
4529 		clk_enable(priv->plat->pclk);
4530 		/* reset the phy so that it's ready */
4531 		if (priv->mii)
4532 			stmmac_mdio_reset(priv->mii);
4533 	}
4534 
4535 	netif_device_attach(ndev);
4536 
4537 	mutex_lock(&priv->lock);
4538 
4539 	stmmac_reset_queues_param(priv);
4540 
4541 	stmmac_clear_descriptors(priv);
4542 
4543 	stmmac_hw_setup(ndev, false);
4544 	stmmac_init_tx_coalesce(priv);
4545 	stmmac_set_rx_mode(ndev);
4546 
4547 	stmmac_enable_all_queues(priv);
4548 
4549 	stmmac_start_all_queues(priv);
4550 
4551 	mutex_unlock(&priv->lock);
4552 
4553 	if (ndev->phydev)
4554 		phy_start(ndev->phydev);
4555 
4556 	return 0;
4557 }
4558 EXPORT_SYMBOL_GPL(stmmac_resume);
4559 
4560 #ifndef MODULE
4561 static int __init stmmac_cmdline_opt(char *str)
4562 {
4563 	char *opt;
4564 
4565 	if (!str || !*str)
4566 		return -EINVAL;
4567 	while ((opt = strsep(&str, ",")) != NULL) {
4568 		if (!strncmp(opt, "debug:", 6)) {
4569 			if (kstrtoint(opt + 6, 0, &debug))
4570 				goto err;
4571 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4572 			if (kstrtoint(opt + 8, 0, &phyaddr))
4573 				goto err;
4574 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4575 			if (kstrtoint(opt + 7, 0, &buf_sz))
4576 				goto err;
4577 		} else if (!strncmp(opt, "tc:", 3)) {
4578 			if (kstrtoint(opt + 3, 0, &tc))
4579 				goto err;
4580 		} else if (!strncmp(opt, "watchdog:", 9)) {
4581 			if (kstrtoint(opt + 9, 0, &watchdog))
4582 				goto err;
4583 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4584 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4585 				goto err;
4586 		} else if (!strncmp(opt, "pause:", 6)) {
4587 			if (kstrtoint(opt + 6, 0, &pause))
4588 				goto err;
4589 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4590 			if (kstrtoint(opt + 10, 0, &eee_timer))
4591 				goto err;
4592 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4593 			if (kstrtoint(opt + 11, 0, &chain_mode))
4594 				goto err;
4595 		}
4596 	}
4597 	return 0;
4598 
4599 err:
4600 	pr_err("%s: ERROR broken module parameter conversion", __func__);
4601 	return -EINVAL;
4602 }
4603 
4604 __setup("stmmaceth=", stmmac_cmdline_opt);
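
/* Illustrative example: when the driver is built into the kernel, the
 * options parsed above can be passed on the kernel command line, e.g.:
 *
 *   stmmaceth=debug:16,phyaddr:0,watchdog:4000,chain_mode:1
 *
 * When built as a module, the corresponding module parameters are used
 * instead.
 */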
4605 #endif /* MODULE */
4606 
4607 static int __init stmmac_init(void)
4608 {
4609 #ifdef CONFIG_DEBUG_FS
4610 	/* Create debugfs main directory if it doesn't exist yet */
4611 	if (!stmmac_fs_dir) {
4612 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4613 
4614 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4615 			pr_err("ERROR %s, debugfs create directory failed\n",
4616 			       STMMAC_RESOURCE_NAME);
4617 
4618 			return -ENOMEM;
4619 		}
4620 	}
4621 #endif
4622 
4623 	return 0;
4624 }
4625 
4626 static void __exit stmmac_exit(void)
4627 {
4628 #ifdef CONFIG_DEBUG_FS
4629 	debugfs_remove_recursive(stmmac_fs_dir);
4630 #endif
4631 }
4632 
4633 module_init(stmmac_init)
4634 module_exit(stmmac_exit)
4635 
4636 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4637 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4638 MODULE_LICENSE("GPL");
4639