xref: /openbmc/linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 0760aad038b5a032c31ea124feed63d88627d2f1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/prefetch.h>
32 #include <linux/pinctrl/consumer.h>
33 #ifdef CONFIG_DEBUG_FS
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #endif /* CONFIG_DEBUG_FS */
37 #include <linux/net_tstamp.h>
38 #include <linux/phylink.h>
39 #include <linux/udp.h>
40 #include <net/pkt_cls.h>
41 #include "stmmac_ptp.h"
42 #include "stmmac.h"
43 #include <linux/reset.h>
44 #include <linux/of_mdio.h>
45 #include "dwmac1000.h"
46 #include "dwxgmac2.h"
47 #include "hwif.h"
48 
49 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
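/* Added note (illustrative, assuming 64-byte cache lines): STMMAC_ALIGN(1522)
 * first rounds up to 1536 via ALIGN(x, SMP_CACHE_BYTES); the outer
 * ALIGN(..., 16) only matters when the cache line size is smaller than
 * 16 bytes, so the value used for DMA sizing here would be 1536.
 */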
50 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
51 
52 /* Module parameters */
53 #define TX_TIMEO	5000
54 static int watchdog = TX_TIMEO;
55 module_param(watchdog, int, 0644);
56 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
57 
58 static int debug = -1;
59 module_param(debug, int, 0644);
60 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
61 
62 static int phyaddr = -1;
63 module_param(phyaddr, int, 0444);
64 MODULE_PARM_DESC(phyaddr, "Physical device address");
65 
66 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
67 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
68 
69 static int flow_ctrl = FLOW_AUTO;
70 module_param(flow_ctrl, int, 0644);
71 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
72 
73 static int pause = PAUSE_TIME;
74 module_param(pause, int, 0644);
75 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
76 
77 #define TC_DEFAULT 64
78 static int tc = TC_DEFAULT;
79 module_param(tc, int, 0644);
80 MODULE_PARM_DESC(tc, "DMA threshold control value");
81 
82 #define	DEFAULT_BUFSIZE	1536
83 static int buf_sz = DEFAULT_BUFSIZE;
84 module_param(buf_sz, int, 0644);
85 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
86 
87 #define	STMMAC_RX_COPYBREAK	256
88 
89 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
90 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
91 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
92 
93 #define STMMAC_DEFAULT_LPI_TIMER	1000
94 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
95 module_param(eee_timer, int, 0644);
96 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
97 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
98 
99 /* By default the driver will use the ring mode to manage tx and rx descriptors,
100  * but allow the user to force the use of chain mode instead of ring mode.
101  */
102 static unsigned int chain_mode;
103 module_param(chain_mode, int, 0444);
104 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
105 
106 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
107 
108 #ifdef CONFIG_DEBUG_FS
109 static const struct net_device_ops stmmac_netdev_ops;
110 static void stmmac_init_fs(struct net_device *dev);
111 static void stmmac_exit_fs(struct net_device *dev);
112 #endif
113 
114 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
115 
116 /**
117  * stmmac_verify_args - verify the driver parameters.
118  * Description: it checks the driver parameters and sets a default in case of
119  * errors.
120  */
121 static void stmmac_verify_args(void)
122 {
123 	if (unlikely(watchdog < 0))
124 		watchdog = TX_TIMEO;
125 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
126 		buf_sz = DEFAULT_BUFSIZE;
127 	if (unlikely(flow_ctrl > 1))
128 		flow_ctrl = FLOW_AUTO;
129 	else if (likely(flow_ctrl < 0))
130 		flow_ctrl = FLOW_OFF;
131 	if (unlikely((pause < 0) || (pause > 0xffff)))
132 		pause = PAUSE_TIME;
133 	if (eee_timer < 0)
134 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
135 }
136 
137 /**
138  * stmmac_disable_all_queues - Disable all queues
139  * @priv: driver private structure
140  */
141 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
142 {
143 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
144 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
145 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
146 	u32 queue;
147 
148 	for (queue = 0; queue < maxq; queue++) {
149 		struct stmmac_channel *ch = &priv->channel[queue];
150 
151 		if (queue < rx_queues_cnt)
152 			napi_disable(&ch->rx_napi);
153 		if (queue < tx_queues_cnt)
154 			napi_disable(&ch->tx_napi);
155 	}
156 }
157 
158 /**
159  * stmmac_enable_all_queues - Enable all queues
160  * @priv: driver private structure
161  */
162 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
163 {
164 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
165 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
166 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
167 	u32 queue;
168 
169 	for (queue = 0; queue < maxq; queue++) {
170 		struct stmmac_channel *ch = &priv->channel[queue];
171 
172 		if (queue < rx_queues_cnt)
173 			napi_enable(&ch->rx_napi);
174 		if (queue < tx_queues_cnt)
175 			napi_enable(&ch->tx_napi);
176 	}
177 }
178 
179 /**
180  * stmmac_stop_all_queues - Stop all queues
181  * @priv: driver private structure
182  */
183 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
184 {
185 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
186 	u32 queue;
187 
188 	for (queue = 0; queue < tx_queues_cnt; queue++)
189 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
190 }
191 
192 /**
193  * stmmac_start_all_queues - Start all queues
194  * @priv: driver private structure
195  */
196 static void stmmac_start_all_queues(struct stmmac_priv *priv)
197 {
198 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
199 	u32 queue;
200 
201 	for (queue = 0; queue < tx_queues_cnt; queue++)
202 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
203 }
204 
205 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
206 {
207 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
208 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
209 		queue_work(priv->wq, &priv->service_task);
210 }
211 
212 static void stmmac_global_err(struct stmmac_priv *priv)
213 {
214 	netif_carrier_off(priv->dev);
215 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
216 	stmmac_service_event_schedule(priv);
217 }
218 
219 /**
220  * stmmac_clk_csr_set - dynamically set the MDC clock
221  * @priv: driver private structure
222  * Description: this is to dynamically set the MDC clock according to the csr
223  * clock input.
224  * Note:
225  *	If a specific clk_csr value is passed from the platform
226  *	this means that the CSR Clock Range selection cannot be
227  *	changed at run-time and it is fixed (as reported in the driver
228  * documentation). Otherwise the driver will try to set the MDC
229  *	clock dynamically according to the actual clock input.
230  */
231 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
232 {
233 	u32 clk_rate;
234 
235 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
236 
237 	/* The platform-provided default clk_csr is assumed valid for all
238 	 * cases except the ones handled below.
239 	 * For values higher than the IEEE 802.3 specified frequency range
240 	 * we cannot estimate the proper divider because the frequency of
241 	 * clk_csr_i is not known, so we do not change the default
242 	 * divider.
243 	 */
244 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
245 		if (clk_rate < CSR_F_35M)
246 			priv->clk_csr = STMMAC_CSR_20_35M;
247 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
248 			priv->clk_csr = STMMAC_CSR_35_60M;
249 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
250 			priv->clk_csr = STMMAC_CSR_60_100M;
251 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
252 			priv->clk_csr = STMMAC_CSR_100_150M;
253 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
254 			priv->clk_csr = STMMAC_CSR_150_250M;
255 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
256 			priv->clk_csr = STMMAC_CSR_250_300M;
257 	}
258 
259 	if (priv->plat->has_sun8i) {
260 		if (clk_rate > 160000000)
261 			priv->clk_csr = 0x03;
262 		else if (clk_rate > 80000000)
263 			priv->clk_csr = 0x02;
264 		else if (clk_rate > 40000000)
265 			priv->clk_csr = 0x01;
266 		else
267 			priv->clk_csr = 0;
268 	}
269 
270 	if (priv->plat->has_xgmac) {
271 		if (clk_rate > 400000000)
272 			priv->clk_csr = 0x5;
273 		else if (clk_rate > 350000000)
274 			priv->clk_csr = 0x4;
275 		else if (clk_rate > 300000000)
276 			priv->clk_csr = 0x3;
277 		else if (clk_rate > 250000000)
278 			priv->clk_csr = 0x2;
279 		else if (clk_rate > 150000000)
280 			priv->clk_csr = 0x1;
281 		else
282 			priv->clk_csr = 0x0;
283 	}
284 }
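/* Illustrative mapping (not part of the original source): on a platform whose
 * CSR clock (clk_csr_i) runs at 125 MHz and which does not force a fixed
 * clk_csr, the if/else chain above selects STMMAC_CSR_100_150M, i.e. the MDC
 * divider range defined for a 100-150 MHz CSR clock.
 */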
285 
286 static void print_pkt(unsigned char *buf, int len)
287 {
288 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
289 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
290 }
291 
292 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
293 {
294 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
295 	u32 avail;
296 
297 	if (tx_q->dirty_tx > tx_q->cur_tx)
298 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
299 	else
300 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
301 
302 	return avail;
303 }
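/* Worked example (illustrative values, e.g. DMA_TX_SIZE = 512): with
 * cur_tx = 10 and dirty_tx = 4 the else branch above gives
 * 512 - 10 + 4 - 1 = 505 free descriptors. One slot is always kept unused
 * so that cur_tx == dirty_tx unambiguously means the ring is empty.
 */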
304 
305 /**
306  * stmmac_rx_dirty - Get the RX queue dirty count
307  * @priv: driver private structure
308  * @queue: RX queue index
309  */
310 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
311 {
312 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
313 	u32 dirty;
314 
315 	if (rx_q->dirty_rx <= rx_q->cur_rx)
316 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
317 	else
318 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
319 
320 	return dirty;
321 }
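/* Illustrative example: with cur_rx = 100 and dirty_rx = 90 the function
 * above returns 10, i.e. ten descriptors have been consumed by the driver
 * and are waiting to be refilled with fresh buffers.
 */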
322 
323 /**
324  * stmmac_enable_eee_mode - check and enter LPI mode
325  * @priv: driver private structure
326  * Description: this function verifies that all TX queues have completed
327  * their work and, if so, enters LPI mode when EEE is enabled.
328  */
329 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
330 {
331 	u32 tx_cnt = priv->plat->tx_queues_to_use;
332 	u32 queue;
333 
334 	/* check if all TX queues have the work finished */
335 	for (queue = 0; queue < tx_cnt; queue++) {
336 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
337 
338 		if (tx_q->dirty_tx != tx_q->cur_tx)
339 			return; /* still unfinished work */
340 	}
341 
342 	/* Check and enter in LPI mode */
343 	if (!priv->tx_path_in_lpi_mode)
344 		stmmac_set_eee_mode(priv, priv->hw,
345 				priv->plat->en_tx_lpi_clockgating);
346 }
347 
348 /**
349  * stmmac_disable_eee_mode - disable and exit from LPI mode
350  * @priv: driver private structure
351  * Description: this function exits LPI mode and disables EEE when the
352  * LPI state is active. It is called from the xmit path.
353  */
354 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
355 {
356 	stmmac_reset_eee_mode(priv, priv->hw);
357 	del_timer_sync(&priv->eee_ctrl_timer);
358 	priv->tx_path_in_lpi_mode = false;
359 }
360 
361 /**
362  * stmmac_eee_ctrl_timer - EEE TX SW timer.
363  * @t: timer_list hook carrying the driver private structure
364  * Description:
365  *  if there is no data transfer and we are not already in the LPI state,
366  *  then the MAC transmitter can be moved to the LPI state.
367  */
368 static void stmmac_eee_ctrl_timer(struct timer_list *t)
369 {
370 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
371 
372 	stmmac_enable_eee_mode(priv);
373 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
374 }
375 
376 /**
377  * stmmac_eee_init - init EEE
378  * @priv: driver private structure
379  * Description:
380  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
381  *  can also manage EEE, this function enables the LPI state and starts the
382  *  related timer.
383  */
384 bool stmmac_eee_init(struct stmmac_priv *priv)
385 {
386 	int tx_lpi_timer = priv->tx_lpi_timer;
387 
388 	/* When using the PCS we cannot deal with the PHY registers at this
389 	 * stage, so extra features like EEE are not supported.
390 	 */
391 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
392 	    priv->hw->pcs == STMMAC_PCS_RTBI)
393 		return false;
394 
395 	/* Check if MAC core supports the EEE feature. */
396 	if (!priv->dma_cap.eee)
397 		return false;
398 
399 	mutex_lock(&priv->lock);
400 
401 	/* Check if it needs to be deactivated */
402 	if (!priv->eee_active) {
403 		if (priv->eee_enabled) {
404 			netdev_dbg(priv->dev, "disable EEE\n");
405 			del_timer_sync(&priv->eee_ctrl_timer);
406 			stmmac_set_eee_timer(priv, priv->hw, 0, tx_lpi_timer);
407 		}
408 		mutex_unlock(&priv->lock);
409 		return false;
410 	}
411 
412 	if (priv->eee_active && !priv->eee_enabled) {
413 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
414 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
415 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
416 				     tx_lpi_timer);
417 	}
418 
419 	mutex_unlock(&priv->lock);
420 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
421 	return true;
422 }
423 
424 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
425  * @priv: driver private structure
426  * @p : descriptor pointer
427  * @skb : the socket buffer
428  * Description :
429  * This function reads the timestamp from the descriptor, performs some
430  * sanity checks and passes it to the stack.
431  */
432 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
433 				   struct dma_desc *p, struct sk_buff *skb)
434 {
435 	struct skb_shared_hwtstamps shhwtstamp;
436 	bool found = false;
437 	u64 ns = 0;
438 
439 	if (!priv->hwts_tx_en)
440 		return;
441 
442 	/* exit if skb doesn't support hw tstamp */
443 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
444 		return;
445 
446 	/* check tx tstamp status */
447 	if (stmmac_get_tx_timestamp_status(priv, p)) {
448 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
449 		found = true;
450 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
451 		found = true;
452 	}
453 
454 	if (found) {
455 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
456 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
457 
458 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
459 		/* pass tstamp to stack */
460 		skb_tstamp_tx(skb, &shhwtstamp);
461 	}
462 }
463 
464 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
465  * @priv: driver private structure
466  * @p : descriptor pointer
467  * @np : next descriptor pointer
468  * @skb : the socket buffer
469  * Description :
470  * This function reads the received packet's timestamp from the descriptor
471  * and passes it to the stack. It also performs some sanity checks.
472  */
473 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
474 				   struct dma_desc *np, struct sk_buff *skb)
475 {
476 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
477 	struct dma_desc *desc = p;
478 	u64 ns = 0;
479 
480 	if (!priv->hwts_rx_en)
481 		return;
482 	/* For GMAC4, the valid timestamp is from CTX next desc. */
483 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
484 		desc = np;
485 
486 	/* Check if timestamp is available */
487 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
488 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
489 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
490 		shhwtstamp = skb_hwtstamps(skb);
491 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
492 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
493 	} else  {
494 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
495 	}
496 }
497 
498 /**
499  *  stmmac_hwtstamp_set - control hardware timestamping.
500  *  @dev: device pointer.
501  *  @ifr: An IOCTL-specific structure that can contain a pointer to
502  *  a proprietary structure used to pass information to the driver.
503  *  Description:
504  *  This function configures the MAC to enable/disable both outgoing (TX)
505  *  and incoming (RX) packet timestamping based on user input.
506  *  Return Value:
507  *  0 on success and an appropriate -ve integer on failure.
508  */
509 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
510 {
511 	struct stmmac_priv *priv = netdev_priv(dev);
512 	struct hwtstamp_config config;
513 	struct timespec64 now;
514 	u64 temp = 0;
515 	u32 ptp_v2 = 0;
516 	u32 tstamp_all = 0;
517 	u32 ptp_over_ipv4_udp = 0;
518 	u32 ptp_over_ipv6_udp = 0;
519 	u32 ptp_over_ethernet = 0;
520 	u32 snap_type_sel = 0;
521 	u32 ts_master_en = 0;
522 	u32 ts_event_en = 0;
523 	u32 sec_inc = 0;
524 	u32 value = 0;
525 	bool xmac;
526 
527 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
528 
529 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
530 		netdev_alert(priv->dev, "No support for HW time stamping\n");
531 		priv->hwts_tx_en = 0;
532 		priv->hwts_rx_en = 0;
533 
534 		return -EOPNOTSUPP;
535 	}
536 
537 	if (copy_from_user(&config, ifr->ifr_data,
538 			   sizeof(config)))
539 		return -EFAULT;
540 
541 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
542 		   __func__, config.flags, config.tx_type, config.rx_filter);
543 
544 	/* reserved for future extensions */
545 	if (config.flags)
546 		return -EINVAL;
547 
548 	if (config.tx_type != HWTSTAMP_TX_OFF &&
549 	    config.tx_type != HWTSTAMP_TX_ON)
550 		return -ERANGE;
551 
552 	if (priv->adv_ts) {
553 		switch (config.rx_filter) {
554 		case HWTSTAMP_FILTER_NONE:
555 			/* do not time stamp any incoming packet */
556 			config.rx_filter = HWTSTAMP_FILTER_NONE;
557 			break;
558 
559 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
560 			/* PTP v1, UDP, any kind of event packet */
561 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
562 			/* 'xmac' hardware can support Sync, Pdelay_Req and
563 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
564 			 * This leaves Delay_Req timestamps out.
565 			 * Enable all events *and* general purpose message
566 			 * timestamping
567 			 */
568 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
569 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
570 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
571 			break;
572 
573 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
574 			/* PTP v1, UDP, Sync packet */
575 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
576 			/* take time stamp for SYNC messages only */
577 			ts_event_en = PTP_TCR_TSEVNTENA;
578 
579 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
580 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
581 			break;
582 
583 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
584 			/* PTP v1, UDP, Delay_req packet */
585 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
586 			/* take time stamp for Delay_Req messages only */
587 			ts_master_en = PTP_TCR_TSMSTRENA;
588 			ts_event_en = PTP_TCR_TSEVNTENA;
589 
590 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
591 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
592 			break;
593 
594 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
595 			/* PTP v2, UDP, any kind of event packet */
596 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
597 			ptp_v2 = PTP_TCR_TSVER2ENA;
598 			/* take time stamp for all event messages */
599 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
600 
601 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
602 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
603 			break;
604 
605 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
606 			/* PTP v2, UDP, Sync packet */
607 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
608 			ptp_v2 = PTP_TCR_TSVER2ENA;
609 			/* take time stamp for SYNC messages only */
610 			ts_event_en = PTP_TCR_TSEVNTENA;
611 
612 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
613 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
614 			break;
615 
616 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
617 			/* PTP v2, UDP, Delay_req packet */
618 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
619 			ptp_v2 = PTP_TCR_TSVER2ENA;
620 			/* take time stamp for Delay_Req messages only */
621 			ts_master_en = PTP_TCR_TSMSTRENA;
622 			ts_event_en = PTP_TCR_TSEVNTENA;
623 
624 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
625 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
626 			break;
627 
628 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
629 			/* PTP v2/802.1AS, any layer, any kind of event packet */
630 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
631 			ptp_v2 = PTP_TCR_TSVER2ENA;
632 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
633 			if (priv->synopsys_id != DWMAC_CORE_5_10)
634 				ts_event_en = PTP_TCR_TSEVNTENA;
635 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
636 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
637 			ptp_over_ethernet = PTP_TCR_TSIPENA;
638 			break;
639 
640 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
641 			/* PTP v2/802.1AS, any layer, Sync packet */
642 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
643 			ptp_v2 = PTP_TCR_TSVER2ENA;
644 			/* take time stamp for SYNC messages only */
645 			ts_event_en = PTP_TCR_TSEVNTENA;
646 
647 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
648 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
649 			ptp_over_ethernet = PTP_TCR_TSIPENA;
650 			break;
651 
652 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
653 			/* PTP v2/802.1AS, any layer, Delay_req packet */
654 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
655 			ptp_v2 = PTP_TCR_TSVER2ENA;
656 			/* take time stamp for Delay_Req messages only */
657 			ts_master_en = PTP_TCR_TSMSTRENA;
658 			ts_event_en = PTP_TCR_TSEVNTENA;
659 
660 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662 			ptp_over_ethernet = PTP_TCR_TSIPENA;
663 			break;
664 
665 		case HWTSTAMP_FILTER_NTP_ALL:
666 		case HWTSTAMP_FILTER_ALL:
667 			/* time stamp any incoming packet */
668 			config.rx_filter = HWTSTAMP_FILTER_ALL;
669 			tstamp_all = PTP_TCR_TSENALL;
670 			break;
671 
672 		default:
673 			return -ERANGE;
674 		}
675 	} else {
676 		switch (config.rx_filter) {
677 		case HWTSTAMP_FILTER_NONE:
678 			config.rx_filter = HWTSTAMP_FILTER_NONE;
679 			break;
680 		default:
681 			/* PTP v1, UDP, any kind of event packet */
682 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
683 			break;
684 		}
685 	}
686 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
687 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
688 
689 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
690 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
691 	else {
692 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
693 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
694 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
695 			 ts_master_en | snap_type_sel);
696 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
697 
698 		/* program Sub Second Increment reg */
699 		stmmac_config_sub_second_increment(priv,
700 				priv->ptpaddr, priv->plat->clk_ptp_rate,
701 				xmac, &sec_inc);
702 		temp = div_u64(1000000000ULL, sec_inc);
703 
704 		/* Store sub second increment and flags for later use */
705 		priv->sub_second_inc = sec_inc;
706 		priv->systime_flags = value;
707 
708 		/* calculate the default addend value:
709 		 * the formula is:
710 		 * addend = (2^32 * freq_div_ratio) / clk_ptp_rate;
711 		 * where freq_div_ratio = 1e9ns / sec_inc
712 		 */
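		/* Illustrative numbers (not from the original source): if
		 * sec_inc were 20 ns and clk_ptp_rate 100 MHz, then
		 * freq_div_ratio = 1e9 / 20 = 5e7 and the addend programmed
		 * below is (5e7 << 32) / 1e8 = 2^31 (0x80000000): the
		 * accumulator then overflows every two reference clock
		 * cycles, adding sec_inc (20 ns) to the system time each
		 * overflow.
		 */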
713 		temp = (u64)(temp << 32);
714 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
715 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
716 
717 		/* initialize system time */
718 		ktime_get_real_ts64(&now);
719 
720 		/* lower 32 bits of tv_sec are safe until y2106 */
721 		stmmac_init_systime(priv, priv->ptpaddr,
722 				(u32)now.tv_sec, now.tv_nsec);
723 	}
724 
725 	memcpy(&priv->tstamp_config, &config, sizeof(config));
726 
727 	return copy_to_user(ifr->ifr_data, &config,
728 			    sizeof(config)) ? -EFAULT : 0;
729 }
730 
731 /**
732  *  stmmac_hwtstamp_get - read hardware timestamping.
733  *  @dev: device pointer.
734  *  @ifr: An IOCTL-specific structure that can contain a pointer to
735  *  a proprietary structure used to pass information to the driver.
736  *  Description:
737  *  This function obtains the current hardware timestamping settings
738  *  as requested.
739  */
740 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
741 {
742 	struct stmmac_priv *priv = netdev_priv(dev);
743 	struct hwtstamp_config *config = &priv->tstamp_config;
744 
745 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
746 		return -EOPNOTSUPP;
747 
748 	return copy_to_user(ifr->ifr_data, config,
749 			    sizeof(*config)) ? -EFAULT : 0;
750 }
751 
752 /**
753  * stmmac_init_ptp - init PTP
754  * @priv: driver private structure
755  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
756  * This is done by looking at the HW cap. register.
757  * This function also registers the ptp driver.
758  */
759 static int stmmac_init_ptp(struct stmmac_priv *priv)
760 {
761 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
762 
763 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
764 		return -EOPNOTSUPP;
765 
766 	priv->adv_ts = 0;
767 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
768 	if (xmac && priv->dma_cap.atime_stamp)
769 		priv->adv_ts = 1;
770 	/* Dwmac 3.x core with extend_desc can support adv_ts */
771 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
772 		priv->adv_ts = 1;
773 
774 	if (priv->dma_cap.time_stamp)
775 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
776 
777 	if (priv->adv_ts)
778 		netdev_info(priv->dev,
779 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
780 
781 	priv->hwts_tx_en = 0;
782 	priv->hwts_rx_en = 0;
783 
784 	stmmac_ptp_register(priv);
785 
786 	return 0;
787 }
788 
789 static void stmmac_release_ptp(struct stmmac_priv *priv)
790 {
791 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
792 	stmmac_ptp_unregister(priv);
793 }
794 
795 /**
796  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
797  *  @priv: driver private structure
798  *  Description: It is used for configuring the flow control in all queues
799  */
800 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
801 {
802 	u32 tx_cnt = priv->plat->tx_queues_to_use;
803 
804 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
805 			priv->pause, tx_cnt);
806 }
807 
808 static void stmmac_validate(struct phylink_config *config,
809 			    unsigned long *supported,
810 			    struct phylink_link_state *state)
811 {
812 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
813 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
814 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
815 	int tx_cnt = priv->plat->tx_queues_to_use;
816 	int max_speed = priv->plat->max_speed;
817 
818 	phylink_set(mac_supported, 10baseT_Half);
819 	phylink_set(mac_supported, 10baseT_Full);
820 	phylink_set(mac_supported, 100baseT_Half);
821 	phylink_set(mac_supported, 100baseT_Full);
822 	phylink_set(mac_supported, 1000baseT_Half);
823 	phylink_set(mac_supported, 1000baseT_Full);
824 	phylink_set(mac_supported, 1000baseKX_Full);
825 
826 	phylink_set(mac_supported, Autoneg);
827 	phylink_set(mac_supported, Pause);
828 	phylink_set(mac_supported, Asym_Pause);
829 	phylink_set_port_modes(mac_supported);
830 
831 	/* Cut down 1G if asked to */
832 	if ((max_speed > 0) && (max_speed < 1000)) {
833 		phylink_set(mask, 1000baseT_Full);
834 		phylink_set(mask, 1000baseX_Full);
835 	} else if (priv->plat->has_xgmac) {
836 		if (!max_speed || (max_speed >= 2500)) {
837 			phylink_set(mac_supported, 2500baseT_Full);
838 			phylink_set(mac_supported, 2500baseX_Full);
839 		}
840 		if (!max_speed || (max_speed >= 5000)) {
841 			phylink_set(mac_supported, 5000baseT_Full);
842 		}
843 		if (!max_speed || (max_speed >= 10000)) {
844 			phylink_set(mac_supported, 10000baseSR_Full);
845 			phylink_set(mac_supported, 10000baseLR_Full);
846 			phylink_set(mac_supported, 10000baseER_Full);
847 			phylink_set(mac_supported, 10000baseLRM_Full);
848 			phylink_set(mac_supported, 10000baseT_Full);
849 			phylink_set(mac_supported, 10000baseKX4_Full);
850 			phylink_set(mac_supported, 10000baseKR_Full);
851 		}
852 		if (!max_speed || (max_speed >= 25000)) {
853 			phylink_set(mac_supported, 25000baseCR_Full);
854 			phylink_set(mac_supported, 25000baseKR_Full);
855 			phylink_set(mac_supported, 25000baseSR_Full);
856 		}
857 		if (!max_speed || (max_speed >= 40000)) {
858 			phylink_set(mac_supported, 40000baseKR4_Full);
859 			phylink_set(mac_supported, 40000baseCR4_Full);
860 			phylink_set(mac_supported, 40000baseSR4_Full);
861 			phylink_set(mac_supported, 40000baseLR4_Full);
862 		}
863 		if (!max_speed || (max_speed >= 50000)) {
864 			phylink_set(mac_supported, 50000baseCR2_Full);
865 			phylink_set(mac_supported, 50000baseKR2_Full);
866 			phylink_set(mac_supported, 50000baseSR2_Full);
867 			phylink_set(mac_supported, 50000baseKR_Full);
868 			phylink_set(mac_supported, 50000baseSR_Full);
869 			phylink_set(mac_supported, 50000baseCR_Full);
870 			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
871 			phylink_set(mac_supported, 50000baseDR_Full);
872 		}
873 		if (!max_speed || (max_speed >= 100000)) {
874 			phylink_set(mac_supported, 100000baseKR4_Full);
875 			phylink_set(mac_supported, 100000baseSR4_Full);
876 			phylink_set(mac_supported, 100000baseCR4_Full);
877 			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
878 			phylink_set(mac_supported, 100000baseKR2_Full);
879 			phylink_set(mac_supported, 100000baseSR2_Full);
880 			phylink_set(mac_supported, 100000baseCR2_Full);
881 			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
882 			phylink_set(mac_supported, 100000baseDR2_Full);
883 		}
884 	}
885 
886 	/* Half-duplex can only work with a single queue */
887 	if (tx_cnt > 1) {
888 		phylink_set(mask, 10baseT_Half);
889 		phylink_set(mask, 100baseT_Half);
890 		phylink_set(mask, 1000baseT_Half);
891 	}
892 
893 	linkmode_and(supported, supported, mac_supported);
894 	linkmode_andnot(supported, supported, mask);
895 
896 	linkmode_and(state->advertising, state->advertising, mac_supported);
897 	linkmode_andnot(state->advertising, state->advertising, mask);
898 
899 	/* If PCS is supported, check which modes it supports. */
900 	stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
901 }
902 
903 static void stmmac_mac_pcs_get_state(struct phylink_config *config,
904 				     struct phylink_link_state *state)
905 {
906 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
907 
908 	state->link = 0;
909 	stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
910 }
911 
912 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
913 			      const struct phylink_link_state *state)
914 {
915 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
916 
917 	stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
918 }
919 
920 static void stmmac_mac_an_restart(struct phylink_config *config)
921 {
922 	/* Not Supported */
923 }
924 
925 static void stmmac_mac_link_down(struct phylink_config *config,
926 				 unsigned int mode, phy_interface_t interface)
927 {
928 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
929 
930 	stmmac_mac_set(priv, priv->ioaddr, false);
931 	priv->eee_active = false;
932 	stmmac_eee_init(priv);
933 	stmmac_set_eee_pls(priv, priv->hw, false);
934 }
935 
936 static void stmmac_mac_link_up(struct phylink_config *config,
937 			       struct phy_device *phy,
938 			       unsigned int mode, phy_interface_t interface,
939 			       int speed, int duplex,
940 			       bool tx_pause, bool rx_pause)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 	u32 ctrl;
944 
945 	stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
946 
947 	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
948 	ctrl &= ~priv->hw->link.speed_mask;
949 
950 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
951 		switch (speed) {
952 		case SPEED_10000:
953 			ctrl |= priv->hw->link.xgmii.speed10000;
954 			break;
955 		case SPEED_5000:
956 			ctrl |= priv->hw->link.xgmii.speed5000;
957 			break;
958 		case SPEED_2500:
959 			ctrl |= priv->hw->link.xgmii.speed2500;
960 			break;
961 		default:
962 			return;
963 		}
964 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
965 		switch (speed) {
966 		case SPEED_100000:
967 			ctrl |= priv->hw->link.xlgmii.speed100000;
968 			break;
969 		case SPEED_50000:
970 			ctrl |= priv->hw->link.xlgmii.speed50000;
971 			break;
972 		case SPEED_40000:
973 			ctrl |= priv->hw->link.xlgmii.speed40000;
974 			break;
975 		case SPEED_25000:
976 			ctrl |= priv->hw->link.xlgmii.speed25000;
977 			break;
978 		case SPEED_10000:
979 			ctrl |= priv->hw->link.xgmii.speed10000;
980 			break;
981 		case SPEED_2500:
982 			ctrl |= priv->hw->link.speed2500;
983 			break;
984 		case SPEED_1000:
985 			ctrl |= priv->hw->link.speed1000;
986 			break;
987 		default:
988 			return;
989 		}
990 	} else {
991 		switch (speed) {
992 		case SPEED_2500:
993 			ctrl |= priv->hw->link.speed2500;
994 			break;
995 		case SPEED_1000:
996 			ctrl |= priv->hw->link.speed1000;
997 			break;
998 		case SPEED_100:
999 			ctrl |= priv->hw->link.speed100;
1000 			break;
1001 		case SPEED_10:
1002 			ctrl |= priv->hw->link.speed10;
1003 			break;
1004 		default:
1005 			return;
1006 		}
1007 	}
1008 
1009 	priv->speed = speed;
1010 
1011 	if (priv->plat->fix_mac_speed)
1012 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1013 
1014 	if (!duplex)
1015 		ctrl &= ~priv->hw->link.duplex;
1016 	else
1017 		ctrl |= priv->hw->link.duplex;
1018 
1019 	/* Flow Control operation */
1020 	if (tx_pause && rx_pause)
1021 		stmmac_mac_flow_ctrl(priv, duplex);
1022 
1023 	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1024 
1025 	stmmac_mac_set(priv, priv->ioaddr, true);
1026 	if (phy && priv->dma_cap.eee) {
1027 		priv->eee_active = phy_init_eee(phy, 1) >= 0;
1028 		priv->eee_enabled = stmmac_eee_init(priv);
1029 		stmmac_set_eee_pls(priv, priv->hw, true);
1030 	}
1031 }
1032 
1033 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1034 	.validate = stmmac_validate,
1035 	.mac_pcs_get_state = stmmac_mac_pcs_get_state,
1036 	.mac_config = stmmac_mac_config,
1037 	.mac_an_restart = stmmac_mac_an_restart,
1038 	.mac_link_down = stmmac_mac_link_down,
1039 	.mac_link_up = stmmac_mac_link_up,
1040 };
1041 
1042 /**
1043  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1044  * @priv: driver private structure
1045  * Description: this is to verify if the HW supports the Physical Coding
1046  * Sublayer (PCS), an interface that can be used when the MAC is
1047  * configured for the TBI, RTBI, or SGMII PHY interface.
1048  */
1049 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1050 {
1051 	int interface = priv->plat->interface;
1052 
1053 	if (priv->dma_cap.pcs) {
1054 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1055 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1056 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1057 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1058 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1059 			priv->hw->pcs = STMMAC_PCS_RGMII;
1060 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1061 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1062 			priv->hw->pcs = STMMAC_PCS_SGMII;
1063 		}
1064 	}
1065 }
1066 
1067 /**
1068  * stmmac_init_phy - PHY initialization
1069  * @dev: net device structure
1070  * Description: it initializes the driver's PHY state and attaches the PHY
1071  * to the MAC driver.
1072  *  Return value:
1073  *  0 on success
1074  */
1075 static int stmmac_init_phy(struct net_device *dev)
1076 {
1077 	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1078 	struct stmmac_priv *priv = netdev_priv(dev);
1079 	struct device_node *node;
1080 	int ret;
1081 
1082 	node = priv->plat->phylink_node;
1083 
1084 	if (node)
1085 		ret = phylink_of_phy_connect(priv->phylink, node, 0);
1086 
1087 	/* Some DT bindings do not set up the PHY handle. Let's try to
1088 	 * manually parse it
1089 	 */
1090 	if (!node || ret) {
1091 		int addr = priv->plat->phy_addr;
1092 		struct phy_device *phydev;
1093 
1094 		phydev = mdiobus_get_phy(priv->mii, addr);
1095 		if (!phydev) {
1096 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1097 			return -ENODEV;
1098 		}
1099 
1100 		ret = phylink_connect_phy(priv->phylink, phydev);
1101 	}
1102 
1103 	phylink_ethtool_get_wol(priv->phylink, &wol);
1104 	device_set_wakeup_capable(priv->device, !!wol.supported);
1105 
1106 	return ret;
1107 }
1108 
1109 static int stmmac_phy_setup(struct stmmac_priv *priv)
1110 {
1111 	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1112 	int mode = priv->plat->phy_interface;
1113 	struct phylink *phylink;
1114 
1115 	priv->phylink_config.dev = &priv->dev->dev;
1116 	priv->phylink_config.type = PHYLINK_NETDEV;
1117 	priv->phylink_config.pcs_poll = true;
1118 
1119 	if (!fwnode)
1120 		fwnode = dev_fwnode(priv->device);
1121 
1122 	phylink = phylink_create(&priv->phylink_config, fwnode,
1123 				 mode, &stmmac_phylink_mac_ops);
1124 	if (IS_ERR(phylink))
1125 		return PTR_ERR(phylink);
1126 
1127 	priv->phylink = phylink;
1128 	return 0;
1129 }
1130 
1131 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1132 {
1133 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1134 	void *head_rx;
1135 	u32 queue;
1136 
1137 	/* Display RX rings */
1138 	for (queue = 0; queue < rx_cnt; queue++) {
1139 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1140 
1141 		pr_info("\tRX Queue %u rings\n", queue);
1142 
1143 		if (priv->extend_desc)
1144 			head_rx = (void *)rx_q->dma_erx;
1145 		else
1146 			head_rx = (void *)rx_q->dma_rx;
1147 
1148 		/* Display RX ring */
1149 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1150 	}
1151 }
1152 
1153 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1154 {
1155 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1156 	void *head_tx;
1157 	u32 queue;
1158 
1159 	/* Display TX rings */
1160 	for (queue = 0; queue < tx_cnt; queue++) {
1161 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1162 
1163 		pr_info("\tTX Queue %d rings\n", queue);
1164 
1165 		if (priv->extend_desc)
1166 			head_tx = (void *)tx_q->dma_etx;
1167 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1168 			head_tx = (void *)tx_q->dma_entx;
1169 		else
1170 			head_tx = (void *)tx_q->dma_tx;
1171 
1172 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1173 	}
1174 }
1175 
1176 static void stmmac_display_rings(struct stmmac_priv *priv)
1177 {
1178 	/* Display RX ring */
1179 	stmmac_display_rx_rings(priv);
1180 
1181 	/* Display TX ring */
1182 	stmmac_display_tx_rings(priv);
1183 }
1184 
1185 static int stmmac_set_bfsize(int mtu, int bufsize)
1186 {
1187 	int ret = bufsize;
1188 
1189 	if (mtu >= BUF_SIZE_8KiB)
1190 		ret = BUF_SIZE_16KiB;
1191 	else if (mtu >= BUF_SIZE_4KiB)
1192 		ret = BUF_SIZE_8KiB;
1193 	else if (mtu >= BUF_SIZE_2KiB)
1194 		ret = BUF_SIZE_4KiB;
1195 	else if (mtu > DEFAULT_BUFSIZE)
1196 		ret = BUF_SIZE_2KiB;
1197 	else
1198 		ret = DEFAULT_BUFSIZE;
1199 
1200 	return ret;
1201 }
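/* Illustrative mapping (added note): a standard 1500-byte MTU keeps
 * DEFAULT_BUFSIZE (1536 bytes), an MTU of 3000 selects BUF_SIZE_4KiB, and
 * any MTU of 8 KiB or more selects BUF_SIZE_16KiB, the largest size
 * handled here.
 */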
1202 
1203 /**
1204  * stmmac_clear_rx_descriptors - clear RX descriptors
1205  * @priv: driver private structure
1206  * @queue: RX queue index
1207  * Description: this function is called to clear the RX descriptors
1208  * whether basic or extended descriptors are in use.
1209  */
1210 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1211 {
1212 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1213 	int i;
1214 
1215 	/* Clear the RX descriptors */
1216 	for (i = 0; i < DMA_RX_SIZE; i++)
1217 		if (priv->extend_desc)
1218 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1219 					priv->use_riwt, priv->mode,
1220 					(i == DMA_RX_SIZE - 1),
1221 					priv->dma_buf_sz);
1222 		else
1223 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1224 					priv->use_riwt, priv->mode,
1225 					(i == DMA_RX_SIZE - 1),
1226 					priv->dma_buf_sz);
1227 }
1228 
1229 /**
1230  * stmmac_clear_tx_descriptors - clear tx descriptors
1231  * @priv: driver private structure
1232  * @queue: TX queue index.
1233  * Description: this function is called to clear the TX descriptors
1234  * whether basic or extended descriptors are in use.
1235  */
1236 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1237 {
1238 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1239 	int i;
1240 
1241 	/* Clear the TX descriptors */
1242 	for (i = 0; i < DMA_TX_SIZE; i++) {
1243 		int last = (i == (DMA_TX_SIZE - 1));
1244 		struct dma_desc *p;
1245 
1246 		if (priv->extend_desc)
1247 			p = &tx_q->dma_etx[i].basic;
1248 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1249 			p = &tx_q->dma_entx[i].basic;
1250 		else
1251 			p = &tx_q->dma_tx[i];
1252 
1253 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1254 	}
1255 }
1256 
1257 /**
1258  * stmmac_clear_descriptors - clear descriptors
1259  * @priv: driver private structure
1260  * Description: this function is called to clear the TX and RX descriptors
1261  * whether basic or extended descriptors are in use.
1262  */
1263 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1264 {
1265 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1266 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1267 	u32 queue;
1268 
1269 	/* Clear the RX descriptors */
1270 	for (queue = 0; queue < rx_queue_cnt; queue++)
1271 		stmmac_clear_rx_descriptors(priv, queue);
1272 
1273 	/* Clear the TX descriptors */
1274 	for (queue = 0; queue < tx_queue_cnt; queue++)
1275 		stmmac_clear_tx_descriptors(priv, queue);
1276 }
1277 
1278 /**
1279  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1280  * @priv: driver private structure
1281  * @p: descriptor pointer
1282  * @i: descriptor index
1283  * @flags: gfp flag
1284  * @queue: RX queue index
1285  * Description: this function is called to allocate a receive buffer, perform
1286  * the DMA mapping and init the descriptor.
1287  */
1288 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1289 				  int i, gfp_t flags, u32 queue)
1290 {
1291 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1292 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1293 
1294 	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
1295 	if (!buf->page)
1296 		return -ENOMEM;
1297 
1298 	if (priv->sph) {
1299 		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
1300 		if (!buf->sec_page)
1301 			return -ENOMEM;
1302 
1303 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1304 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
1305 	} else {
1306 		buf->sec_page = NULL;
1307 	}
1308 
1309 	buf->addr = page_pool_get_dma_addr(buf->page);
1310 	stmmac_set_desc_addr(priv, p, buf->addr);
1311 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1312 		stmmac_init_desc3(priv, p);
1313 
1314 	return 0;
1315 }
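/* Note (added for clarity): when Split Header (priv->sph) is enabled the
 * descriptor above is given a second page (sec_page/sec_addr) so the
 * hardware can place the packet header and the payload in separate
 * buffers; without SPH only the primary page is mapped.
 */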
1316 
1317 /**
1318  * stmmac_free_rx_buffer - free RX dma buffers
1319  * @priv: private structure
1320  * @queue: RX queue index
1321  * @i: buffer index.
1322  */
1323 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1324 {
1325 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1326 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1327 
1328 	if (buf->page)
1329 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1330 	buf->page = NULL;
1331 
1332 	if (buf->sec_page)
1333 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1334 	buf->sec_page = NULL;
1335 }
1336 
1337 /**
1338  * stmmac_free_tx_buffer - free TX dma buffers
1339  * @priv: private structure
1340  * @queue: TX queue index
1341  * @i: buffer index.
1342  */
1343 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1344 {
1345 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1346 
1347 	if (tx_q->tx_skbuff_dma[i].buf) {
1348 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1349 			dma_unmap_page(priv->device,
1350 				       tx_q->tx_skbuff_dma[i].buf,
1351 				       tx_q->tx_skbuff_dma[i].len,
1352 				       DMA_TO_DEVICE);
1353 		else
1354 			dma_unmap_single(priv->device,
1355 					 tx_q->tx_skbuff_dma[i].buf,
1356 					 tx_q->tx_skbuff_dma[i].len,
1357 					 DMA_TO_DEVICE);
1358 	}
1359 
1360 	if (tx_q->tx_skbuff[i]) {
1361 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1362 		tx_q->tx_skbuff[i] = NULL;
1363 		tx_q->tx_skbuff_dma[i].buf = 0;
1364 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1365 	}
1366 }
1367 
1368 /**
1369  * init_dma_rx_desc_rings - init the RX descriptor rings
1370  * @dev: net device structure
1371  * @flags: gfp flag.
1372  * Description: this function initializes the DMA RX descriptors
1373  * and allocates the socket buffers. It supports the chained and ring
1374  * modes.
1375  */
1376 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1377 {
1378 	struct stmmac_priv *priv = netdev_priv(dev);
1379 	u32 rx_count = priv->plat->rx_queues_to_use;
1380 	int ret = -ENOMEM;
1381 	int queue;
1382 	int i;
1383 
1384 	/* RX INITIALIZATION */
1385 	netif_dbg(priv, probe, priv->dev,
1386 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1387 
1388 	for (queue = 0; queue < rx_count; queue++) {
1389 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1390 
1391 		netif_dbg(priv, probe, priv->dev,
1392 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1393 			  (u32)rx_q->dma_rx_phy);
1394 
1395 		stmmac_clear_rx_descriptors(priv, queue);
1396 
1397 		for (i = 0; i < DMA_RX_SIZE; i++) {
1398 			struct dma_desc *p;
1399 
1400 			if (priv->extend_desc)
1401 				p = &((rx_q->dma_erx + i)->basic);
1402 			else
1403 				p = rx_q->dma_rx + i;
1404 
1405 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1406 						     queue);
1407 			if (ret)
1408 				goto err_init_rx_buffers;
1409 		}
1410 
1411 		rx_q->cur_rx = 0;
1412 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1413 
1414 		/* Setup the chained descriptor addresses */
1415 		if (priv->mode == STMMAC_CHAIN_MODE) {
1416 			if (priv->extend_desc)
1417 				stmmac_mode_init(priv, rx_q->dma_erx,
1418 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1419 			else
1420 				stmmac_mode_init(priv, rx_q->dma_rx,
1421 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1422 		}
1423 	}
1424 
1425 	return 0;
1426 
1427 err_init_rx_buffers:
1428 	while (queue >= 0) {
1429 		while (--i >= 0)
1430 			stmmac_free_rx_buffer(priv, queue, i);
1431 
1432 		if (queue == 0)
1433 			break;
1434 
1435 		i = DMA_RX_SIZE;
1436 		queue--;
1437 	}
1438 
1439 	return ret;
1440 }
1441 
1442 /**
1443  * init_dma_tx_desc_rings - init the TX descriptor rings
1444  * @dev: net device structure.
1445  * Description: this function initializes the DMA TX descriptors
1446  * and allocates the socket buffers. It supports the chained and ring
1447  * modes.
1448  */
1449 static int init_dma_tx_desc_rings(struct net_device *dev)
1450 {
1451 	struct stmmac_priv *priv = netdev_priv(dev);
1452 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1453 	u32 queue;
1454 	int i;
1455 
1456 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1457 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1458 
1459 		netif_dbg(priv, probe, priv->dev,
1460 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1461 			 (u32)tx_q->dma_tx_phy);
1462 
1463 		/* Setup the chained descriptor addresses */
1464 		if (priv->mode == STMMAC_CHAIN_MODE) {
1465 			if (priv->extend_desc)
1466 				stmmac_mode_init(priv, tx_q->dma_etx,
1467 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1468 			else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1469 				stmmac_mode_init(priv, tx_q->dma_tx,
1470 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1471 		}
1472 
1473 		for (i = 0; i < DMA_TX_SIZE; i++) {
1474 			struct dma_desc *p;
1475 			if (priv->extend_desc)
1476 				p = &((tx_q->dma_etx + i)->basic);
1477 			else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1478 				p = &((tx_q->dma_entx + i)->basic);
1479 			else
1480 				p = tx_q->dma_tx + i;
1481 
1482 			stmmac_clear_desc(priv, p);
1483 
1484 			tx_q->tx_skbuff_dma[i].buf = 0;
1485 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1486 			tx_q->tx_skbuff_dma[i].len = 0;
1487 			tx_q->tx_skbuff_dma[i].last_segment = false;
1488 			tx_q->tx_skbuff[i] = NULL;
1489 		}
1490 
1491 		tx_q->dirty_tx = 0;
1492 		tx_q->cur_tx = 0;
1493 		tx_q->mss = 0;
1494 
1495 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1496 	}
1497 
1498 	return 0;
1499 }
1500 
1501 /**
1502  * init_dma_desc_rings - init the RX/TX descriptor rings
1503  * @dev: net device structure
1504  * @flags: gfp flag.
1505  * Description: this function initializes the DMA RX/TX descriptors
1506  * and allocates the socket buffers. It supports the chained and ring
1507  * modes.
1508  */
1509 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1510 {
1511 	struct stmmac_priv *priv = netdev_priv(dev);
1512 	int ret;
1513 
1514 	ret = init_dma_rx_desc_rings(dev, flags);
1515 	if (ret)
1516 		return ret;
1517 
1518 	ret = init_dma_tx_desc_rings(dev);
1519 
1520 	stmmac_clear_descriptors(priv);
1521 
1522 	if (netif_msg_hw(priv))
1523 		stmmac_display_rings(priv);
1524 
1525 	return ret;
1526 }
1527 
1528 /**
1529  * dma_free_rx_skbufs - free RX dma buffers
1530  * @priv: private structure
1531  * @queue: RX queue index
1532  */
1533 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1534 {
1535 	int i;
1536 
1537 	for (i = 0; i < DMA_RX_SIZE; i++)
1538 		stmmac_free_rx_buffer(priv, queue, i);
1539 }
1540 
1541 /**
1542  * dma_free_tx_skbufs - free TX dma buffers
1543  * @priv: private structure
1544  * @queue: TX queue index
1545  */
1546 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1547 {
1548 	int i;
1549 
1550 	for (i = 0; i < DMA_TX_SIZE; i++)
1551 		stmmac_free_tx_buffer(priv, queue, i);
1552 }
1553 
1554 /**
1555  * free_dma_rx_desc_resources - free RX dma desc resources
1556  * @priv: private structure
1557  */
1558 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1559 {
1560 	u32 rx_count = priv->plat->rx_queues_to_use;
1561 	u32 queue;
1562 
1563 	/* Free RX queue resources */
1564 	for (queue = 0; queue < rx_count; queue++) {
1565 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1566 
1567 		/* Release the DMA RX socket buffers */
1568 		dma_free_rx_skbufs(priv, queue);
1569 
1570 		/* Free DMA regions of consistent memory previously allocated */
1571 		if (!priv->extend_desc)
1572 			dma_free_coherent(priv->device,
1573 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1574 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1575 		else
1576 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1577 					  sizeof(struct dma_extended_desc),
1578 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1579 
1580 		kfree(rx_q->buf_pool);
1581 		if (rx_q->page_pool)
1582 			page_pool_destroy(rx_q->page_pool);
1583 	}
1584 }
1585 
1586 /**
1587  * free_dma_tx_desc_resources - free TX dma desc resources
1588  * @priv: private structure
1589  */
1590 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1591 {
1592 	u32 tx_count = priv->plat->tx_queues_to_use;
1593 	u32 queue;
1594 
1595 	/* Free TX queue resources */
1596 	for (queue = 0; queue < tx_count; queue++) {
1597 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1598 		size_t size;
1599 		void *addr;
1600 
1601 		/* Release the DMA TX socket buffers */
1602 		dma_free_tx_skbufs(priv, queue);
1603 
1604 		if (priv->extend_desc) {
1605 			size = sizeof(struct dma_extended_desc);
1606 			addr = tx_q->dma_etx;
1607 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1608 			size = sizeof(struct dma_edesc);
1609 			addr = tx_q->dma_entx;
1610 		} else {
1611 			size = sizeof(struct dma_desc);
1612 			addr = tx_q->dma_tx;
1613 		}
1614 
1615 		size *= DMA_TX_SIZE;
1616 
1617 		dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1618 
1619 		kfree(tx_q->tx_skbuff_dma);
1620 		kfree(tx_q->tx_skbuff);
1621 	}
1622 }
1623 
1624 /**
1625  * alloc_dma_rx_desc_resources - alloc RX resources.
1626  * @priv: private structure
1627  * Description: according to which descriptor type is in use (extended or
1628  * basic), this function allocates the resources for the RX path. The RX
1629  * buffers are pre-allocated from a page pool in order to allow a
1630  * zero-copy mechanism.
1631  */
1632 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1633 {
1634 	u32 rx_count = priv->plat->rx_queues_to_use;
1635 	int ret = -ENOMEM;
1636 	u32 queue;
1637 
1638 	/* RX queues buffers and DMA */
1639 	for (queue = 0; queue < rx_count; queue++) {
1640 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1641 		struct page_pool_params pp_params = { 0 };
1642 		unsigned int num_pages;
1643 
1644 		rx_q->queue_index = queue;
1645 		rx_q->priv_data = priv;
1646 
1647 		pp_params.flags = PP_FLAG_DMA_MAP;
1648 		pp_params.pool_size = DMA_RX_SIZE;
1649 		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1650 		pp_params.order = ilog2(num_pages);
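		/* Example (illustrative): with an 8 KiB dma_buf_sz on a
		 * 4 KiB page system, num_pages = 2 and order = 1, so each
		 * pool entry is a two-page allocation large enough to hold
		 * one receive buffer.
		 */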
1651 		pp_params.nid = dev_to_node(priv->device);
1652 		pp_params.dev = priv->device;
1653 		pp_params.dma_dir = DMA_FROM_DEVICE;
1654 
1655 		rx_q->page_pool = page_pool_create(&pp_params);
1656 		if (IS_ERR(rx_q->page_pool)) {
1657 			ret = PTR_ERR(rx_q->page_pool);
1658 			rx_q->page_pool = NULL;
1659 			goto err_dma;
1660 		}
1661 
1662 		rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
1663 					 GFP_KERNEL);
1664 		if (!rx_q->buf_pool)
1665 			goto err_dma;
1666 
1667 		if (priv->extend_desc) {
1668 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1669 							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1670 							   &rx_q->dma_rx_phy,
1671 							   GFP_KERNEL);
1672 			if (!rx_q->dma_erx)
1673 				goto err_dma;
1674 
1675 		} else {
1676 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1677 							  DMA_RX_SIZE * sizeof(struct dma_desc),
1678 							  &rx_q->dma_rx_phy,
1679 							  GFP_KERNEL);
1680 			if (!rx_q->dma_rx)
1681 				goto err_dma;
1682 		}
1683 	}
1684 
1685 	return 0;
1686 
1687 err_dma:
1688 	free_dma_rx_desc_resources(priv);
1689 
1690 	return ret;
1691 }
1692 
1693 /**
1694  * alloc_dma_tx_desc_resources - alloc TX resources.
1695  * @priv: private structure
1696  * Description: according to which descriptor type is in use (extended or
1697  * basic), this function allocates the DMA descriptor area and the
1698  * software bookkeeping arrays (tx_skbuff, tx_skbuff_dma) used by the
1699  * TX path.
1700  */
1701 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1702 {
1703 	u32 tx_count = priv->plat->tx_queues_to_use;
1704 	int ret = -ENOMEM;
1705 	u32 queue;
1706 
1707 	/* TX queues buffers and DMA */
1708 	for (queue = 0; queue < tx_count; queue++) {
1709 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1710 		size_t size;
1711 		void *addr;
1712 
1713 		tx_q->queue_index = queue;
1714 		tx_q->priv_data = priv;
1715 
1716 		tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
1717 					      sizeof(*tx_q->tx_skbuff_dma),
1718 					      GFP_KERNEL);
1719 		if (!tx_q->tx_skbuff_dma)
1720 			goto err_dma;
1721 
1722 		tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
1723 					  sizeof(struct sk_buff *),
1724 					  GFP_KERNEL);
1725 		if (!tx_q->tx_skbuff)
1726 			goto err_dma;
1727 
1728 		if (priv->extend_desc)
1729 			size = sizeof(struct dma_extended_desc);
1730 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1731 			size = sizeof(struct dma_edesc);
1732 		else
1733 			size = sizeof(struct dma_desc);
1734 
1735 		size *= DMA_TX_SIZE;
1736 
1737 		addr = dma_alloc_coherent(priv->device, size,
1738 					  &tx_q->dma_tx_phy, GFP_KERNEL);
1739 		if (!addr)
1740 			goto err_dma;
1741 
1742 		if (priv->extend_desc)
1743 			tx_q->dma_etx = addr;
1744 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1745 			tx_q->dma_entx = addr;
1746 		else
1747 			tx_q->dma_tx = addr;
1748 	}
1749 
1750 	return 0;
1751 
1752 err_dma:
1753 	free_dma_tx_desc_resources(priv);
1754 	return ret;
1755 }
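
/*
 * Illustrative note (not part of the original driver): the size of the
 * coherent TX ring allocated above is simply "descriptor size * ring
 * length".  A rough sketch, assuming a ring of DMA_TX_SIZE == 512 entries
 * and 16-byte basic descriptors:
 *
 *	size_t size = sizeof(struct dma_desc) * 512;	// 16 * 512 = 8 KiB
 *
 * The extended (dma_extended_desc) and enhanced TBS (dma_edesc) layouts are
 * larger, so the per-queue allocation grows accordingly.
 */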
1756 
1757 /**
1758  * alloc_dma_desc_resources - alloc TX/RX resources.
1759  * @priv: private structure
1760  * Description: according to which descriptor can be used (extended or basic)
1761  * this function allocates the resources for the TX and RX paths. In case of
1762  * reception, for example, it pre-allocates the RX buffers in order to
1763  * allow the zero-copy mechanism.
1764  */
1765 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1766 {
1767 	/* RX Allocation */
1768 	int ret = alloc_dma_rx_desc_resources(priv);
1769 
1770 	if (ret)
1771 		return ret;
1772 
1773 	ret = alloc_dma_tx_desc_resources(priv);
1774 
1775 	return ret;
1776 }
1777 
1778 /**
1779  * free_dma_desc_resources - free dma desc resources
1780  * @priv: private structure
1781  */
1782 static void free_dma_desc_resources(struct stmmac_priv *priv)
1783 {
1784 	/* Release the DMA RX socket buffers */
1785 	free_dma_rx_desc_resources(priv);
1786 
1787 	/* Release the DMA TX socket buffers */
1788 	free_dma_tx_desc_resources(priv);
1789 }
1790 
1791 /**
1792  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1793  *  @priv: driver private structure
1794  *  Description: It is used for enabling the rx queues in the MAC
1795  */
1796 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1797 {
1798 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1799 	int queue;
1800 	u8 mode;
1801 
1802 	for (queue = 0; queue < rx_queues_count; queue++) {
1803 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1804 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1805 	}
1806 }
1807 
1808 /**
1809  * stmmac_start_rx_dma - start RX DMA channel
1810  * @priv: driver private structure
1811  * @chan: RX channel index
1812  * Description:
1813  * This starts a RX DMA channel
1814  */
1815 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1816 {
1817 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1818 	stmmac_start_rx(priv, priv->ioaddr, chan);
1819 }
1820 
1821 /**
1822  * stmmac_start_tx_dma - start TX DMA channel
1823  * @priv: driver private structure
1824  * @chan: TX channel index
1825  * Description:
1826  * This starts a TX DMA channel
1827  */
1828 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1829 {
1830 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1831 	stmmac_start_tx(priv, priv->ioaddr, chan);
1832 }
1833 
1834 /**
1835  * stmmac_stop_rx_dma - stop RX DMA channel
1836  * @priv: driver private structure
1837  * @chan: RX channel index
1838  * Description:
1839  * This stops a RX DMA channel
1840  */
1841 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1842 {
1843 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1844 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1845 }
1846 
1847 /**
1848  * stmmac_stop_tx_dma - stop TX DMA channel
1849  * @priv: driver private structure
1850  * @chan: TX channel index
1851  * Description:
1852  * This stops a TX DMA channel
1853  */
1854 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1855 {
1856 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1857 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1858 }
1859 
1860 /**
1861  * stmmac_start_all_dma - start all RX and TX DMA channels
1862  * @priv: driver private structure
1863  * Description:
1864  * This starts all the RX and TX DMA channels
1865  */
1866 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1867 {
1868 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1869 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1870 	u32 chan = 0;
1871 
1872 	for (chan = 0; chan < rx_channels_count; chan++)
1873 		stmmac_start_rx_dma(priv, chan);
1874 
1875 	for (chan = 0; chan < tx_channels_count; chan++)
1876 		stmmac_start_tx_dma(priv, chan);
1877 }
1878 
1879 /**
1880  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1881  * @priv: driver private structure
1882  * Description:
1883  * This stops the RX and TX DMA channels
1884  */
1885 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1886 {
1887 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1888 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1889 	u32 chan = 0;
1890 
1891 	for (chan = 0; chan < rx_channels_count; chan++)
1892 		stmmac_stop_rx_dma(priv, chan);
1893 
1894 	for (chan = 0; chan < tx_channels_count; chan++)
1895 		stmmac_stop_tx_dma(priv, chan);
1896 }
1897 
1898 /**
1899  *  stmmac_dma_operation_mode - HW DMA operation mode
1900  *  @priv: driver private structure
1901  *  Description: it is used for configuring the DMA operation mode register in
1902  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1903  */
1904 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1905 {
1906 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1907 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1908 	int rxfifosz = priv->plat->rx_fifo_size;
1909 	int txfifosz = priv->plat->tx_fifo_size;
1910 	u32 txmode = 0;
1911 	u32 rxmode = 0;
1912 	u32 chan = 0;
1913 	u8 qmode = 0;
1914 
1915 	if (rxfifosz == 0)
1916 		rxfifosz = priv->dma_cap.rx_fifo_size;
1917 	if (txfifosz == 0)
1918 		txfifosz = priv->dma_cap.tx_fifo_size;
1919 
1920 	/* Adjust for real per queue fifo size */
1921 	rxfifosz /= rx_channels_count;
1922 	txfifosz /= tx_channels_count;
1923 
1924 	if (priv->plat->force_thresh_dma_mode) {
1925 		txmode = tc;
1926 		rxmode = tc;
1927 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1928 		/*
1929 		 * In case of GMAC, SF mode can be enabled
1930 		 * to perform the TX COE in HW. This depends on:
1931 		 * 1) TX COE is actually supported;
1932 		 * 2) there is no buggy Jumbo frame support that requires
1933 		 *    the csum not to be inserted in the TDES.
1934 		 */
1935 		txmode = SF_DMA_MODE;
1936 		rxmode = SF_DMA_MODE;
1937 		priv->xstats.threshold = SF_DMA_MODE;
1938 	} else {
1939 		txmode = tc;
1940 		rxmode = SF_DMA_MODE;
1941 	}
1942 
1943 	/* configure all channels */
1944 	for (chan = 0; chan < rx_channels_count; chan++) {
1945 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1946 
1947 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1948 				rxfifosz, qmode);
1949 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1950 				chan);
1951 	}
1952 
1953 	for (chan = 0; chan < tx_channels_count; chan++) {
1954 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1955 
1956 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1957 				txfifosz, qmode);
1958 	}
1959 }
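
/*
 * Illustrative note (not part of the original driver): the FIFO split above
 * is plain integer division.  For example, a 16384-byte RX FIFO shared by
 * four RX channels leaves rxfifosz = 16384 / 4 = 4096 bytes per channel.
 * The operating mode then falls out of the platform flags:
 *
 *	force_thresh_dma_mode		-> txmode = rxmode = tc (threshold)
 *	force_sf_dma_mode or tx_coe	-> txmode = rxmode = SF_DMA_MODE
 *	otherwise			-> txmode = tc, rxmode = SF_DMA_MODE
 */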
1960 
1961 /**
1962  * stmmac_tx_clean - to manage the transmission completion
1963  * @priv: driver private structure
1964  * @queue: TX queue index
1965  * Description: it reclaims the transmit resources after transmission completes.
1966  */
1967 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1968 {
1969 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1970 	unsigned int bytes_compl = 0, pkts_compl = 0;
1971 	unsigned int entry, count = 0;
1972 
1973 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1974 
1975 	priv->xstats.tx_clean++;
1976 
1977 	entry = tx_q->dirty_tx;
1978 	while ((entry != tx_q->cur_tx) && (count < budget)) {
1979 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1980 		struct dma_desc *p;
1981 		int status;
1982 
1983 		if (priv->extend_desc)
1984 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1985 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1986 			p = &tx_q->dma_entx[entry].basic;
1987 		else
1988 			p = tx_q->dma_tx + entry;
1989 
1990 		status = stmmac_tx_status(priv, &priv->dev->stats,
1991 				&priv->xstats, p, priv->ioaddr);
1992 		/* Check if the descriptor is owned by the DMA */
1993 		if (unlikely(status & tx_dma_own))
1994 			break;
1995 
1996 		count++;
1997 
1998 		/* Make sure descriptor fields are read after reading
1999 		 * the own bit.
2000 		 */
2001 		dma_rmb();
2002 
2003 		/* Just consider the last segment and ...*/
2004 		if (likely(!(status & tx_not_ls))) {
2005 			/* ... verify the status error condition */
2006 			if (unlikely(status & tx_err)) {
2007 				priv->dev->stats.tx_errors++;
2008 			} else {
2009 				priv->dev->stats.tx_packets++;
2010 				priv->xstats.tx_pkt_n++;
2011 			}
2012 			stmmac_get_tx_hwtstamp(priv, p, skb);
2013 		}
2014 
2015 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
2016 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2017 				dma_unmap_page(priv->device,
2018 					       tx_q->tx_skbuff_dma[entry].buf,
2019 					       tx_q->tx_skbuff_dma[entry].len,
2020 					       DMA_TO_DEVICE);
2021 			else
2022 				dma_unmap_single(priv->device,
2023 						 tx_q->tx_skbuff_dma[entry].buf,
2024 						 tx_q->tx_skbuff_dma[entry].len,
2025 						 DMA_TO_DEVICE);
2026 			tx_q->tx_skbuff_dma[entry].buf = 0;
2027 			tx_q->tx_skbuff_dma[entry].len = 0;
2028 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2029 		}
2030 
2031 		stmmac_clean_desc3(priv, tx_q, p);
2032 
2033 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2034 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2035 
2036 		if (likely(skb != NULL)) {
2037 			pkts_compl++;
2038 			bytes_compl += skb->len;
2039 			dev_consume_skb_any(skb);
2040 			tx_q->tx_skbuff[entry] = NULL;
2041 		}
2042 
2043 		stmmac_release_tx_desc(priv, p, priv->mode);
2044 
2045 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2046 	}
2047 	tx_q->dirty_tx = entry;
2048 
2049 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2050 				  pkts_compl, bytes_compl);
2051 
2052 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2053 								queue))) &&
2054 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
2055 
2056 		netif_dbg(priv, tx_done, priv->dev,
2057 			  "%s: restart transmit\n", __func__);
2058 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2059 	}
2060 
2061 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
2062 		stmmac_enable_eee_mode(priv);
2063 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
2064 	}
2065 
2066 	/* We still have pending packets, let's call for a new scheduling */
2067 	if (tx_q->dirty_tx != tx_q->cur_tx)
2068 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2069 
2070 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2071 
2072 	return count;
2073 }
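
/*
 * Illustrative note (not part of the original driver): dirty_tx chases
 * cur_tx around the ring, advancing with STMMAC_GET_ENTRY(), which wraps
 * modulo the ring length.  A minimal sketch, assuming DMA_TX_SIZE == 512:
 *
 *	unsigned int entry = 511;			// last slot
 *	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);	// wraps back to 0
 *
 * The loop above stops either when dirty_tx catches up with cur_tx or when
 * the NAPI budget is exhausted, whichever comes first.
 */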
2074 
2075 /**
2076  * stmmac_tx_err - to manage the tx error
2077  * @priv: driver private structure
2078  * @chan: channel index
2079  * Description: it cleans the descriptors and restarts the transmission
2080  * in case of transmission errors.
2081  */
2082 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2083 {
2084 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2085 
2086 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2087 
2088 	stmmac_stop_tx_dma(priv, chan);
2089 	dma_free_tx_skbufs(priv, chan);
2090 	stmmac_clear_tx_descriptors(priv, chan);
2091 	tx_q->dirty_tx = 0;
2092 	tx_q->cur_tx = 0;
2093 	tx_q->mss = 0;
2094 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2095 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2096 			    tx_q->dma_tx_phy, chan);
2097 	stmmac_start_tx_dma(priv, chan);
2098 
2099 	priv->dev->stats.tx_errors++;
2100 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2101 }
2102 
2103 /**
2104  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2105  *  @priv: driver private structure
2106  *  @txmode: TX operating mode
2107  *  @rxmode: RX operating mode
2108  *  @chan: channel index
2109  *  Description: it is used for configuring the DMA operation mode at
2110  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2111  *  mode.
2112  */
2113 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2114 					  u32 rxmode, u32 chan)
2115 {
2116 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2117 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2118 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2119 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2120 	int rxfifosz = priv->plat->rx_fifo_size;
2121 	int txfifosz = priv->plat->tx_fifo_size;
2122 
2123 	if (rxfifosz == 0)
2124 		rxfifosz = priv->dma_cap.rx_fifo_size;
2125 	if (txfifosz == 0)
2126 		txfifosz = priv->dma_cap.tx_fifo_size;
2127 
2128 	/* Adjust for real per queue fifo size */
2129 	rxfifosz /= rx_channels_count;
2130 	txfifosz /= tx_channels_count;
2131 
2132 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2133 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2134 }
2135 
2136 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2137 {
2138 	int ret;
2139 
2140 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2141 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2142 	if (ret && (ret != -EINVAL)) {
2143 		stmmac_global_err(priv);
2144 		return true;
2145 	}
2146 
2147 	return false;
2148 }
2149 
2150 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2151 {
2152 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2153 						 &priv->xstats, chan);
2154 	struct stmmac_channel *ch = &priv->channel[chan];
2155 	unsigned long flags;
2156 
2157 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2158 		if (napi_schedule_prep(&ch->rx_napi)) {
2159 			spin_lock_irqsave(&ch->lock, flags);
2160 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2161 			spin_unlock_irqrestore(&ch->lock, flags);
2162 			__napi_schedule_irqoff(&ch->rx_napi);
2163 		}
2164 	}
2165 
2166 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2167 		if (napi_schedule_prep(&ch->tx_napi)) {
2168 			spin_lock_irqsave(&ch->lock, flags);
2169 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2170 			spin_unlock_irqrestore(&ch->lock, flags);
2171 			__napi_schedule_irqoff(&ch->tx_napi);
2172 		}
2173 	}
2174 
2175 	return status;
2176 }
2177 
2178 /**
2179  * stmmac_dma_interrupt - DMA ISR
2180  * @priv: driver private structure
2181  * Description: this is the DMA ISR. It is called by the main ISR.
2182  * It calls the dwmac dma routine and schedule poll method in case of some
2183  * work can be done.
2184  */
2185 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2186 {
2187 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2188 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2189 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2190 				tx_channel_count : rx_channel_count;
2191 	u32 chan;
2192 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2193 
2194 	/* Make sure we never check beyond our status buffer. */
2195 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2196 		channels_to_check = ARRAY_SIZE(status);
2197 
2198 	for (chan = 0; chan < channels_to_check; chan++)
2199 		status[chan] = stmmac_napi_check(priv, chan);
2200 
2201 	for (chan = 0; chan < tx_channel_count; chan++) {
2202 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2203 			/* Try to bump up the dma threshold on this failure */
2204 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2205 			    (tc <= 256)) {
2206 				tc += 64;
2207 				if (priv->plat->force_thresh_dma_mode)
2208 					stmmac_set_dma_operation_mode(priv,
2209 								      tc,
2210 								      tc,
2211 								      chan);
2212 				else
2213 					stmmac_set_dma_operation_mode(priv,
2214 								    tc,
2215 								    SF_DMA_MODE,
2216 								    chan);
2217 				priv->xstats.threshold = tc;
2218 			}
2219 		} else if (unlikely(status[chan] == tx_hard_error)) {
2220 			stmmac_tx_err(priv, chan);
2221 		}
2222 	}
2223 }
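
/*
 * Illustrative note (not part of the original driver): when a channel
 * reports tx_hard_error_bump_tc, the code above raises the threshold in
 * steps of 64 while it is still <= 256.  Starting from a threshold of 64,
 * repeated bump events walk it through:
 *
 *	64 -> 128 -> 192 -> 256 -> 320 (no further bumps after that)
 *
 * Once Store-And-Forward mode is in use (threshold == SF_DMA_MODE), no
 * bumping is attempted at all.
 */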
2224 
2225 /**
2226  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2227  * @priv: driver private structure
2228  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2229  */
2230 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2231 {
2232 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2233 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2234 
2235 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2236 
2237 	if (priv->dma_cap.rmon) {
2238 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2239 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2240 	} else
2241 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2242 }
2243 
2244 /**
2245  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2246  * @priv: driver private structure
2247  * Description:
2248  *  new GMAC chip generations have a new register to indicate the
2249  *  presence of the optional feature/functions.
2250  *  This can be also used to override the value passed through the
2251  *  platform and necessary for old MAC10/100 and GMAC chips.
2252  */
2253 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2254 {
2255 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2256 }
2257 
2258 /**
2259  * stmmac_check_ether_addr - check if the MAC addr is valid
2260  * @priv: driver private structure
2261  * Description:
2262  * it verifies whether the MAC address is valid; if it is not, a random
2263  * MAC address is generated.
2264  */
2265 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2266 {
2267 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2268 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2269 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2270 			eth_hw_addr_random(priv->dev);
2271 		dev_info(priv->device, "device MAC address %pM\n",
2272 			 priv->dev->dev_addr);
2273 	}
2274 }
2275 
2276 /**
2277  * stmmac_init_dma_engine - DMA init.
2278  * @priv: driver private structure
2279  * Description:
2280  * It inits the DMA invoking the specific MAC/GMAC callback.
2281  * Some DMA parameters can be passed from the platform;
2282  * in case of these are not passed a default is kept for the MAC or GMAC.
2283  */
2284 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2285 {
2286 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2287 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2288 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2289 	struct stmmac_rx_queue *rx_q;
2290 	struct stmmac_tx_queue *tx_q;
2291 	u32 chan = 0;
2292 	int atds = 0;
2293 	int ret = 0;
2294 
2295 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2296 		dev_err(priv->device, "Invalid DMA configuration\n");
2297 		return -EINVAL;
2298 	}
2299 
2300 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2301 		atds = 1;
2302 
2303 	ret = stmmac_reset(priv, priv->ioaddr);
2304 	if (ret) {
2305 		dev_err(priv->device, "Failed to reset the dma\n");
2306 		return ret;
2307 	}
2308 
2309 	/* DMA Configuration */
2310 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2311 
2312 	if (priv->plat->axi)
2313 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2314 
2315 	/* DMA CSR Channel configuration */
2316 	for (chan = 0; chan < dma_csr_ch; chan++)
2317 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2318 
2319 	/* DMA RX Channel Configuration */
2320 	for (chan = 0; chan < rx_channels_count; chan++) {
2321 		rx_q = &priv->rx_queue[chan];
2322 
2323 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2324 				    rx_q->dma_rx_phy, chan);
2325 
2326 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2327 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2328 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2329 				       rx_q->rx_tail_addr, chan);
2330 	}
2331 
2332 	/* DMA TX Channel Configuration */
2333 	for (chan = 0; chan < tx_channels_count; chan++) {
2334 		tx_q = &priv->tx_queue[chan];
2335 
2336 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2337 				    tx_q->dma_tx_phy, chan);
2338 
2339 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2340 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2341 				       tx_q->tx_tail_addr, chan);
2342 	}
2343 
2344 	return ret;
2345 }
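
/*
 * Illustrative note (not part of the original driver): the RX tail pointer
 * programmed above sits one full ring past the base address.  A minimal
 * sketch, assuming DMA_RX_SIZE == 512 and 16-byte basic descriptors:
 *
 *	rx_tail_addr = rx_q->dma_rx_phy + 512 * sizeof(struct dma_desc);
 *	             = rx_q->dma_rx_phy + 8192;
 *
 * The TX tail pointer, by contrast, starts at the ring base and is bumped
 * in the xmit paths as descriptors are handed to the DMA.
 */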
2346 
2347 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2348 {
2349 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2350 
2351 	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2352 }
2353 
2354 /**
2355  * stmmac_tx_timer - mitigation sw timer for tx.
2356  * @t: pointer to the timer_list embedded in the TX queue
2357  * Description:
2358  * This is the timer handler that schedules the TX NAPI to run stmmac_tx_clean.
2359  */
2360 static void stmmac_tx_timer(struct timer_list *t)
2361 {
2362 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2363 	struct stmmac_priv *priv = tx_q->priv_data;
2364 	struct stmmac_channel *ch;
2365 
2366 	ch = &priv->channel[tx_q->queue_index];
2367 
2368 	if (likely(napi_schedule_prep(&ch->tx_napi))) {
2369 		unsigned long flags;
2370 
2371 		spin_lock_irqsave(&ch->lock, flags);
2372 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2373 		spin_unlock_irqrestore(&ch->lock, flags);
2374 		__napi_schedule(&ch->tx_napi);
2375 	}
2376 }
2377 
2378 /**
2379  * stmmac_init_coalesce - init mitigation options.
2380  * @priv: driver private structure
2381  * Description:
2382  * This inits the coalesce parameters: i.e. timer rate,
2383  * timer handler and default threshold used for enabling the
2384  * interrupt on completion bit.
2385  */
2386 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2387 {
2388 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2389 	u32 chan;
2390 
2391 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2392 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2393 	priv->rx_coal_frames = STMMAC_RX_FRAMES;
2394 
2395 	for (chan = 0; chan < tx_channel_count; chan++) {
2396 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2397 
2398 		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2399 	}
2400 }
2401 
2402 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2403 {
2404 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2405 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2406 	u32 chan;
2407 
2408 	/* set TX ring length */
2409 	for (chan = 0; chan < tx_channels_count; chan++)
2410 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2411 				(DMA_TX_SIZE - 1), chan);
2412 
2413 	/* set RX ring length */
2414 	for (chan = 0; chan < rx_channels_count; chan++)
2415 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2416 				(DMA_RX_SIZE - 1), chan);
2417 }
2418 
2419 /**
2420  *  stmmac_set_tx_queue_weight - Set TX queue weight
2421  *  @priv: driver private structure
2422  *  Description: It is used for setting TX queues weight
2423  */
2424 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2425 {
2426 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2427 	u32 weight;
2428 	u32 queue;
2429 
2430 	for (queue = 0; queue < tx_queues_count; queue++) {
2431 		weight = priv->plat->tx_queues_cfg[queue].weight;
2432 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2433 	}
2434 }
2435 
2436 /**
2437  *  stmmac_configure_cbs - Configure CBS in TX queue
2438  *  @priv: driver private structure
2439  *  Description: It is used for configuring CBS in AVB TX queues
2440  */
2441 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2442 {
2443 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2444 	u32 mode_to_use;
2445 	u32 queue;
2446 
2447 	/* queue 0 is reserved for legacy traffic */
2448 	for (queue = 1; queue < tx_queues_count; queue++) {
2449 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2450 		if (mode_to_use == MTL_QUEUE_DCB)
2451 			continue;
2452 
2453 		stmmac_config_cbs(priv, priv->hw,
2454 				priv->plat->tx_queues_cfg[queue].send_slope,
2455 				priv->plat->tx_queues_cfg[queue].idle_slope,
2456 				priv->plat->tx_queues_cfg[queue].high_credit,
2457 				priv->plat->tx_queues_cfg[queue].low_credit,
2458 				queue);
2459 	}
2460 }
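
/*
 * Illustrative sketch (hypothetical values, not from the original sources):
 * CBS is only applied to TX queues the platform does not mark as DCB.  A
 * platform description might set up queue 1 as an AVB queue roughly like:
 *
 *	plat->tx_queues_cfg[1].mode_to_use = MTL_QUEUE_AVB;
 *	plat->tx_queues_cfg[1].send_slope  = 0x0;	 // example value
 *	plat->tx_queues_cfg[1].idle_slope  = 0x1000;	 // example value
 *	plat->tx_queues_cfg[1].high_credit = 0x800000;	 // example value
 *	plat->tx_queues_cfg[1].low_credit  = 0xff800000; // example value
 *
 * and those values are passed straight through to stmmac_config_cbs().
 */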
2461 
2462 /**
2463  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2464  *  @priv: driver private structure
2465  *  Description: It is used for mapping RX queues to RX dma channels
2466  */
2467 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2468 {
2469 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2470 	u32 queue;
2471 	u32 chan;
2472 
2473 	for (queue = 0; queue < rx_queues_count; queue++) {
2474 		chan = priv->plat->rx_queues_cfg[queue].chan;
2475 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2476 	}
2477 }
2478 
2479 /**
2480  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2481  *  @priv: driver private structure
2482  *  Description: It is used for configuring the RX Queue Priority
2483  */
2484 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2485 {
2486 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2487 	u32 queue;
2488 	u32 prio;
2489 
2490 	for (queue = 0; queue < rx_queues_count; queue++) {
2491 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2492 			continue;
2493 
2494 		prio = priv->plat->rx_queues_cfg[queue].prio;
2495 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2496 	}
2497 }
2498 
2499 /**
2500  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2501  *  @priv: driver private structure
2502  *  Description: It is used for configuring the TX Queue Priority
2503  */
2504 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2505 {
2506 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2507 	u32 queue;
2508 	u32 prio;
2509 
2510 	for (queue = 0; queue < tx_queues_count; queue++) {
2511 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2512 			continue;
2513 
2514 		prio = priv->plat->tx_queues_cfg[queue].prio;
2515 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2516 	}
2517 }
2518 
2519 /**
2520  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2521  *  @priv: driver private structure
2522  *  Description: It is used for configuring the RX queue routing
2523  */
2524 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2525 {
2526 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2527 	u32 queue;
2528 	u8 packet;
2529 
2530 	for (queue = 0; queue < rx_queues_count; queue++) {
2531 		/* no specific packet type routing specified for the queue */
2532 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2533 			continue;
2534 
2535 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2536 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2537 	}
2538 }
2539 
2540 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
2541 {
2542 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
2543 		priv->rss.enable = false;
2544 		return;
2545 	}
2546 
2547 	if (priv->dev->features & NETIF_F_RXHASH)
2548 		priv->rss.enable = true;
2549 	else
2550 		priv->rss.enable = false;
2551 
2552 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
2553 			     priv->plat->rx_queues_to_use);
2554 }
2555 
2556 /**
2557  *  stmmac_mtl_configuration - Configure MTL
2558  *  @priv: driver private structure
2559  *  Description: It is used for configuring the MTL
2560  */
2561 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2562 {
2563 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2564 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2565 
2566 	if (tx_queues_count > 1)
2567 		stmmac_set_tx_queue_weight(priv);
2568 
2569 	/* Configure MTL RX algorithms */
2570 	if (rx_queues_count > 1)
2571 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2572 				priv->plat->rx_sched_algorithm);
2573 
2574 	/* Configure MTL TX algorithms */
2575 	if (tx_queues_count > 1)
2576 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2577 				priv->plat->tx_sched_algorithm);
2578 
2579 	/* Configure CBS in AVB TX queues */
2580 	if (tx_queues_count > 1)
2581 		stmmac_configure_cbs(priv);
2582 
2583 	/* Map RX MTL to DMA channels */
2584 	stmmac_rx_queue_dma_chan_map(priv);
2585 
2586 	/* Enable MAC RX Queues */
2587 	stmmac_mac_enable_rx_queues(priv);
2588 
2589 	/* Set RX priorities */
2590 	if (rx_queues_count > 1)
2591 		stmmac_mac_config_rx_queues_prio(priv);
2592 
2593 	/* Set TX priorities */
2594 	if (tx_queues_count > 1)
2595 		stmmac_mac_config_tx_queues_prio(priv);
2596 
2597 	/* Set RX routing */
2598 	if (rx_queues_count > 1)
2599 		stmmac_mac_config_rx_queues_routing(priv);
2600 
2601 	/* Receive Side Scaling */
2602 	if (rx_queues_count > 1)
2603 		stmmac_mac_config_rss(priv);
2604 }
2605 
2606 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2607 {
2608 	if (priv->dma_cap.asp) {
2609 		netdev_info(priv->dev, "Enabling Safety Features\n");
2610 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2611 	} else {
2612 		netdev_info(priv->dev, "No Safety Features support found\n");
2613 	}
2614 }
2615 
2616 /**
2617  * stmmac_hw_setup - setup mac in a usable state.
2618  *  @dev : pointer to the device structure.
2619  *  Description:
2620  *  this is the main function to setup the HW in a usable state because the
2621  *  dma engine is reset, the core registers are configured (e.g. AXI,
2622  *  Checksum features, timers). The DMA is ready to start receiving and
2623  *  transmitting.
2624  *  Return value:
2625  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2626  *  file on failure.
2627  */
2628 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2629 {
2630 	struct stmmac_priv *priv = netdev_priv(dev);
2631 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2632 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2633 	u32 chan;
2634 	int ret;
2635 
2636 	/* DMA initialization and SW reset */
2637 	ret = stmmac_init_dma_engine(priv);
2638 	if (ret < 0) {
2639 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2640 			   __func__);
2641 		return ret;
2642 	}
2643 
2644 	/* Copy the MAC addr into the HW  */
2645 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2646 
2647 	/* PS and related bits will be programmed according to the speed */
2648 	if (priv->hw->pcs) {
2649 		int speed = priv->plat->mac_port_sel_speed;
2650 
2651 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2652 		    (speed == SPEED_1000)) {
2653 			priv->hw->ps = speed;
2654 		} else {
2655 			dev_warn(priv->device, "invalid port speed\n");
2656 			priv->hw->ps = 0;
2657 		}
2658 	}
2659 
2660 	/* Initialize the MAC Core */
2661 	stmmac_core_init(priv, priv->hw, dev);
2662 
2663 	/* Initialize MTL*/
2664 	stmmac_mtl_configuration(priv);
2665 
2666 	/* Initialize Safety Features */
2667 	stmmac_safety_feat_configuration(priv);
2668 
2669 	ret = stmmac_rx_ipc(priv, priv->hw);
2670 	if (!ret) {
2671 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2672 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2673 		priv->hw->rx_csum = 0;
2674 	}
2675 
2676 	/* Enable the MAC Rx/Tx */
2677 	stmmac_mac_set(priv, priv->ioaddr, true);
2678 
2679 	/* Set the HW DMA mode and the COE */
2680 	stmmac_dma_operation_mode(priv);
2681 
2682 	stmmac_mmc_setup(priv);
2683 
2684 	if (init_ptp) {
2685 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2686 		if (ret < 0)
2687 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2688 
2689 		ret = stmmac_init_ptp(priv);
2690 		if (ret == -EOPNOTSUPP)
2691 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2692 		else if (ret)
2693 			netdev_warn(priv->dev, "PTP init failed\n");
2694 	}
2695 
2696 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2697 
2698 	if (priv->use_riwt) {
2699 		if (!priv->rx_riwt)
2700 			priv->rx_riwt = DEF_DMA_RIWT;
2701 
2702 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
2703 	}
2704 
2705 	if (priv->hw->pcs)
2706 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
2707 
2708 	/* set TX and RX rings length */
2709 	stmmac_set_rings_length(priv);
2710 
2711 	/* Enable TSO */
2712 	if (priv->tso) {
2713 		for (chan = 0; chan < tx_cnt; chan++)
2714 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2715 	}
2716 
2717 	/* Enable Split Header */
2718 	if (priv->sph && priv->hw->rx_csum) {
2719 		for (chan = 0; chan < rx_cnt; chan++)
2720 			stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
2721 	}
2722 
2723 	/* VLAN Tag Insertion */
2724 	if (priv->dma_cap.vlins)
2725 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
2726 
2727 	/* TBS */
2728 	for (chan = 0; chan < tx_cnt; chan++) {
2729 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2730 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2731 
2732 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2733 	}
2734 
2735 	/* Start the ball rolling... */
2736 	stmmac_start_all_dma(priv);
2737 
2738 	return 0;
2739 }
2740 
2741 static void stmmac_hw_teardown(struct net_device *dev)
2742 {
2743 	struct stmmac_priv *priv = netdev_priv(dev);
2744 
2745 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2746 }
2747 
2748 /**
2749  *  stmmac_open - open entry point of the driver
2750  *  @dev : pointer to the device structure.
2751  *  Description:
2752  *  This function is the open entry point of the driver.
2753  *  Return value:
2754  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2755  *  file on failure.
2756  */
2757 static int stmmac_open(struct net_device *dev)
2758 {
2759 	struct stmmac_priv *priv = netdev_priv(dev);
2760 	int bfsize = 0;
2761 	u32 chan;
2762 	int ret;
2763 
2764 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
2765 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
2766 	    priv->hw->xpcs == NULL) {
2767 		ret = stmmac_init_phy(dev);
2768 		if (ret) {
2769 			netdev_err(priv->dev,
2770 				   "%s: Cannot attach to PHY (error: %d)\n",
2771 				   __func__, ret);
2772 			return ret;
2773 		}
2774 	}
2775 
2776 	/* Extra statistics */
2777 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2778 	priv->xstats.threshold = tc;
2779 
2780 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
2781 	if (bfsize < 0)
2782 		bfsize = 0;
2783 
2784 	if (bfsize < BUF_SIZE_16KiB)
2785 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
2786 
2787 	priv->dma_buf_sz = bfsize;
2788 	buf_sz = bfsize;
2789 
2790 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2791 
2792 	/* Earlier check for TBS */
2793 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
2794 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2795 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
2796 
2797 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
2798 		if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
2799 			tx_q->tbs &= ~STMMAC_TBS_AVAIL;
2800 	}
2801 
2802 	ret = alloc_dma_desc_resources(priv);
2803 	if (ret < 0) {
2804 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2805 			   __func__);
2806 		goto dma_desc_error;
2807 	}
2808 
2809 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2810 	if (ret < 0) {
2811 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2812 			   __func__);
2813 		goto init_error;
2814 	}
2815 
2816 	ret = stmmac_hw_setup(dev, true);
2817 	if (ret < 0) {
2818 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2819 		goto init_error;
2820 	}
2821 
2822 	stmmac_init_coalesce(priv);
2823 
2824 	phylink_start(priv->phylink);
2825 	/* We may have called phylink_speed_down before */
2826 	phylink_speed_up(priv->phylink);
2827 
2828 	/* Request the IRQ lines */
2829 	ret = request_irq(dev->irq, stmmac_interrupt,
2830 			  IRQF_SHARED, dev->name, dev);
2831 	if (unlikely(ret < 0)) {
2832 		netdev_err(priv->dev,
2833 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2834 			   __func__, dev->irq, ret);
2835 		goto irq_error;
2836 	}
2837 
2838 	/* Request the Wake IRQ in case of another line is used for WoL */
2839 	if (priv->wol_irq != dev->irq) {
2840 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2841 				  IRQF_SHARED, dev->name, dev);
2842 		if (unlikely(ret < 0)) {
2843 			netdev_err(priv->dev,
2844 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2845 				   __func__, priv->wol_irq, ret);
2846 			goto wolirq_error;
2847 		}
2848 	}
2849 
2850 	/* Request the IRQ lines */
2851 	if (priv->lpi_irq > 0) {
2852 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2853 				  dev->name, dev);
2854 		if (unlikely(ret < 0)) {
2855 			netdev_err(priv->dev,
2856 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2857 				   __func__, priv->lpi_irq, ret);
2858 			goto lpiirq_error;
2859 		}
2860 	}
2861 
2862 	stmmac_enable_all_queues(priv);
2863 	stmmac_start_all_queues(priv);
2864 
2865 	return 0;
2866 
2867 lpiirq_error:
2868 	if (priv->wol_irq != dev->irq)
2869 		free_irq(priv->wol_irq, dev);
2870 wolirq_error:
2871 	free_irq(dev->irq, dev);
2872 irq_error:
2873 	phylink_stop(priv->phylink);
2874 
2875 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2876 		del_timer_sync(&priv->tx_queue[chan].txtimer);
2877 
2878 	stmmac_hw_teardown(dev);
2879 init_error:
2880 	free_dma_desc_resources(priv);
2881 dma_desc_error:
2882 	phylink_disconnect_phy(priv->phylink);
2883 	return ret;
2884 }
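
/*
 * Illustrative note (not part of the original driver): the receive buffer
 * size negotiated at open time depends on the MTU and on whether the core
 * supports 16 KiB buffers.  As a rough guide (the exact thresholds live in
 * the descriptor-mode helpers), a standard 1500-byte MTU typically ends up
 * with the 1536-byte default buffer, while jumbo MTUs select 8 KiB or
 * 16 KiB buffers instead.
 */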
2885 
2886 /**
2887  *  stmmac_release - close entry point of the driver
2888  *  @dev : device pointer.
2889  *  Description:
2890  *  This is the stop entry point of the driver.
2891  */
2892 static int stmmac_release(struct net_device *dev)
2893 {
2894 	struct stmmac_priv *priv = netdev_priv(dev);
2895 	u32 chan;
2896 
2897 	if (priv->eee_enabled)
2898 		del_timer_sync(&priv->eee_ctrl_timer);
2899 
2900 	if (device_may_wakeup(priv->device))
2901 		phylink_speed_down(priv->phylink, false);
2902 	/* Stop and disconnect the PHY */
2903 	phylink_stop(priv->phylink);
2904 	phylink_disconnect_phy(priv->phylink);
2905 
2906 	stmmac_stop_all_queues(priv);
2907 
2908 	stmmac_disable_all_queues(priv);
2909 
2910 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2911 		del_timer_sync(&priv->tx_queue[chan].txtimer);
2912 
2913 	/* Free the IRQ lines */
2914 	free_irq(dev->irq, dev);
2915 	if (priv->wol_irq != dev->irq)
2916 		free_irq(priv->wol_irq, dev);
2917 	if (priv->lpi_irq > 0)
2918 		free_irq(priv->lpi_irq, dev);
2919 
2920 	/* Stop TX/RX DMA and clear the descriptors */
2921 	stmmac_stop_all_dma(priv);
2922 
2923 	/* Release and free the Rx/Tx resources */
2924 	free_dma_desc_resources(priv);
2925 
2926 	/* Disable the MAC Rx/Tx */
2927 	stmmac_mac_set(priv, priv->ioaddr, false);
2928 
2929 	netif_carrier_off(dev);
2930 
2931 	stmmac_release_ptp(priv);
2932 
2933 	return 0;
2934 }
2935 
2936 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
2937 			       struct stmmac_tx_queue *tx_q)
2938 {
2939 	u16 tag = 0x0, inner_tag = 0x0;
2940 	u32 inner_type = 0x0;
2941 	struct dma_desc *p;
2942 
2943 	if (!priv->dma_cap.vlins)
2944 		return false;
2945 	if (!skb_vlan_tag_present(skb))
2946 		return false;
2947 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
2948 		inner_tag = skb_vlan_tag_get(skb);
2949 		inner_type = STMMAC_VLAN_INSERT;
2950 	}
2951 
2952 	tag = skb_vlan_tag_get(skb);
2953 
2954 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
2955 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
2956 	else
2957 		p = &tx_q->dma_tx[tx_q->cur_tx];
2958 
2959 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
2960 		return false;
2961 
2962 	stmmac_set_tx_owner(priv, p);
2963 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2964 	return true;
2965 }
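
/*
 * Illustrative note (not part of the original driver): for a plain 802.1Q
 * tagged skb the helper above only fills the outer tag, while an 802.1AD
 * (QinQ) skb also programs the inner tag.  For example:
 *
 *	vlan_proto = ETH_P_8021Q,  tag 100 -> tag = 100, inner_tag = 0
 *	vlan_proto = ETH_P_8021AD, tag 200 -> tag = 200, inner_tag = 200,
 *	                                      inner_type = STMMAC_VLAN_INSERT
 */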
2966 
2967 /**
2968  *  stmmac_tso_allocator - fill TSO descriptors for a buffer
2969  *  @priv: driver private structure
2970  *  @des: DMA address of the buffer start
2971  *  @total_len: total length to fill in descriptors
2972  *  @last_segment: condition for the last descriptor
2973  *  @queue: TX queue index
2974  *  Description:
2975  *  This function fills the descriptors and requests new descriptors
2976  *  according to the buffer length to fill.
2977  */
2978 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
2979 				 int total_len, bool last_segment, u32 queue)
2980 {
2981 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2982 	struct dma_desc *desc;
2983 	u32 buff_size;
2984 	int tmp_len;
2985 
2986 	tmp_len = total_len;
2987 
2988 	while (tmp_len > 0) {
2989 		dma_addr_t curr_addr;
2990 
2991 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2992 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2993 
2994 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
2995 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
2996 		else
2997 			desc = &tx_q->dma_tx[tx_q->cur_tx];
2998 
2999 		curr_addr = des + (total_len - tmp_len);
3000 		if (priv->dma_cap.addr64 <= 32)
3001 			desc->des0 = cpu_to_le32(curr_addr);
3002 		else
3003 			stmmac_set_desc_addr(priv, desc, curr_addr);
3004 
3005 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3006 			    TSO_MAX_BUFF_SIZE : tmp_len;
3007 
3008 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3009 				0, 1,
3010 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3011 				0, 0);
3012 
3013 		tmp_len -= TSO_MAX_BUFF_SIZE;
3014 	}
3015 }
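
/*
 * Illustrative note (not part of the original driver): each pass of the
 * loop above consumes one descriptor carrying at most TSO_MAX_BUFF_SIZE
 * (16383) bytes.  A 40000-byte payload is therefore spread over three
 * descriptors:
 *
 *	16383 + 16383 + 7234 = 40000
 *
 * with the "last segment" bit only set on the final one when the caller
 * asked for it.
 */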
3016 
3017 /**
3018  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3019  *  @skb : the socket buffer
3020  *  @dev : device pointer
3021  *  Description: this is the transmit function that is called on TSO frames
3022  *  (support available on GMAC4 and newer chips).
3023  *  The diagram below shows the ring programming in case of TSO frames:
3024  *
3025  *  First Descriptor
3026  *   --------
3027  *   | DES0 |---> buffer1 = L2/L3/L4 header
3028  *   | DES1 |---> TCP Payload (can continue on next descr...)
3029  *   | DES2 |---> buffer 1 and 2 len
3030  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3031  *   --------
3032  *	|
3033  *     ...
3034  *	|
3035  *   --------
3036  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3037  *   | DES1 | --|
3038  *   | DES2 | --> buffer 1 and 2 len
3039  *   | DES3 |
3040  *   --------
3041  *
3042  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field need not be reprogrammed per frame.
3043  */
3044 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3045 {
3046 	struct dma_desc *desc, *first, *mss_desc = NULL;
3047 	struct stmmac_priv *priv = netdev_priv(dev);
3048 	int desc_size, tmp_pay_len = 0, first_tx;
3049 	int nfrags = skb_shinfo(skb)->nr_frags;
3050 	u32 queue = skb_get_queue_mapping(skb);
3051 	unsigned int first_entry, tx_packets;
3052 	struct stmmac_tx_queue *tx_q;
3053 	bool has_vlan, set_ic;
3054 	u8 proto_hdr_len, hdr;
3055 	u32 pay_len, mss;
3056 	dma_addr_t des;
3057 	int i;
3058 
3059 	tx_q = &priv->tx_queue[queue];
3060 	first_tx = tx_q->cur_tx;
3061 
3062 	/* Compute header lengths */
3063 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3064 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3065 		hdr = sizeof(struct udphdr);
3066 	} else {
3067 		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3068 		hdr = tcp_hdrlen(skb);
3069 	}
3070 
3071 	/* Desc availability based on threshold should be enough safe */
3072 	if (unlikely(stmmac_tx_avail(priv, queue) <
3073 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3074 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3075 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3076 								queue));
3077 			/* This is a hard error, log it. */
3078 			netdev_err(priv->dev,
3079 				   "%s: Tx Ring full when queue awake\n",
3080 				   __func__);
3081 		}
3082 		return NETDEV_TX_BUSY;
3083 	}
3084 
3085 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3086 
3087 	mss = skb_shinfo(skb)->gso_size;
3088 
3089 	/* set new MSS value if needed */
3090 	if (mss != tx_q->mss) {
3091 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3092 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3093 		else
3094 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3095 
3096 		stmmac_set_mss(priv, mss_desc, mss);
3097 		tx_q->mss = mss;
3098 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3099 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3100 	}
3101 
3102 	if (netif_msg_tx_queued(priv)) {
3103 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3104 			__func__, hdr, proto_hdr_len, pay_len, mss);
3105 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3106 			skb->data_len);
3107 	}
3108 
3109 	/* Check if VLAN can be inserted by HW */
3110 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3111 
3112 	first_entry = tx_q->cur_tx;
3113 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3114 
3115 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3116 		desc = &tx_q->dma_entx[first_entry].basic;
3117 	else
3118 		desc = &tx_q->dma_tx[first_entry];
3119 	first = desc;
3120 
3121 	if (has_vlan)
3122 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3123 
3124 	/* first descriptor: fill Headers on Buf1 */
3125 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
3126 			     DMA_TO_DEVICE);
3127 	if (dma_mapping_error(priv->device, des))
3128 		goto dma_map_err;
3129 
3130 	tx_q->tx_skbuff_dma[first_entry].buf = des;
3131 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
3132 
3133 	if (priv->dma_cap.addr64 <= 32) {
3134 		first->des0 = cpu_to_le32(des);
3135 
3136 		/* Fill start of payload in buff2 of first descriptor */
3137 		if (pay_len)
3138 			first->des1 = cpu_to_le32(des + proto_hdr_len);
3139 
3140 		/* If needed take extra descriptors to fill the remaining payload */
3141 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3142 	} else {
3143 		stmmac_set_desc_addr(priv, first, des);
3144 		tmp_pay_len = pay_len;
3145 		des += proto_hdr_len;
3146 		pay_len = 0;
3147 	}
3148 
3149 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
3150 
3151 	/* Prepare fragments */
3152 	for (i = 0; i < nfrags; i++) {
3153 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3154 
3155 		des = skb_frag_dma_map(priv->device, frag, 0,
3156 				       skb_frag_size(frag),
3157 				       DMA_TO_DEVICE);
3158 		if (dma_mapping_error(priv->device, des))
3159 			goto dma_map_err;
3160 
3161 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3162 				     (i == nfrags - 1), queue);
3163 
3164 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3165 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3166 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3167 	}
3168 
3169 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3170 
3171 	/* Only the last descriptor gets to point to the skb. */
3172 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
3173 
3174 	/* Manage tx mitigation */
3175 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
3176 	tx_q->tx_count_frames += tx_packets;
3177 
3178 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3179 		set_ic = true;
3180 	else if (!priv->tx_coal_frames)
3181 		set_ic = false;
3182 	else if (tx_packets > priv->tx_coal_frames)
3183 		set_ic = true;
3184 	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3185 		set_ic = true;
3186 	else
3187 		set_ic = false;
3188 
3189 	if (set_ic) {
3190 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3191 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3192 		else
3193 			desc = &tx_q->dma_tx[tx_q->cur_tx];
3194 
3195 		tx_q->tx_count_frames = 0;
3196 		stmmac_set_tx_ic(priv, desc);
3197 		priv->xstats.tx_set_ic_bit++;
3198 	}
3199 
3200 	/* We've used all descriptors we need for this skb, however,
3201 	 * advance cur_tx so that it references a fresh descriptor.
3202 	 * ndo_start_xmit will fill this descriptor the next time it's
3203 	 * called and stmmac_tx_clean may clean up to this descriptor.
3204 	 */
3205 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3206 
3207 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3208 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3209 			  __func__);
3210 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3211 	}
3212 
3213 	dev->stats.tx_bytes += skb->len;
3214 	priv->xstats.tx_tso_frames++;
3215 	priv->xstats.tx_tso_nfrags += nfrags;
3216 
3217 	if (priv->sarc_type)
3218 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3219 
3220 	skb_tx_timestamp(skb);
3221 
3222 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3223 		     priv->hwts_tx_en)) {
3224 		/* declare that device is doing timestamping */
3225 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3226 		stmmac_enable_tx_timestamp(priv, first);
3227 	}
3228 
3229 	/* Complete the first descriptor before granting the DMA */
3230 	stmmac_prepare_tso_tx_desc(priv, first, 1,
3231 			proto_hdr_len,
3232 			pay_len,
3233 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3234 			hdr / 4, (skb->len - proto_hdr_len));
3235 
3236 	/* If context desc is used to change MSS */
3237 	if (mss_desc) {
3238 		/* Make sure that first descriptor has been completely
3239 		 * written, including its own bit. This is because MSS is
3240 		 * actually before first descriptor, so we need to make
3241 		 * sure that MSS's own bit is the last thing written.
3242 		 */
3243 		dma_wmb();
3244 		stmmac_set_tx_owner(priv, mss_desc);
3245 	}
3246 
3247 	/* The own bit must be the latest setting done when prepare the
3248 	 * descriptor and then barrier is needed to make sure that
3249 	 * all is coherent before granting the DMA engine.
3250 	 */
3251 	wmb();
3252 
3253 	if (netif_msg_pktdata(priv)) {
3254 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3255 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3256 			tx_q->cur_tx, first, nfrags);
3257 		pr_info(">>> frame to be transmitted: ");
3258 		print_pkt(skb->data, skb_headlen(skb));
3259 	}
3260 
3261 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3262 
3263 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3264 		desc_size = sizeof(struct dma_edesc);
3265 	else
3266 		desc_size = sizeof(struct dma_desc);
3267 
3268 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3269 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3270 	stmmac_tx_timer_arm(priv, queue);
3271 
3272 	return NETDEV_TX_OK;
3273 
3274 dma_map_err:
3275 	dev_err(priv->device, "Tx dma map failed\n");
3276 	dev_kfree_skb(skb);
3277 	priv->dev->stats.tx_dropped++;
3278 	return NETDEV_TX_OK;
3279 }
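
/*
 * Illustrative note (not part of the original driver): the set_ic decision
 * above requests a TX-complete interrupt roughly once every tx_coal_frames
 * packets.  A minimal sketch, assuming tx_coal_frames == 25:
 *
 *	tx_count_frames was 24, this frame adds tx_packets = 3 -> 27
 *	27 % 25 = 2, and 2 < 3, so set_ic = true and the counter is reset
 *
 * Frames that need a hardware timestamp always get the interrupt bit, and
 * the armed TX timer cleans up anything left over.
 */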
3280 
3281 /**
3282  *  stmmac_xmit - Tx entry point of the driver
3283  *  @skb : the socket buffer
3284  *  @dev : device pointer
3285  *  Description : this is the tx entry point of the driver.
3286  *  It programs the chain or the ring and supports oversized frames
3287  *  and SG feature.
3288  */
3289 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3290 {
3291 	unsigned int first_entry, tx_packets, enh_desc;
3292 	struct stmmac_priv *priv = netdev_priv(dev);
3293 	unsigned int nopaged_len = skb_headlen(skb);
3294 	int i, csum_insertion = 0, is_jumbo = 0;
3295 	u32 queue = skb_get_queue_mapping(skb);
3296 	int nfrags = skb_shinfo(skb)->nr_frags;
3297 	int gso = skb_shinfo(skb)->gso_type;
3298 	struct dma_edesc *tbs_desc = NULL;
3299 	int entry, desc_size, first_tx;
3300 	struct dma_desc *desc, *first;
3301 	struct stmmac_tx_queue *tx_q;
3302 	bool has_vlan, set_ic;
3303 	dma_addr_t des;
3304 
3305 	tx_q = &priv->tx_queue[queue];
3306 	first_tx = tx_q->cur_tx;
3307 
3308 	if (priv->tx_path_in_lpi_mode)
3309 		stmmac_disable_eee_mode(priv);
3310 
3311 	/* Manage oversized TCP frames for GMAC4 device */
3312 	if (skb_is_gso(skb) && priv->tso) {
3313 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3314 			return stmmac_tso_xmit(skb, dev);
3315 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
3316 			return stmmac_tso_xmit(skb, dev);
3317 	}
3318 
3319 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3320 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3321 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3322 								queue));
3323 			/* This is a hard error, log it. */
3324 			netdev_err(priv->dev,
3325 				   "%s: Tx Ring full when queue awake\n",
3326 				   __func__);
3327 		}
3328 		return NETDEV_TX_BUSY;
3329 	}
3330 
3331 	/* Check if VLAN can be inserted by HW */
3332 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3333 
3334 	entry = tx_q->cur_tx;
3335 	first_entry = entry;
3336 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3337 
3338 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3339 
3340 	if (likely(priv->extend_desc))
3341 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3342 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3343 		desc = &tx_q->dma_entx[entry].basic;
3344 	else
3345 		desc = tx_q->dma_tx + entry;
3346 
3347 	first = desc;
3348 
3349 	if (has_vlan)
3350 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3351 
3352 	enh_desc = priv->plat->enh_desc;
3353 	/* To program the descriptors according to the size of the frame */
3354 	if (enh_desc)
3355 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3356 
3357 	if (unlikely(is_jumbo)) {
3358 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3359 		if (unlikely(entry < 0) && (entry != -EINVAL))
3360 			goto dma_map_err;
3361 	}
3362 
3363 	for (i = 0; i < nfrags; i++) {
3364 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3365 		int len = skb_frag_size(frag);
3366 		bool last_segment = (i == (nfrags - 1));
3367 
3368 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3369 		WARN_ON(tx_q->tx_skbuff[entry]);
3370 
3371 		if (likely(priv->extend_desc))
3372 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3373 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3374 			desc = &tx_q->dma_entx[entry].basic;
3375 		else
3376 			desc = tx_q->dma_tx + entry;
3377 
3378 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3379 				       DMA_TO_DEVICE);
3380 		if (dma_mapping_error(priv->device, des))
3381 			goto dma_map_err; /* should reuse desc w/o issues */
3382 
3383 		tx_q->tx_skbuff_dma[entry].buf = des;
3384 
3385 		stmmac_set_desc_addr(priv, desc, des);
3386 
3387 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3388 		tx_q->tx_skbuff_dma[entry].len = len;
3389 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3390 
3391 		/* Prepare the descriptor and set the own bit too */
3392 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3393 				priv->mode, 1, last_segment, skb->len);
3394 	}
3395 
3396 	/* Only the last descriptor gets to point to the skb. */
3397 	tx_q->tx_skbuff[entry] = skb;
3398 
3399 	/* According to the coalesce parameter the IC bit for the latest
3400 	 * segment is reset and the timer re-started to clean the tx status.
3401 	 * This approach takes care about the fragments: desc is the first
3402 	 * element in case of no SG.
3403 	 */
3404 	tx_packets = (entry + 1) - first_tx;
3405 	tx_q->tx_count_frames += tx_packets;
3406 
3407 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3408 		set_ic = true;
3409 	else if (!priv->tx_coal_frames)
3410 		set_ic = false;
3411 	else if (tx_packets > priv->tx_coal_frames)
3412 		set_ic = true;
3413 	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3414 		set_ic = true;
3415 	else
3416 		set_ic = false;
3417 
3418 	if (set_ic) {
3419 		if (likely(priv->extend_desc))
3420 			desc = &tx_q->dma_etx[entry].basic;
3421 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3422 			desc = &tx_q->dma_entx[entry].basic;
3423 		else
3424 			desc = &tx_q->dma_tx[entry];
3425 
3426 		tx_q->tx_count_frames = 0;
3427 		stmmac_set_tx_ic(priv, desc);
3428 		priv->xstats.tx_set_ic_bit++;
3429 	}
3430 
3431 	/* We've used all descriptors we need for this skb, however,
3432 	 * advance cur_tx so that it references a fresh descriptor.
3433 	 * ndo_start_xmit will fill this descriptor the next time it's
3434 	 * called and stmmac_tx_clean may clean up to this descriptor.
3435 	 */
3436 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3437 	tx_q->cur_tx = entry;
3438 
3439 	if (netif_msg_pktdata(priv)) {
3440 		netdev_dbg(priv->dev,
3441 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3442 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3443 			   entry, first, nfrags);
3444 
3445 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3446 		print_pkt(skb->data, skb->len);
3447 	}
3448 
3449 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3450 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3451 			  __func__);
3452 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3453 	}
3454 
3455 	dev->stats.tx_bytes += skb->len;
3456 
3457 	if (priv->sarc_type)
3458 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3459 
3460 	skb_tx_timestamp(skb);
3461 
3462 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3463 	 * problems because all the descriptors are actually ready to be
3464 	 * passed to the DMA engine.
3465 	 */
3466 	if (likely(!is_jumbo)) {
3467 		bool last_segment = (nfrags == 0);
3468 
3469 		des = dma_map_single(priv->device, skb->data,
3470 				     nopaged_len, DMA_TO_DEVICE);
3471 		if (dma_mapping_error(priv->device, des))
3472 			goto dma_map_err;
3473 
3474 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3475 
3476 		stmmac_set_desc_addr(priv, first, des);
3477 
3478 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3479 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3480 
3481 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3482 			     priv->hwts_tx_en)) {
3483 			/* declare that device is doing timestamping */
3484 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3485 			stmmac_enable_tx_timestamp(priv, first);
3486 		}
3487 
3488 		/* Prepare the first descriptor setting the OWN bit too */
3489 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3490 				csum_insertion, priv->mode, 0, last_segment,
3491 				skb->len);
3492 	}
3493 
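	/* With time-based scheduling (TBS) enabled on this queue, program the
	 * skb's launch time into the enhanced TX descriptor.
	 */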
3494 	if (tx_q->tbs & STMMAC_TBS_EN) {
3495 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
3496 
3497 		tbs_desc = &tx_q->dma_entx[first_entry];
3498 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
3499 	}
3500 
3501 	stmmac_set_tx_owner(priv, first);
3502 
3503 	/* The OWN bit must be the latest setting done when preparing the
3504 	 * descriptor, and then a barrier is needed to make sure that
3505 	 * all fields are coherent before handing it over to the DMA engine.
3506 	 */
3507 	wmb();
3508 
3509 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3510 
3511 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
3512 
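	/* Advance the TX tail pointer past the last prepared descriptor; the
	 * byte offset depends on which descriptor layout this queue uses.
	 */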
3513 	if (likely(priv->extend_desc))
3514 		desc_size = sizeof(struct dma_extended_desc);
3515 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3516 		desc_size = sizeof(struct dma_edesc);
3517 	else
3518 		desc_size = sizeof(struct dma_desc);
3519 
3520 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3521 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3522 	stmmac_tx_timer_arm(priv, queue);
3523 
3524 	return NETDEV_TX_OK;
3525 
3526 dma_map_err:
3527 	netdev_err(priv->dev, "Tx DMA map failed\n");
3528 	dev_kfree_skb(skb);
3529 	priv->dev->stats.tx_dropped++;
3530 	return NETDEV_TX_OK;
3531 }
3532 
3533 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3534 {
3535 	struct vlan_ethhdr *veth;
3536 	__be16 vlan_proto;
3537 	u16 vlanid;
3538 
3539 	veth = (struct vlan_ethhdr *)skb->data;
3540 	vlan_proto = veth->h_vlan_proto;
3541 
3542 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3543 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3544 	    (vlan_proto == htons(ETH_P_8021AD) &&
3545 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3546 		/* pop the vlan tag */
3547 		vlanid = ntohs(veth->h_vlan_TCI);
3548 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3549 		skb_pull(skb, VLAN_HLEN);
3550 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3551 	}
3552 }
3553 
3554 /**
3555  * stmmac_rx_refill - refill used RX buffers
3556  * @priv: driver private structure
3557  * @queue: RX queue index
3558  * Description: this reallocates the page-pool RX buffers used by the
3559  * zero-copy reception process.
3560  */
3561 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3562 {
3563 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3564 	int len, dirty = stmmac_rx_dirty(priv, queue);
3565 	unsigned int entry = rx_q->dirty_rx;
3566 
3567 	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
3568 
3569 	while (dirty-- > 0) {
3570 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3571 		struct dma_desc *p;
3572 		bool use_rx_wd;
3573 
3574 		if (priv->extend_desc)
3575 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3576 		else
3577 			p = rx_q->dma_rx + entry;
3578 
3579 		if (!buf->page) {
3580 			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
3581 			if (!buf->page)
3582 				break;
3583 		}
3584 
3585 		if (priv->sph && !buf->sec_page) {
3586 			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
3587 			if (!buf->sec_page)
3588 				break;
3589 
3590 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
3591 
3592 			dma_sync_single_for_device(priv->device, buf->sec_addr,
3593 						   len, DMA_FROM_DEVICE);
3594 		}
3595 
3596 		buf->addr = page_pool_get_dma_addr(buf->page);
3597 
3598 		/* Sync whole allocation to device. This will invalidate old
3599 		 * data.
3600 		 */
3601 		dma_sync_single_for_device(priv->device, buf->addr, len,
3602 					   DMA_FROM_DEVICE);
3603 
3604 		stmmac_set_desc_addr(priv, p, buf->addr);
3605 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
3606 		stmmac_refill_desc3(priv, rx_q, p);
3607 
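		/* RX interrupt coalescing: when the RX watchdog (RIWT) is in
		 * use, suppress the per-descriptor completion interrupt and
		 * rely on the watchdog instead.
		 */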
3608 		rx_q->rx_count_frames++;
3609 		rx_q->rx_count_frames += priv->rx_coal_frames;
3610 		if (rx_q->rx_count_frames > priv->rx_coal_frames)
3611 			rx_q->rx_count_frames = 0;
3612 
3613 		use_rx_wd = !priv->rx_coal_frames;
3614 		use_rx_wd |= rx_q->rx_count_frames > 0;
3615 		if (!priv->use_riwt)
3616 			use_rx_wd = false;
3617 
3618 		dma_wmb();
3619 		stmmac_set_rx_owner(priv, p, use_rx_wd);
3620 
3621 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3622 	}
3623 	rx_q->dirty_rx = entry;
3624 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3625 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
3626 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3627 }
3628 
3629 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
3630 				       struct dma_desc *p,
3631 				       int status, unsigned int len)
3632 {
3633 	int ret, coe = priv->hw->rx_csum;
3634 	unsigned int plen = 0, hlen = 0;
3635 
3636 	/* Not first descriptor, buffer is always zero */
3637 	if (priv->sph && len)
3638 		return 0;
3639 
3640 	/* First descriptor, get split header length */
3641 	ret = stmmac_get_rx_header_len(priv, p, &hlen);
3642 	if (priv->sph && hlen) {
3643 		priv->xstats.rx_split_hdr_pkt_n++;
3644 		return hlen;
3645 	}
3646 
3647 	/* First descriptor, not last descriptor and not split header */
3648 	if (status & rx_not_ls)
3649 		return priv->dma_buf_sz;
3650 
3651 	plen = stmmac_get_rx_frame_len(priv, p, coe);
3652 
3653 	/* First descriptor and last descriptor and not split header */
3654 	return min_t(unsigned int, priv->dma_buf_sz, plen);
3655 }
3656 
3657 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
3658 				       struct dma_desc *p,
3659 				       int status, unsigned int len)
3660 {
3661 	int coe = priv->hw->rx_csum;
3662 	unsigned int plen = 0;
3663 
3664 	/* Not split header, buffer is not available */
3665 	if (!priv->sph)
3666 		return 0;
3667 
3668 	/* Not last descriptor */
3669 	if (status & rx_not_ls)
3670 		return priv->dma_buf_sz;
3671 
3672 	plen = stmmac_get_rx_frame_len(priv, p, coe);
3673 
3674 	/* Last descriptor */
3675 	return plen - len;
3676 }
3677 
3678 /**
3679  * stmmac_rx - manage the receive process
3680  * @priv: driver private structure
3681  * @limit: NAPI budget
3682  * @queue: RX queue index.
3683  * Description: this is the function called by the NAPI poll method.
3684  * It gets all the frames inside the ring.
3685  */
3686 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3687 {
3688 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3689 	struct stmmac_channel *ch = &priv->channel[queue];
3690 	unsigned int count = 0, error = 0, len = 0;
3691 	int status = 0, coe = priv->hw->rx_csum;
3692 	unsigned int next_entry = rx_q->cur_rx;
3693 	struct sk_buff *skb = NULL;
3694 
3695 	if (netif_msg_rx_status(priv)) {
3696 		void *rx_head;
3697 
3698 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3699 		if (priv->extend_desc)
3700 			rx_head = (void *)rx_q->dma_erx;
3701 		else
3702 			rx_head = (void *)rx_q->dma_rx;
3703 
3704 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3705 	}
3706 	while (count < limit) {
3707 		unsigned int buf1_len = 0, buf2_len = 0;
3708 		enum pkt_hash_types hash_type;
3709 		struct stmmac_rx_buffer *buf;
3710 		struct dma_desc *np, *p;
3711 		int entry;
3712 		u32 hash;
3713 
3714 		if (!count && rx_q->state_saved) {
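		/* If the previous NAPI poll stopped in the middle of a
		 * multi-descriptor frame, resume from the saved state.
		 */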
3715 			skb = rx_q->state.skb;
3716 			error = rx_q->state.error;
3717 			len = rx_q->state.len;
3718 		} else {
3719 			rx_q->state_saved = false;
3720 			skb = NULL;
3721 			error = 0;
3722 			len = 0;
3723 		}
3724 
3725 		if (count >= limit)
3726 			break;
3727 
3728 read_again:
3729 		buf1_len = 0;
3730 		buf2_len = 0;
3731 		entry = next_entry;
3732 		buf = &rx_q->buf_pool[entry];
3733 
3734 		if (priv->extend_desc)
3735 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3736 		else
3737 			p = rx_q->dma_rx + entry;
3738 
3739 		/* read the status of the incoming frame */
3740 		status = stmmac_rx_status(priv, &priv->dev->stats,
3741 				&priv->xstats, p);
3742 		/* check if managed by the DMA otherwise go ahead */
3743 		if (unlikely(status & dma_own))
3744 			break;
3745 
3746 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3747 		next_entry = rx_q->cur_rx;
3748 
3749 		if (priv->extend_desc)
3750 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3751 		else
3752 			np = rx_q->dma_rx + next_entry;
3753 
3754 		prefetch(np);
3755 
3756 		if (priv->extend_desc)
3757 			stmmac_rx_extended_status(priv, &priv->dev->stats,
3758 					&priv->xstats, rx_q->dma_erx + entry);
3759 		if (unlikely(status == discard_frame)) {
3760 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
3761 			buf->page = NULL;
3762 			error = 1;
3763 			if (!priv->hwts_rx_en)
3764 				priv->dev->stats.rx_errors++;
3765 		}
3766 
3767 		if (unlikely(error && (status & rx_not_ls)))
3768 			goto read_again;
3769 		if (unlikely(error)) {
3770 			dev_kfree_skb(skb);
3771 			skb = NULL;
3772 			count++;
3773 			continue;
3774 		}
3775 
3776 		/* Buffer is good. Go on. */
3777 
3778 		prefetch(page_address(buf->page));
3779 		if (buf->sec_page)
3780 			prefetch(page_address(buf->sec_page));
3781 
3782 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
3783 		len += buf1_len;
3784 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
3785 		len += buf2_len;
3786 
3787 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3788 		 * Type frames (LLC/LLC-SNAP)
3789 		 *
3790 		 * llc_snap is never checked in GMAC >= 4, so this ACS
3791 		 * feature is always disabled and packets need to be
3792 		 * stripped manually.
3793 		 */
3794 		if (likely(!(status & rx_not_ls)) &&
3795 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3796 		     unlikely(status != llc_snap))) {
3797 			if (buf2_len)
3798 				buf2_len -= ETH_FCS_LEN;
3799 			else
3800 				buf1_len -= ETH_FCS_LEN;
3801 
3802 			len -= ETH_FCS_LEN;
3803 		}
3804 
3805 		if (!skb) {
3806 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
3807 			if (!skb) {
3808 				priv->dev->stats.rx_dropped++;
3809 				count++;
3810 				goto drain_data;
3811 			}
3812 
3813 			dma_sync_single_for_cpu(priv->device, buf->addr,
3814 						buf1_len, DMA_FROM_DEVICE);
3815 			skb_copy_to_linear_data(skb, page_address(buf->page),
3816 						buf1_len);
3817 			skb_put(skb, buf1_len);
3818 
3819 			/* Data payload copied into SKB, page ready for recycle */
3820 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
3821 			buf->page = NULL;
3822 		} else if (buf1_len) {
3823 			dma_sync_single_for_cpu(priv->device, buf->addr,
3824 						buf1_len, DMA_FROM_DEVICE);
3825 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3826 					buf->page, 0, buf1_len,
3827 					priv->dma_buf_sz);
3828 
3829 			/* Data payload appended into SKB */
3830 			page_pool_release_page(rx_q->page_pool, buf->page);
3831 			buf->page = NULL;
3832 		}
3833 
3834 		if (buf2_len) {
3835 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
3836 						buf2_len, DMA_FROM_DEVICE);
3837 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3838 					buf->sec_page, 0, buf2_len,
3839 					priv->dma_buf_sz);
3840 
3841 			/* Data payload appended into SKB */
3842 			page_pool_release_page(rx_q->page_pool, buf->sec_page);
3843 			buf->sec_page = NULL;
3844 		}
3845 
3846 drain_data:
3847 		if (likely(status & rx_not_ls))
3848 			goto read_again;
3849 		if (!skb)
3850 			continue;
3851 
3852 		/* Got entire packet into SKB. Finish it. */
3853 
3854 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
3855 		stmmac_rx_vlan(priv->dev, skb);
3856 		skb->protocol = eth_type_trans(skb, priv->dev);
3857 
3858 		if (unlikely(!coe))
3859 			skb_checksum_none_assert(skb);
3860 		else
3861 			skb->ip_summed = CHECKSUM_UNNECESSARY;
3862 
3863 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
3864 			skb_set_hash(skb, hash, hash_type);
3865 
3866 		skb_record_rx_queue(skb, queue);
3867 		napi_gro_receive(&ch->rx_napi, skb);
3868 		skb = NULL;
3869 
3870 		priv->dev->stats.rx_packets++;
3871 		priv->dev->stats.rx_bytes += len;
3872 		count++;
3873 	}
3874 
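	/* A frame is still being assembled (no last segment seen yet):
	 * save the partial state so the next NAPI poll can resume it.
	 */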
3875 	if (status & rx_not_ls || skb) {
3876 		rx_q->state_saved = true;
3877 		rx_q->state.skb = skb;
3878 		rx_q->state.error = error;
3879 		rx_q->state.len = len;
3880 	}
3881 
3882 	stmmac_rx_refill(priv, queue);
3883 
3884 	priv->xstats.rx_pkt_n += count;
3885 
3886 	return count;
3887 }
3888 
3889 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
3890 {
3891 	struct stmmac_channel *ch =
3892 		container_of(napi, struct stmmac_channel, rx_napi);
3893 	struct stmmac_priv *priv = ch->priv_data;
3894 	u32 chan = ch->index;
3895 	int work_done;
3896 
3897 	priv->xstats.napi_poll++;
3898 
3899 	work_done = stmmac_rx(priv, budget, chan);
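	/* Re-enable the per-channel RX interrupt only once NAPI is complete */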
3900 	if (work_done < budget && napi_complete_done(napi, work_done)) {
3901 		unsigned long flags;
3902 
3903 		spin_lock_irqsave(&ch->lock, flags);
3904 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
3905 		spin_unlock_irqrestore(&ch->lock, flags);
3906 	}
3907 
3908 	return work_done;
3909 }
3910 
3911 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
3912 {
3913 	struct stmmac_channel *ch =
3914 		container_of(napi, struct stmmac_channel, tx_napi);
3915 	struct stmmac_priv *priv = ch->priv_data;
3916 	u32 chan = ch->index;
3917 	int work_done;
3918 
3919 	priv->xstats.napi_poll++;
3920 
3921 	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3922 	work_done = min(work_done, budget);
3923 
3924 	if (work_done < budget && napi_complete_done(napi, work_done)) {
3925 		unsigned long flags;
3926 
3927 		spin_lock_irqsave(&ch->lock, flags);
3928 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
3929 		spin_unlock_irqrestore(&ch->lock, flags);
3930 	}
3931 
3932 	return work_done;
3933 }
3934 
3935 /**
3936  *  stmmac_tx_timeout
3937  *  @dev : Pointer to net device structure
3938  *  Description: this function is called when a packet transmission fails to
3939  *   complete within a reasonable time. The driver will mark the error in the
3940  *   netdev structure and arrange for the device to be reset to a sane state
3941  *   in order to transmit a new packet.
3942  */
3943 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
3944 {
3945 	struct stmmac_priv *priv = netdev_priv(dev);
3946 
3947 	stmmac_global_err(priv);
3948 }
3949 
3950 /**
3951  *  stmmac_set_rx_mode - entry point for multicast addressing
3952  *  @dev : pointer to the device structure
3953  *  Description:
3954  *  This function is a driver entry point which gets called by the kernel
3955  *  whenever multicast addresses must be enabled/disabled.
3956  *  Return value:
3957  *  void.
3958  */
3959 static void stmmac_set_rx_mode(struct net_device *dev)
3960 {
3961 	struct stmmac_priv *priv = netdev_priv(dev);
3962 
3963 	stmmac_set_filter(priv, priv->hw, dev);
3964 }
3965 
3966 /**
3967  *  stmmac_change_mtu - entry point to change MTU size for the device.
3968  *  @dev : device pointer.
3969  *  @new_mtu : the new MTU size for the device.
3970  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3971  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3972  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3973  *  Return value:
3974  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3975  *  file on failure.
3976  */
3977 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3978 {
3979 	struct stmmac_priv *priv = netdev_priv(dev);
3980 	int txfifosz = priv->plat->tx_fifo_size;
3981 
3982 	if (txfifosz == 0)
3983 		txfifosz = priv->dma_cap.tx_fifo_size;
3984 
3985 	txfifosz /= priv->plat->tx_queues_to_use;
3986 
3987 	if (netif_running(dev)) {
3988 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3989 		return -EBUSY;
3990 	}
3991 
3992 	new_mtu = STMMAC_ALIGN(new_mtu);
3993 
3994 	/* If condition true, FIFO is too small or MTU too large */
3995 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
3996 		return -EINVAL;
3997 
3998 	dev->mtu = new_mtu;
3999 
4000 	netdev_update_features(dev);
4001 
4002 	return 0;
4003 }
4004 
4005 static netdev_features_t stmmac_fix_features(struct net_device *dev,
4006 					     netdev_features_t features)
4007 {
4008 	struct stmmac_priv *priv = netdev_priv(dev);
4009 
4010 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
4011 		features &= ~NETIF_F_RXCSUM;
4012 
4013 	if (!priv->plat->tx_coe)
4014 		features &= ~NETIF_F_CSUM_MASK;
4015 
4016 	/* Some GMAC devices have buggy Jumbo frame support that requires
4017 	 * the Tx COE to be disabled for oversized frames (due to limited
4018 	 * buffer sizes). In this case we disable the TX csum insertion in
4019 	 * the TDES and do not use Store-and-Forward (SF) mode.
4020 	 */
4021 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
4022 		features &= ~NETIF_F_CSUM_MASK;
4023 
4024 	/* Disable tso if asked by ethtool */
4025 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4026 		if (features & NETIF_F_TSO)
4027 			priv->tso = true;
4028 		else
4029 			priv->tso = false;
4030 	}
4031 
4032 	return features;
4033 }
4034 
4035 static int stmmac_set_features(struct net_device *netdev,
4036 			       netdev_features_t features)
4037 {
4038 	struct stmmac_priv *priv = netdev_priv(netdev);
4039 	bool sph_en;
4040 	u32 chan;
4041 
4042 	/* Keep the COE Type if checksum offload is supported */
4043 	if (features & NETIF_F_RXCSUM)
4044 		priv->hw->rx_csum = priv->plat->rx_coe;
4045 	else
4046 		priv->hw->rx_csum = 0;
4047 	/* No return-value check is needed because rx_coe has been set before
4048 	 * and it will be fixed in case of an issue.
4049 	 */
4050 	stmmac_rx_ipc(priv, priv->hw);
4051 
4052 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
4053 	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
4054 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
4055 
4056 	return 0;
4057 }
4058 
4059 /**
4060  *  stmmac_interrupt - main ISR
4061  *  @irq: interrupt number.
4062  *  @dev_id: to pass the net device pointer (must be valid).
4063  *  Description: this is the main driver interrupt service routine.
4064  *  It can call:
4065  *  o DMA service routine (to manage incoming frame reception and transmission
4066  *    status)
4067  *  o Core interrupts to manage: remote wake-up, management counter, LPI
4068  *    interrupts.
4069  */
4070 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
4071 {
4072 	struct net_device *dev = (struct net_device *)dev_id;
4073 	struct stmmac_priv *priv = netdev_priv(dev);
4074 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4075 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4076 	u32 queues_count;
4077 	u32 queue;
4078 	bool xmac;
4079 
4080 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
4081 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
4082 
4083 	if (priv->irq_wake)
4084 		pm_wakeup_event(priv->device, 0);
4085 
4086 	/* Check if adapter is up */
4087 	if (test_bit(STMMAC_DOWN, &priv->state))
4088 		return IRQ_HANDLED;
4089 	/* Check if a fatal error happened */
4090 	if (stmmac_safety_feat_interrupt(priv))
4091 		return IRQ_HANDLED;
4092 
4093 	/* To handle GMAC own interrupts */
4094 	if ((priv->plat->has_gmac) || xmac) {
4095 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
4096 		int mtl_status;
4097 
4098 		if (unlikely(status)) {
4099 			/* For LPI we need to save the tx status */
4100 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
4101 				priv->tx_path_in_lpi_mode = true;
4102 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
4103 				priv->tx_path_in_lpi_mode = false;
4104 		}
4105 
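		/* On an MTL RX FIFO overflow, rewriting the RX tail pointer
		 * kicks the DMA so reception can resume.
		 */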
4106 		for (queue = 0; queue < queues_count; queue++) {
4107 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4108 
4109 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
4110 								queue);
4111 			if (mtl_status != -EINVAL)
4112 				status |= mtl_status;
4113 
4114 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
4115 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
4116 						       rx_q->rx_tail_addr,
4117 						       queue);
4118 		}
4119 
4120 		/* PCS link status */
4121 		if (priv->hw->pcs) {
4122 			if (priv->xstats.pcs_link)
4123 				netif_carrier_on(dev);
4124 			else
4125 				netif_carrier_off(dev);
4126 		}
4127 	}
4128 
4129 	/* To handle DMA interrupts */
4130 	stmmac_dma_interrupt(priv);
4131 
4132 	return IRQ_HANDLED;
4133 }
4134 
4135 #ifdef CONFIG_NET_POLL_CONTROLLER
4136 /* Polling receive - used by NETCONSOLE and other diagnostic tools
4137  * to allow network I/O with interrupts disabled.
4138  */
4139 static void stmmac_poll_controller(struct net_device *dev)
4140 {
4141 	disable_irq(dev->irq);
4142 	stmmac_interrupt(dev->irq, dev);
4143 	enable_irq(dev->irq);
4144 }
4145 #endif
4146 
4147 /**
4148  *  stmmac_ioctl - Entry point for the Ioctl
4149  *  @dev: Device pointer.
4150  *  @rq: An IOCTL-specific structure that can contain a pointer to
4151  *  a proprietary structure used to pass information to the driver.
4152  *  @cmd: IOCTL command
4153  *  Description:
4154  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
4155  */
4156 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4157 {
4158 	struct stmmac_priv *priv = netdev_priv(dev);
4159 	int ret = -EOPNOTSUPP;
4160 
4161 	if (!netif_running(dev))
4162 		return -EINVAL;
4163 
4164 	switch (cmd) {
4165 	case SIOCGMIIPHY:
4166 	case SIOCGMIIREG:
4167 	case SIOCSMIIREG:
4168 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
4169 		break;
4170 	case SIOCSHWTSTAMP:
4171 		ret = stmmac_hwtstamp_set(dev, rq);
4172 		break;
4173 	case SIOCGHWTSTAMP:
4174 		ret = stmmac_hwtstamp_get(dev, rq);
4175 		break;
4176 	default:
4177 		break;
4178 	}
4179 
4180 	return ret;
4181 }
4182 
4183 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4184 				    void *cb_priv)
4185 {
4186 	struct stmmac_priv *priv = cb_priv;
4187 	int ret = -EOPNOTSUPP;
4188 
4189 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4190 		return ret;
4191 
4192 	stmmac_disable_all_queues(priv);
4193 
4194 	switch (type) {
4195 	case TC_SETUP_CLSU32:
4196 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4197 		break;
4198 	case TC_SETUP_CLSFLOWER:
4199 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
4200 		break;
4201 	default:
4202 		break;
4203 	}
4204 
4205 	stmmac_enable_all_queues(priv);
4206 	return ret;
4207 }
4208 
4209 static LIST_HEAD(stmmac_block_cb_list);
4210 
4211 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
4212 			   void *type_data)
4213 {
4214 	struct stmmac_priv *priv = netdev_priv(ndev);
4215 
4216 	switch (type) {
4217 	case TC_SETUP_BLOCK:
4218 		return flow_block_cb_setup_simple(type_data,
4219 						  &stmmac_block_cb_list,
4220 						  stmmac_setup_tc_block_cb,
4221 						  priv, priv, true);
4222 	case TC_SETUP_QDISC_CBS:
4223 		return stmmac_tc_setup_cbs(priv, priv, type_data);
4224 	case TC_SETUP_QDISC_TAPRIO:
4225 		return stmmac_tc_setup_taprio(priv, priv, type_data);
4226 	case TC_SETUP_QDISC_ETF:
4227 		return stmmac_tc_setup_etf(priv, priv, type_data);
4228 	default:
4229 		return -EOPNOTSUPP;
4230 	}
4231 }
4232 
4233 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
4234 			       struct net_device *sb_dev)
4235 {
4236 	int gso = skb_shinfo(skb)->gso_type;
4237 
4238 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
4239 		/*
4240 		 * There is no way to determine the number of TSO/USO
4241 		 * capable Queues. Let's always use Queue 0
4242 		 * because if TSO/USO is supported then at least this
4243 		 * one will be capable.
4244 		 */
4245 		return 0;
4246 	}
4247 
4248 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
4249 }
4250 
4251 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4252 {
4253 	struct stmmac_priv *priv = netdev_priv(ndev);
4254 	int ret = 0;
4255 
4256 	ret = eth_mac_addr(ndev, addr);
4257 	if (ret)
4258 		return ret;
4259 
4260 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4261 
4262 	return ret;
4263 }
4264 
4265 #ifdef CONFIG_DEBUG_FS
4266 static struct dentry *stmmac_fs_dir;
4267 
4268 static void sysfs_display_ring(void *head, int size, int extend_desc,
4269 			       struct seq_file *seq)
4270 {
4271 	int i;
4272 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4273 	struct dma_desc *p = (struct dma_desc *)head;
4274 
4275 	for (i = 0; i < size; i++) {
4276 		if (extend_desc) {
4277 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4278 				   i, (unsigned int)virt_to_phys(ep),
4279 				   le32_to_cpu(ep->basic.des0),
4280 				   le32_to_cpu(ep->basic.des1),
4281 				   le32_to_cpu(ep->basic.des2),
4282 				   le32_to_cpu(ep->basic.des3));
4283 			ep++;
4284 		} else {
4285 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4286 				   i, (unsigned int)virt_to_phys(p),
4287 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4288 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4289 			p++;
4290 		}
4291 		seq_printf(seq, "\n");
4292 	}
4293 }
4294 
4295 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4296 {
4297 	struct net_device *dev = seq->private;
4298 	struct stmmac_priv *priv = netdev_priv(dev);
4299 	u32 rx_count = priv->plat->rx_queues_to_use;
4300 	u32 tx_count = priv->plat->tx_queues_to_use;
4301 	u32 queue;
4302 
4303 	if ((dev->flags & IFF_UP) == 0)
4304 		return 0;
4305 
4306 	for (queue = 0; queue < rx_count; queue++) {
4307 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4308 
4309 		seq_printf(seq, "RX Queue %d:\n", queue);
4310 
4311 		if (priv->extend_desc) {
4312 			seq_printf(seq, "Extended descriptor ring:\n");
4313 			sysfs_display_ring((void *)rx_q->dma_erx,
4314 					   DMA_RX_SIZE, 1, seq);
4315 		} else {
4316 			seq_printf(seq, "Descriptor ring:\n");
4317 			sysfs_display_ring((void *)rx_q->dma_rx,
4318 					   DMA_RX_SIZE, 0, seq);
4319 		}
4320 	}
4321 
4322 	for (queue = 0; queue < tx_count; queue++) {
4323 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4324 
4325 		seq_printf(seq, "TX Queue %d:\n", queue);
4326 
4327 		if (priv->extend_desc) {
4328 			seq_printf(seq, "Extended descriptor ring:\n");
4329 			sysfs_display_ring((void *)tx_q->dma_etx,
4330 					   DMA_TX_SIZE, 1, seq);
4331 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
4332 			seq_printf(seq, "Descriptor ring:\n");
4333 			sysfs_display_ring((void *)tx_q->dma_tx,
4334 					   DMA_TX_SIZE, 0, seq);
4335 		}
4336 	}
4337 
4338 	return 0;
4339 }
4340 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
4341 
4342 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4343 {
4344 	struct net_device *dev = seq->private;
4345 	struct stmmac_priv *priv = netdev_priv(dev);
4346 
4347 	if (!priv->hw_cap_support) {
4348 		seq_printf(seq, "DMA HW features not supported\n");
4349 		return 0;
4350 	}
4351 
4352 	seq_printf(seq, "==============================\n");
4353 	seq_printf(seq, "\tDMA HW features\n");
4354 	seq_printf(seq, "==============================\n");
4355 
4356 	seq_printf(seq, "\t10/100 Mbps: %s\n",
4357 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
4358 	seq_printf(seq, "\t1000 Mbps: %s\n",
4359 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
4360 	seq_printf(seq, "\tHalf duplex: %s\n",
4361 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
4362 	seq_printf(seq, "\tHash Filter: %s\n",
4363 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
4364 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4365 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
4366 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4367 		   (priv->dma_cap.pcs) ? "Y" : "N");
4368 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4369 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
4370 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
4371 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4372 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
4373 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4374 	seq_printf(seq, "\tRMON module: %s\n",
4375 		   (priv->dma_cap.rmon) ? "Y" : "N");
4376 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4377 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
4378 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4379 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
4380 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4381 		   (priv->dma_cap.eee) ? "Y" : "N");
4382 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4383 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4384 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
4385 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4386 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4387 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
4388 	} else {
4389 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4390 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4391 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4392 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4393 	}
4394 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4395 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4396 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4397 		   priv->dma_cap.number_rx_channel);
4398 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4399 		   priv->dma_cap.number_tx_channel);
4400 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
4401 		   priv->dma_cap.number_rx_queues);
4402 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
4403 		   priv->dma_cap.number_tx_queues);
4404 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4405 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
4406 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
4407 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
4408 	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
4409 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
4410 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
4411 		   priv->dma_cap.pps_out_num);
4412 	seq_printf(seq, "\tSafety Features: %s\n",
4413 		   priv->dma_cap.asp ? "Y" : "N");
4414 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
4415 		   priv->dma_cap.frpsel ? "Y" : "N");
4416 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
4417 		   priv->dma_cap.addr64);
4418 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
4419 		   priv->dma_cap.rssen ? "Y" : "N");
4420 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
4421 		   priv->dma_cap.vlhash ? "Y" : "N");
4422 	seq_printf(seq, "\tSplit Header: %s\n",
4423 		   priv->dma_cap.sphen ? "Y" : "N");
4424 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
4425 		   priv->dma_cap.vlins ? "Y" : "N");
4426 	seq_printf(seq, "\tDouble VLAN: %s\n",
4427 		   priv->dma_cap.dvlan ? "Y" : "N");
4428 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
4429 		   priv->dma_cap.l3l4fnum);
4430 	seq_printf(seq, "\tARP Offloading: %s\n",
4431 		   priv->dma_cap.arpoffsel ? "Y" : "N");
4432 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
4433 		   priv->dma_cap.estsel ? "Y" : "N");
4434 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
4435 		   priv->dma_cap.fpesel ? "Y" : "N");
4436 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
4437 		   priv->dma_cap.tbssel ? "Y" : "N");
4438 	return 0;
4439 }
4440 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4441 
4442 /* Use network device events to rename debugfs file entries.
4443  */
4444 static int stmmac_device_event(struct notifier_block *unused,
4445 			       unsigned long event, void *ptr)
4446 {
4447 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4448 	struct stmmac_priv *priv = netdev_priv(dev);
4449 
4450 	if (dev->netdev_ops != &stmmac_netdev_ops)
4451 		goto done;
4452 
4453 	switch (event) {
4454 	case NETDEV_CHANGENAME:
4455 		if (priv->dbgfs_dir)
4456 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
4457 							 priv->dbgfs_dir,
4458 							 stmmac_fs_dir,
4459 							 dev->name);
4460 		break;
4461 	}
4462 done:
4463 	return NOTIFY_DONE;
4464 }
4465 
4466 static struct notifier_block stmmac_notifier = {
4467 	.notifier_call = stmmac_device_event,
4468 };
4469 
4470 static void stmmac_init_fs(struct net_device *dev)
4471 {
4472 	struct stmmac_priv *priv = netdev_priv(dev);
4473 
4474 	rtnl_lock();
4475 
4476 	/* Create per netdev entries */
4477 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4478 
4479 	/* Entry to report DMA RX/TX rings */
4480 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
4481 			    &stmmac_rings_status_fops);
4482 
4483 	/* Entry to report the DMA HW features */
4484 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
4485 			    &stmmac_dma_cap_fops);
4486 
4487 	rtnl_unlock();
4488 }
4489 
4490 static void stmmac_exit_fs(struct net_device *dev)
4491 {
4492 	struct stmmac_priv *priv = netdev_priv(dev);
4493 
4494 	debugfs_remove_recursive(priv->dbgfs_dir);
4495 }
4496 #endif /* CONFIG_DEBUG_FS */
4497 
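/* Bit-serial CRC-32 (reflected polynomial 0xEDB88320) over the 12 VID bits;
 * the VLAN hash filter is indexed with the upper bits of the bit-reversed
 * result (see stmmac_vlan_update()).
 */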
4498 static u32 stmmac_vid_crc32_le(__le16 vid_le)
4499 {
4500 	unsigned char *data = (unsigned char *)&vid_le;
4501 	unsigned char data_byte = 0;
4502 	u32 crc = ~0x0;
4503 	u32 temp = 0;
4504 	int i, bits;
4505 
4506 	bits = get_bitmask_order(VLAN_VID_MASK);
4507 	for (i = 0; i < bits; i++) {
4508 		if ((i % 8) == 0)
4509 			data_byte = data[i / 8];
4510 
4511 		temp = ((crc & 1) ^ data_byte) & 1;
4512 		crc >>= 1;
4513 		data_byte >>= 1;
4514 
4515 		if (temp)
4516 			crc ^= 0xedb88320;
4517 	}
4518 
4519 	return crc;
4520 }
4521 
4522 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
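/* Reprogram the VLAN RX filter: use the hash filter when the hardware
 * supports it, otherwise fall back to a single perfect-match entry.
 */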
4523 {
4524 	u32 crc, hash = 0;
4525 	__le16 pmatch = 0;
4526 	int count = 0;
4527 	u16 vid = 0;
4528 
4529 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
4530 		__le16 vid_le = cpu_to_le16(vid);
4531 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
4532 		hash |= (1 << crc);
4533 		count++;
4534 	}
4535 
4536 	if (!priv->dma_cap.vlhash) {
4537 		if (count > 2) /* VID = 0 always passes filter */
4538 			return -EOPNOTSUPP;
4539 
4540 		pmatch = cpu_to_le16(vid);
4541 		hash = 0;
4542 	}
4543 
4544 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
4545 }
4546 
4547 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
4548 {
4549 	struct stmmac_priv *priv = netdev_priv(ndev);
4550 	bool is_double = false;
4551 	int ret;
4552 
4553 	if (be16_to_cpu(proto) == ETH_P_8021AD)
4554 		is_double = true;
4555 
4556 	set_bit(vid, priv->active_vlans);
4557 	ret = stmmac_vlan_update(priv, is_double);
4558 	if (ret) {
4559 		clear_bit(vid, priv->active_vlans);
4560 		return ret;
4561 	}
4562 
4563 	if (priv->hw->num_vlan) {
4564 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4565 		if (ret)
4566 			return ret;
4567 	}
4568 
4569 	return 0;
4570 }
4571 
4572 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
4573 {
4574 	struct stmmac_priv *priv = netdev_priv(ndev);
4575 	bool is_double = false;
4576 	int ret;
4577 
4578 	if (be16_to_cpu(proto) == ETH_P_8021AD)
4579 		is_double = true;
4580 
4581 	clear_bit(vid, priv->active_vlans);
4582 
4583 	if (priv->hw->num_vlan) {
4584 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4585 		if (ret)
4586 			return ret;
4587 	}
4588 
4589 	return stmmac_vlan_update(priv, is_double);
4590 }
4591 
4592 static const struct net_device_ops stmmac_netdev_ops = {
4593 	.ndo_open = stmmac_open,
4594 	.ndo_start_xmit = stmmac_xmit,
4595 	.ndo_stop = stmmac_release,
4596 	.ndo_change_mtu = stmmac_change_mtu,
4597 	.ndo_fix_features = stmmac_fix_features,
4598 	.ndo_set_features = stmmac_set_features,
4599 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4600 	.ndo_tx_timeout = stmmac_tx_timeout,
4601 	.ndo_do_ioctl = stmmac_ioctl,
4602 	.ndo_setup_tc = stmmac_setup_tc,
4603 	.ndo_select_queue = stmmac_select_queue,
4604 #ifdef CONFIG_NET_POLL_CONTROLLER
4605 	.ndo_poll_controller = stmmac_poll_controller,
4606 #endif
4607 	.ndo_set_mac_address = stmmac_set_mac_address,
4608 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
4609 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
4610 };
4611 
4612 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4613 {
4614 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4615 		return;
4616 	if (test_bit(STMMAC_DOWN, &priv->state))
4617 		return;
4618 
4619 	netdev_err(priv->dev, "Reset adapter.\n");
4620 
4621 	rtnl_lock();
4622 	netif_trans_update(priv->dev);
4623 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4624 		usleep_range(1000, 2000);
4625 
4626 	set_bit(STMMAC_DOWN, &priv->state);
4627 	dev_close(priv->dev);
4628 	dev_open(priv->dev, NULL);
4629 	clear_bit(STMMAC_DOWN, &priv->state);
4630 	clear_bit(STMMAC_RESETING, &priv->state);
4631 	rtnl_unlock();
4632 }
4633 
4634 static void stmmac_service_task(struct work_struct *work)
4635 {
4636 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4637 			service_task);
4638 
4639 	stmmac_reset_subtask(priv);
4640 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4641 }
4642 
4643 /**
4644  *  stmmac_hw_init - Init the MAC device
4645  *  @priv: driver private structure
4646  *  Description: this function is to configure the MAC device according to
4647  *  some platform parameters or the HW capability register. It prepares the
4648  *  driver to use either ring or chain modes and to set up either enhanced or
4649  *  normal descriptors.
4650  */
4651 static int stmmac_hw_init(struct stmmac_priv *priv)
4652 {
4653 	int ret;
4654 
4655 	/* dwmac-sun8i only works in chain mode */
4656 	if (priv->plat->has_sun8i)
4657 		chain_mode = 1;
4658 	priv->chain_mode = chain_mode;
4659 
4660 	/* Initialize HW Interface */
4661 	ret = stmmac_hwif_init(priv);
4662 	if (ret)
4663 		return ret;
4664 
4665 	/* Get the HW capability (new GMAC newer than 3.50a) */
4666 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4667 	if (priv->hw_cap_support) {
4668 		dev_info(priv->device, "DMA HW capability register supported\n");
4669 
4670 		/* We can override some gmac/dma configuration fields that are
4671 		 * passed through the platform (e.g. enh_desc, tx_coe) with the
4672 		 * values from the HW capability register (if supported).
4674 		 */
4675 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4676 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4677 		priv->hw->pmt = priv->plat->pmt;
4678 		if (priv->dma_cap.hash_tb_sz) {
4679 			priv->hw->multicast_filter_bins =
4680 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
4681 			priv->hw->mcast_bits_log2 =
4682 					ilog2(priv->hw->multicast_filter_bins);
4683 		}
4684 
4685 		/* TXCOE doesn't work in thresh DMA mode */
4686 		if (priv->plat->force_thresh_dma_mode)
4687 			priv->plat->tx_coe = 0;
4688 		else
4689 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4690 
4691 		/* In case of GMAC4 rx_coe is from HW cap register. */
4692 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4693 
4694 		if (priv->dma_cap.rx_coe_type2)
4695 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4696 		else if (priv->dma_cap.rx_coe_type1)
4697 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4698 
4699 	} else {
4700 		dev_info(priv->device, "No HW DMA feature register supported\n");
4701 	}
4702 
4703 	if (priv->plat->rx_coe) {
4704 		priv->hw->rx_csum = priv->plat->rx_coe;
4705 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4706 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4707 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4708 	}
4709 	if (priv->plat->tx_coe)
4710 		dev_info(priv->device, "TX Checksum insertion supported\n");
4711 
4712 	if (priv->plat->pmt) {
4713 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4714 		device_set_wakeup_capable(priv->device, 1);
4715 	}
4716 
4717 	if (priv->dma_cap.tsoen)
4718 		dev_info(priv->device, "TSO supported\n");
4719 
4720 	/* Run HW quirks, if any */
4721 	if (priv->hwif_quirks) {
4722 		ret = priv->hwif_quirks(priv);
4723 		if (ret)
4724 			return ret;
4725 	}
4726 
4727 	/* Rx Watchdog is available in the COREs newer than 3.40.
4728 	 * In some cases, for example on buggy HW, this feature
4729 	 * has to be disabled and this can be done by passing the
4730 	 * riwt_off field from the platform.
4731 	 */
4732 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4733 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4734 		priv->use_riwt = 1;
4735 		dev_info(priv->device,
4736 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4737 	}
4738 
4739 	return 0;
4740 }
4741 
4742 /**
4743  * stmmac_dvr_probe
4744  * @device: device pointer
4745  * @plat_dat: platform data pointer
4746  * @res: stmmac resource pointer
4747  * Description: this is the main probe function used to
4748  * call alloc_etherdev and allocate the priv structure.
4749  * Return:
4750  * returns 0 on success, otherwise errno.
4751  */
4752 int stmmac_dvr_probe(struct device *device,
4753 		     struct plat_stmmacenet_data *plat_dat,
4754 		     struct stmmac_resources *res)
4755 {
4756 	struct net_device *ndev = NULL;
4757 	struct stmmac_priv *priv;
4758 	u32 queue, rxq, maxq;
4759 	int i, ret = 0;
4760 
4761 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
4762 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
4763 	if (!ndev)
4764 		return -ENOMEM;
4765 
4766 	SET_NETDEV_DEV(ndev, device);
4767 
4768 	priv = netdev_priv(ndev);
4769 	priv->device = device;
4770 	priv->dev = ndev;
4771 
4772 	stmmac_set_ethtool_ops(ndev);
4773 	priv->pause = pause;
4774 	priv->plat = plat_dat;
4775 	priv->ioaddr = res->addr;
4776 	priv->dev->base_addr = (unsigned long)res->addr;
4777 
4778 	priv->dev->irq = res->irq;
4779 	priv->wol_irq = res->wol_irq;
4780 	priv->lpi_irq = res->lpi_irq;
4781 
4782 	if (!IS_ERR_OR_NULL(res->mac))
4783 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4784 
4785 	dev_set_drvdata(device, priv->dev);
4786 
4787 	/* Verify driver arguments */
4788 	stmmac_verify_args();
4789 
4790 	/* Allocate workqueue */
4791 	priv->wq = create_singlethread_workqueue("stmmac_wq");
4792 	if (!priv->wq) {
4793 		dev_err(priv->device, "failed to create workqueue\n");
4794 		return -ENOMEM;
4795 	}
4796 
4797 	INIT_WORK(&priv->service_task, stmmac_service_task);
4798 
4799 	/* Override with kernel parameters if supplied XXX CRS XXX
4800 	 * this needs to have multiple instances
4801 	 */
4802 	if ((phyaddr >= 0) && (phyaddr <= 31))
4803 		priv->plat->phy_addr = phyaddr;
4804 
4805 	if (priv->plat->stmmac_rst) {
4806 		ret = reset_control_assert(priv->plat->stmmac_rst);
4807 		reset_control_deassert(priv->plat->stmmac_rst);
4808 		/* Some reset controllers have only a reset callback instead of
4809 		 * an assert + deassert callback pair.
4810 		 */
4811 		if (ret == -ENOTSUPP)
4812 			reset_control_reset(priv->plat->stmmac_rst);
4813 	}
4814 
4815 	/* Init MAC and get the capabilities */
4816 	ret = stmmac_hw_init(priv);
4817 	if (ret)
4818 		goto error_hw_init;
4819 
4820 	stmmac_check_ether_addr(priv);
4821 
4822 	/* Configure real RX and TX queues */
4823 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4824 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4825 
4826 	ndev->netdev_ops = &stmmac_netdev_ops;
4827 
4828 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4829 			    NETIF_F_RXCSUM;
4830 
4831 	ret = stmmac_tc_init(priv, priv);
4832 	if (!ret) {
4833 	if (!ret)
4834 		ndev->hw_features |= NETIF_F_HW_TC;
4836 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4837 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4838 		if (priv->plat->has_gmac4)
4839 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
4840 		priv->tso = true;
4841 		dev_info(priv->device, "TSO feature enabled\n");
4842 	}
4843 
4844 	if (priv->dma_cap.sphen) {
4845 		ndev->hw_features |= NETIF_F_GRO;
4846 		priv->sph = true;
4847 		dev_info(priv->device, "SPH feature enabled\n");
4848 	}
4849 
4850 	if (priv->dma_cap.addr64) {
4851 		ret = dma_set_mask_and_coherent(device,
4852 				DMA_BIT_MASK(priv->dma_cap.addr64));
4853 		if (!ret) {
4854 			dev_info(priv->device, "Using %d bits DMA width\n",
4855 				 priv->dma_cap.addr64);
4856 
4857 			/*
4858 			 * If more than 32 bits can be addressed, make sure to
4859 			 * enable enhanced addressing mode.
4860 			 */
4861 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
4862 				priv->plat->dma_cfg->eame = true;
4863 		} else {
4864 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
4865 			if (ret) {
4866 				dev_err(priv->device, "Failed to set DMA Mask\n");
4867 				goto error_hw_init;
4868 			}
4869 
4870 			priv->dma_cap.addr64 = 32;
4871 		}
4872 	}
4873 
4874 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4875 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4876 #ifdef STMMAC_VLAN_TAG_USED
4877 	/* Both mac100 and gmac support receive VLAN tag detection */
4878 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4879 	if (priv->dma_cap.vlhash) {
4880 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4881 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
4882 	}
4883 	if (priv->dma_cap.vlins) {
4884 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
4885 		if (priv->dma_cap.dvlan)
4886 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
4887 	}
4888 #endif
4889 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4890 
4891 	/* Initialize RSS */
4892 	rxq = priv->plat->rx_queues_to_use;
4893 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
4894 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
4895 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
4896 
4897 	if (priv->dma_cap.rssen && priv->plat->rss_en)
4898 		ndev->features |= NETIF_F_RXHASH;
4899 
4900 	/* MTU range: 46 - hw-specific max */
4901 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4902 	if (priv->plat->has_xgmac)
4903 		ndev->max_mtu = XGMAC_JUMBO_LEN;
4904 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4905 		ndev->max_mtu = JUMBO_LEN;
4906 	else
4907 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4908 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4909 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4910 	 */
4911 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4912 	    (priv->plat->maxmtu >= ndev->min_mtu))
4913 		ndev->max_mtu = priv->plat->maxmtu;
4914 	else if (priv->plat->maxmtu < ndev->min_mtu)
4915 		dev_warn(priv->device,
4916 			 "%s: warning: maxmtu having invalid value (%d)\n",
4917 			 __func__, priv->plat->maxmtu);
4918 
4919 	if (flow_ctrl)
4920 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4921 
4922 	/* Setup channels NAPI */
4923 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4924 
4925 	for (queue = 0; queue < maxq; queue++) {
4926 		struct stmmac_channel *ch = &priv->channel[queue];
4927 
4928 		spin_lock_init(&ch->lock);
4929 		ch->priv_data = priv;
4930 		ch->index = queue;
4931 
4932 		if (queue < priv->plat->rx_queues_to_use) {
4933 			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
4934 				       NAPI_POLL_WEIGHT);
4935 		}
4936 		if (queue < priv->plat->tx_queues_to_use) {
4937 			netif_tx_napi_add(ndev, &ch->tx_napi,
4938 					  stmmac_napi_poll_tx,
4939 					  NAPI_POLL_WEIGHT);
4940 		}
4941 	}
4942 
4943 	mutex_init(&priv->lock);
4944 
4945 	/* If a specific clk_csr value is passed from the platform
4946 	 * this means that the CSR Clock Range selection cannot be
4947 	 * changed at run-time and it is fixed. Otherwise the driver will try
4948 	 * to set the MDC clock dynamically according to the actual CSR
4949 	 * clock input.
4950 	 */
4951 	if (priv->plat->clk_csr >= 0)
4952 		priv->clk_csr = priv->plat->clk_csr;
4953 	else
4954 		stmmac_clk_csr_set(priv);
4955 
4956 	stmmac_check_pcs_mode(priv);
4957 
4958 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
4959 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4960 		/* MDIO bus Registration */
4961 		ret = stmmac_mdio_register(ndev);
4962 		if (ret < 0) {
4963 			dev_err(priv->device,
4964 				"%s: MDIO bus (id: %d) registration failed",
4965 				__func__, priv->plat->bus_id);
4966 			goto error_mdio_register;
4967 		}
4968 	}
4969 
4970 	ret = stmmac_phy_setup(priv);
4971 	if (ret) {
4972 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
4973 		goto error_phy_setup;
4974 	}
4975 
4976 	ret = register_netdev(ndev);
4977 	if (ret) {
4978 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4979 			__func__, ret);
4980 		goto error_netdev_register;
4981 	}
4982 
4983 	if (priv->plat->serdes_powerup) {
4984 		ret = priv->plat->serdes_powerup(ndev,
4985 						 priv->plat->bsp_priv);
4986 
4987 		if (ret < 0)
4988 			goto error_serdes_powerup;
4989 	}
4990 
4991 #ifdef CONFIG_DEBUG_FS
4992 	stmmac_init_fs(ndev);
4993 #endif
4994 
4995 	return ret;
4996 
4997 error_serdes_powerup:
4998 	unregister_netdev(ndev);
4999 error_netdev_register:
5000 	phylink_destroy(priv->phylink);
5001 error_phy_setup:
5002 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
5003 	    priv->hw->pcs != STMMAC_PCS_RTBI)
5004 		stmmac_mdio_unregister(ndev);
5005 error_mdio_register:
5006 	for (queue = 0; queue < maxq; queue++) {
5007 		struct stmmac_channel *ch = &priv->channel[queue];
5008 
5009 		if (queue < priv->plat->rx_queues_to_use)
5010 			netif_napi_del(&ch->rx_napi);
5011 		if (queue < priv->plat->tx_queues_to_use)
5012 			netif_napi_del(&ch->tx_napi);
5013 	}
5014 error_hw_init:
5015 	destroy_workqueue(priv->wq);
5016 
5017 	return ret;
5018 }
5019 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
5020 
5021 /**
5022  * stmmac_dvr_remove
5023  * @dev: device pointer
5024  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
5025  * changes the link status and releases the DMA descriptor rings.
5026  */
5027 int stmmac_dvr_remove(struct device *dev)
5028 {
5029 	struct net_device *ndev = dev_get_drvdata(dev);
5030 	struct stmmac_priv *priv = netdev_priv(ndev);
5031 
5032 	netdev_info(priv->dev, "%s: removing driver", __func__);
5033 
5034 	stmmac_stop_all_dma(priv);
5035 
5036 	if (priv->plat->serdes_powerdown)
5037 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5038 
5039 	stmmac_mac_set(priv, priv->ioaddr, false);
5040 	netif_carrier_off(ndev);
5041 	unregister_netdev(ndev);
5042 #ifdef CONFIG_DEBUG_FS
5043 	stmmac_exit_fs(ndev);
5044 #endif
5045 	phylink_destroy(priv->phylink);
5046 	if (priv->plat->stmmac_rst)
5047 		reset_control_assert(priv->plat->stmmac_rst);
5048 	clk_disable_unprepare(priv->plat->pclk);
5049 	clk_disable_unprepare(priv->plat->stmmac_clk);
5050 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
5051 	    priv->hw->pcs != STMMAC_PCS_RTBI)
5052 		stmmac_mdio_unregister(ndev);
5053 	destroy_workqueue(priv->wq);
5054 	mutex_destroy(&priv->lock);
5055 
5056 	return 0;
5057 }
5058 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
5059 
5060 /**
5061  * stmmac_suspend - suspend callback
5062  * @dev: device pointer
5063  * Description: this is the function to suspend the device; it is called
5064  * by the platform driver to stop the network queues, release the resources,
5065  * program the PMT register (for WoL), and clean and release driver resources.
5066  */
5067 int stmmac_suspend(struct device *dev)
5068 {
5069 	struct net_device *ndev = dev_get_drvdata(dev);
5070 	struct stmmac_priv *priv = netdev_priv(ndev);
5071 	u32 chan;
5072 
5073 	if (!ndev || !netif_running(ndev))
5074 		return 0;
5075 
5076 	phylink_mac_change(priv->phylink, false);
5077 
5078 	mutex_lock(&priv->lock);
5079 
5080 	netif_device_detach(ndev);
5081 	stmmac_stop_all_queues(priv);
5082 
5083 	stmmac_disable_all_queues(priv);
5084 
5085 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
5086 		del_timer_sync(&priv->tx_queue[chan].txtimer);
5087 
5088 	/* Stop TX/RX DMA */
5089 	stmmac_stop_all_dma(priv);
5090 
5091 	if (priv->plat->serdes_powerdown)
5092 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5093 
5094 	/* Enable Power down mode by programming the PMT regs */
5095 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5096 		stmmac_pmt(priv, priv->hw, priv->wolopts);
5097 		priv->irq_wake = 1;
5098 	} else {
5099 		mutex_unlock(&priv->lock);
5100 		rtnl_lock();
5101 		if (device_may_wakeup(priv->device))
5102 			phylink_speed_down(priv->phylink, false);
5103 		phylink_stop(priv->phylink);
5104 		rtnl_unlock();
5105 		mutex_lock(&priv->lock);
5106 
5107 		stmmac_mac_set(priv, priv->ioaddr, false);
5108 		pinctrl_pm_select_sleep_state(priv->device);
5109 		/* Disable the clocks since PMT-based wake-up is not used */
5110 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
5111 		clk_disable_unprepare(priv->plat->pclk);
5112 		clk_disable_unprepare(priv->plat->stmmac_clk);
5113 	}
5114 	mutex_unlock(&priv->lock);
5115 
5116 	priv->speed = SPEED_UNKNOWN;
5117 	return 0;
5118 }
5119 EXPORT_SYMBOL_GPL(stmmac_suspend);
5120 
5121 /**
5122  * stmmac_reset_queues_param - reset queue parameters
5123  * @priv: driver private structure
5124  */
5125 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
5126 {
5127 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5128 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5129 	u32 queue;
5130 
5131 	for (queue = 0; queue < rx_cnt; queue++) {
5132 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5133 
5134 		rx_q->cur_rx = 0;
5135 		rx_q->dirty_rx = 0;
5136 	}
5137 
5138 	for (queue = 0; queue < tx_cnt; queue++) {
5139 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5140 
5141 		tx_q->cur_tx = 0;
5142 		tx_q->dirty_tx = 0;
5143 		tx_q->mss = 0;
5144 	}
5145 }
5146 
5147 /**
5148  * stmmac_resume - resume callback
5149  * @dev: device pointer
5150  * Description: when resuming, this function is invoked to set up the DMA and CORE
5151  * in a usable state.
5152  */
5153 int stmmac_resume(struct device *dev)
5154 {
5155 	struct net_device *ndev = dev_get_drvdata(dev);
5156 	struct stmmac_priv *priv = netdev_priv(ndev);
5157 	int ret;
5158 
5159 	if (!netif_running(ndev))
5160 		return 0;
5161 
5162 	/* The Power Down bit in the PM register is cleared
5163 	 * automatically as soon as a magic packet or a Wake-up frame
5164 	 * is received. Anyway, it's better to manually clear
5165 	 * this bit because it can generate problems while resuming
5166 	 * from other devices (e.g. serial console).
5167 	 */
5168 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5169 		mutex_lock(&priv->lock);
5170 		stmmac_pmt(priv, priv->hw, 0);
5171 		mutex_unlock(&priv->lock);
5172 		priv->irq_wake = 0;
5173 	} else {
5174 		pinctrl_pm_select_default_state(priv->device);
5175 		/* enable the clk previously disabled */
5176 		clk_prepare_enable(priv->plat->stmmac_clk);
5177 		clk_prepare_enable(priv->plat->pclk);
5178 		if (priv->plat->clk_ptp_ref)
5179 			clk_prepare_enable(priv->plat->clk_ptp_ref);
5180 		/* reset the phy so that it's ready */
5181 		if (priv->mii)
5182 			stmmac_mdio_reset(priv->mii);
5183 	}
5184 
5185 	if (priv->plat->serdes_powerup) {
5186 		ret = priv->plat->serdes_powerup(ndev,
5187 						 priv->plat->bsp_priv);
5188 
5189 		if (ret < 0)
5190 			return ret;
5191 	}
5192 
5193 	mutex_lock(&priv->lock);
5194 
5195 	stmmac_reset_queues_param(priv);
5196 
5197 	stmmac_clear_descriptors(priv);
5198 
5199 	stmmac_hw_setup(ndev, false);
5200 	stmmac_init_coalesce(priv);
5201 	stmmac_set_rx_mode(ndev);
5202 
5203 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
5204 
5205 	stmmac_enable_all_queues(priv);
5206 
5207 	stmmac_start_all_queues(priv);
5208 
5209 	mutex_unlock(&priv->lock);
5210 
5211 	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
5212 		rtnl_lock();
5213 		phylink_start(priv->phylink);
5214 		/* We may have called phylink_speed_down before */
5215 		phylink_speed_up(priv->phylink);
5216 		rtnl_unlock();
5217 	}
5218 
5219 	phylink_mac_change(priv->phylink, true);
5220 
5221 	netif_device_attach(ndev);
5222 
5223 	return 0;
5224 }
5225 EXPORT_SYMBOL_GPL(stmmac_resume);
5226 
5227 #ifndef MODULE
5228 static int __init stmmac_cmdline_opt(char *str)
5229 {
5230 	char *opt;
5231 
5232 	if (!str || !*str)
5233 		return -EINVAL;
5234 	while ((opt = strsep(&str, ",")) != NULL) {
5235 		if (!strncmp(opt, "debug:", 6)) {
5236 			if (kstrtoint(opt + 6, 0, &debug))
5237 				goto err;
5238 		} else if (!strncmp(opt, "phyaddr:", 8)) {
5239 			if (kstrtoint(opt + 8, 0, &phyaddr))
5240 				goto err;
5241 		} else if (!strncmp(opt, "buf_sz:", 7)) {
5242 			if (kstrtoint(opt + 7, 0, &buf_sz))
5243 				goto err;
5244 		} else if (!strncmp(opt, "tc:", 3)) {
5245 			if (kstrtoint(opt + 3, 0, &tc))
5246 				goto err;
5247 		} else if (!strncmp(opt, "watchdog:", 9)) {
5248 			if (kstrtoint(opt + 9, 0, &watchdog))
5249 				goto err;
5250 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
5251 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
5252 				goto err;
5253 		} else if (!strncmp(opt, "pause:", 6)) {
5254 			if (kstrtoint(opt + 6, 0, &pause))
5255 				goto err;
5256 		} else if (!strncmp(opt, "eee_timer:", 10)) {
5257 			if (kstrtoint(opt + 10, 0, &eee_timer))
5258 				goto err;
5259 		} else if (!strncmp(opt, "chain_mode:", 11)) {
5260 			if (kstrtoint(opt + 11, 0, &chain_mode))
5261 				goto err;
5262 		}
5263 	}
5264 	return 0;
5265 
5266 err:
5267 	pr_err("%s: ERROR broken module parameter conversion", __func__);
5268 	return -EINVAL;
5269 }
5270 
5271 __setup("stmmaceth=", stmmac_cmdline_opt);
5272 #endif /* MODULE */
5273 
5274 static int __init stmmac_init(void)
5275 {
5276 #ifdef CONFIG_DEBUG_FS
5277 	/* Create debugfs main directory if it doesn't exist yet */
5278 	if (!stmmac_fs_dir)
5279 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
5280 	register_netdevice_notifier(&stmmac_notifier);
5281 #endif
5282 
5283 	return 0;
5284 }
5285 
5286 static void __exit stmmac_exit(void)
5287 {
5288 #ifdef CONFIG_DEBUG_FS
5289 	unregister_netdevice_notifier(&stmmac_notifier);
5290 	debugfs_remove_recursive(stmmac_fs_dir);
5291 #endif
5292 }
5293 
5294 module_init(stmmac_init)
5295 module_exit(stmmac_exit)
5296 
5297 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
5298 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
5299 MODULE_LICENSE("GPL");
5300