1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
4  * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
5  *
6  * Right now, I am very wasteful with the buffers.  I allocate memory
7  * pages and then divide them into 2K frame buffers.  This way I know I
8  * have buffers large enough to hold one frame within one buffer descriptor.
9  * Once I get this working, I will use 64 or 128 byte CPM buffers, which
10  * will be much more memory efficient and will easily handle lots of
11  * small packets.
12  *
13  * Much better multiple PHY support by Magnus Damm.
14  * Copyright (c) 2000 Ericsson Radio Systems AB.
15  *
16  * Support for FEC controller of ColdFire processors.
17  * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
18  *
19  * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
20  * Copyright (c) 2004-2006 Macq Electronique SA.
21  *
22  * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
23  */
24 
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/string.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/ptrace.h>
30 #include <linux/errno.h>
31 #include <linux/ioport.h>
32 #include <linux/slab.h>
33 #include <linux/interrupt.h>
34 #include <linux/delay.h>
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/skbuff.h>
38 #include <linux/in.h>
39 #include <linux/ip.h>
40 #include <net/ip.h>
41 #include <net/selftests.h>
42 #include <net/tso.h>
43 #include <linux/tcp.h>
44 #include <linux/udp.h>
45 #include <linux/icmp.h>
46 #include <linux/spinlock.h>
47 #include <linux/workqueue.h>
48 #include <linux/bitops.h>
49 #include <linux/io.h>
50 #include <linux/irq.h>
51 #include <linux/clk.h>
52 #include <linux/crc32.h>
53 #include <linux/platform_device.h>
54 #include <linux/mdio.h>
55 #include <linux/phy.h>
56 #include <linux/fec.h>
57 #include <linux/of.h>
58 #include <linux/of_device.h>
59 #include <linux/of_gpio.h>
60 #include <linux/of_mdio.h>
61 #include <linux/of_net.h>
62 #include <linux/regulator/consumer.h>
63 #include <linux/if_vlan.h>
64 #include <linux/pinctrl/consumer.h>
65 #include <linux/prefetch.h>
66 #include <linux/mfd/syscon.h>
67 #include <linux/regmap.h>
68 #include <soc/imx/cpuidle.h>
69 
70 #include <asm/cacheflush.h>
71 
72 #include "fec.h"
73 
74 static void set_multicast_list(struct net_device *ndev);
75 static void fec_enet_itr_coal_init(struct net_device *ndev);
76 
77 #define DRIVER_NAME	"fec"
78 
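/* Map a VLAN priority (0-7) to a queue index: priorities 0-1 use queue 0,
 * 2-4 use queue 1 and 5-7 use queue 2 on multi-queue controllers.
 */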
79 static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
80 
/* Pause frame field and FIFO threshold */
82 #define FEC_ENET_FCE	(1 << 5)
83 #define FEC_ENET_RSEM_V	0x84
84 #define FEC_ENET_RSFL_V	16
85 #define FEC_ENET_RAEM_V	0x8
86 #define FEC_ENET_RAFL_V	0x8
87 #define FEC_ENET_OPD_V	0xFFF0
88 #define FEC_MDIO_PM_TIMEOUT  100 /* ms */
89 
90 struct fec_devinfo {
91 	u32 quirks;
92 };
93 
94 static const struct fec_devinfo fec_imx25_info = {
95 	.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
96 		  FEC_QUIRK_HAS_FRREG,
97 };
98 
99 static const struct fec_devinfo fec_imx27_info = {
100 	.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
101 };
102 
103 static const struct fec_devinfo fec_imx28_info = {
104 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
105 		  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
106 		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
107 		  FEC_QUIRK_NO_HARD_RESET,
108 };
109 
110 static const struct fec_devinfo fec_imx6q_info = {
111 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
112 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
113 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
114 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
115 		  FEC_QUIRK_HAS_PMQOS,
116 };
117 
118 static const struct fec_devinfo fec_mvf600_info = {
119 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
120 };
121 
122 static const struct fec_devinfo fec_imx6x_info = {
123 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
124 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
125 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
126 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
127 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
128 		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES,
129 };
130 
131 static const struct fec_devinfo fec_imx6ul_info = {
132 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
133 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
134 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
135 		  FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
136 		  FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
137 };
138 
139 static const struct fec_devinfo fec_imx8mq_info = {
140 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
141 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
142 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
143 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
144 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
145 		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
146 		  FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2,
147 };
148 
149 static const struct fec_devinfo fec_imx8qm_info = {
150 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
151 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
152 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
153 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
154 		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
155 		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
156 		  FEC_QUIRK_DELAYED_CLKS_SUPPORT,
157 };
158 
159 static struct platform_device_id fec_devtype[] = {
160 	{
		/* keep it for ColdFire */
162 		.name = DRIVER_NAME,
163 		.driver_data = 0,
164 	}, {
165 		.name = "imx25-fec",
166 		.driver_data = (kernel_ulong_t)&fec_imx25_info,
167 	}, {
168 		.name = "imx27-fec",
169 		.driver_data = (kernel_ulong_t)&fec_imx27_info,
170 	}, {
171 		.name = "imx28-fec",
172 		.driver_data = (kernel_ulong_t)&fec_imx28_info,
173 	}, {
174 		.name = "imx6q-fec",
175 		.driver_data = (kernel_ulong_t)&fec_imx6q_info,
176 	}, {
177 		.name = "mvf600-fec",
178 		.driver_data = (kernel_ulong_t)&fec_mvf600_info,
179 	}, {
180 		.name = "imx6sx-fec",
181 		.driver_data = (kernel_ulong_t)&fec_imx6x_info,
182 	}, {
183 		.name = "imx6ul-fec",
184 		.driver_data = (kernel_ulong_t)&fec_imx6ul_info,
185 	}, {
186 		.name = "imx8mq-fec",
187 		.driver_data = (kernel_ulong_t)&fec_imx8mq_info,
188 	}, {
189 		.name = "imx8qm-fec",
190 		.driver_data = (kernel_ulong_t)&fec_imx8qm_info,
191 	}, {
192 		/* sentinel */
193 	}
194 };
195 MODULE_DEVICE_TABLE(platform, fec_devtype);
196 
197 enum imx_fec_type {
198 	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
199 	IMX27_FEC,	/* runs on i.mx27/35/51 */
200 	IMX28_FEC,
201 	IMX6Q_FEC,
202 	MVF600_FEC,
203 	IMX6SX_FEC,
204 	IMX6UL_FEC,
205 	IMX8MQ_FEC,
206 	IMX8QM_FEC,
207 };
208 
209 static const struct of_device_id fec_dt_ids[] = {
210 	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
211 	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
212 	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
213 	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
214 	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
215 	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
216 	{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
217 	{ .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
218 	{ .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
219 	{ /* sentinel */ }
220 };
221 MODULE_DEVICE_TABLE(of, fec_dt_ids);
222 
223 static unsigned char macaddr[ETH_ALEN];
224 module_param_array(macaddr, byte, NULL, 0);
225 MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
226 
227 #if defined(CONFIG_M5272)
228 /*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero, assume it is the address to read the MAC from.
231  */
232 #if defined(CONFIG_NETtel)
233 #define	FEC_FLASHMAC	0xf0006006
234 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
235 #define	FEC_FLASHMAC	0xf0006000
236 #elif defined(CONFIG_CANCam)
237 #define	FEC_FLASHMAC	0xf0020000
238 #elif defined (CONFIG_M5272C3)
239 #define	FEC_FLASHMAC	(0xffe04000 + 4)
240 #elif defined(CONFIG_MOD5272)
241 #define FEC_FLASHMAC	0xffc0406b
242 #else
243 #define	FEC_FLASHMAC	0
244 #endif
245 #endif /* CONFIG_M5272 */
246 
247 /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
248  *
 * 2048 byte skbufs are allocated. However, alignment requirements
 * vary between FEC variants. Worst case is 64, so round down by 64.
251  */
252 #define PKT_MAXBUF_SIZE		(round_down(2048 - 64, 64))
253 #define PKT_MINBUF_SIZE		64
254 
255 /* FEC receive acceleration */
256 #define FEC_RACC_IPDIS		(1 << 1)
257 #define FEC_RACC_PRODIS		(1 << 2)
258 #define FEC_RACC_SHIFT16	BIT(7)
259 #define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)
260 
261 /* MIB Control Register */
262 #define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)
263 
264 /*
265  * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
266  * size bits. Other FEC hardware does not, so we need to take that into
267  * account when setting it.
268  */
269 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
270     defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
271     defined(CONFIG_ARM64)
272 #define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
273 #else
274 #define	OPT_FRAME_SIZE	0
275 #endif
276 
277 /* FEC MII MMFR bits definition */
278 #define FEC_MMFR_ST		(1 << 30)
279 #define FEC_MMFR_ST_C45		(0)
280 #define FEC_MMFR_OP_READ	(2 << 28)
281 #define FEC_MMFR_OP_READ_C45	(3 << 28)
282 #define FEC_MMFR_OP_WRITE	(1 << 28)
283 #define FEC_MMFR_OP_ADDR_WRITE	(0)
284 #define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
285 #define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
286 #define FEC_MMFR_TA		(2 << 16)
287 #define FEC_MMFR_DATA(v)	(v & 0xffff)
288 /* FEC ECR bits definition */
289 #define FEC_ECR_RESET   BIT(0)
290 #define FEC_ECR_ETHEREN BIT(1)
291 #define FEC_ECR_MAGICEN BIT(2)
292 #define FEC_ECR_SLEEP   BIT(3)
293 #define FEC_ECR_EN1588  BIT(4)
294 
295 #define FEC_MII_TIMEOUT		30000 /* us */
296 
297 /* Transmitter timeout */
298 #define TX_TIMEOUT (2 * HZ)
299 
300 #define FEC_PAUSE_FLAG_AUTONEG	0x1
301 #define FEC_PAUSE_FLAG_ENABLE	0x2
302 #define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
303 #define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
304 #define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)
305 
306 #define COPYBREAK_DEFAULT	256
307 
308 /* Max number of allowed TCP segments for software TSO */
309 #define FEC_MAX_TSO_SEGS	100
310 #define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
311 
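/* True when @addr falls inside the queue's pre-allocated TSO header area;
 * such buffers are not DMA-unmapped on a per-packet basis.
 */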
312 #define IS_TSO_HEADER(txq, addr) \
313 	((addr >= txq->tso_hdrs_dma) && \
314 	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
315 
316 static int mii_cnt;
317 
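/* Ring walking helpers: descriptors may be plain or extended, so step by
 * bd->dsize rather than sizeof(struct bufdesc), wrapping at the ring
 * boundaries.
 */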
318 static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
319 					     struct bufdesc_prop *bd)
320 {
321 	return (bdp >= bd->last) ? bd->base
322 			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
323 }
324 
325 static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
326 					     struct bufdesc_prop *bd)
327 {
328 	return (bdp <= bd->base) ? bd->last
329 			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
330 }
331 
332 static int fec_enet_get_bd_index(struct bufdesc *bdp,
333 				 struct bufdesc_prop *bd)
334 {
335 	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
336 }
337 
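/* Number of free TX descriptors: the distance from bd.cur (next descriptor
 * to be used by software) back to dirty_tx (last one reclaimed), minus one
 * so that cur never catches up with dirty_tx on a completely full ring.
 */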
338 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
339 {
340 	int entries;
341 
342 	entries = (((const char *)txq->dirty_tx -
343 			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
344 
345 	return entries >= 0 ? entries : entries + txq->bd.ring_size;
346 }
347 
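/* Byte-swap a buffer 32 bits at a time for controllers with the
 * FEC_QUIRK_SWAP_FRAME quirk (e.g. i.MX28), whose DMA view of frame data
 * is byte-swapped with respect to the CPU.
 */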
348 static void swap_buffer(void *bufaddr, int len)
349 {
350 	int i;
351 	unsigned int *buf = bufaddr;
352 
353 	for (i = 0; i < len; i += 4, buf++)
354 		swab32s(buf);
355 }
356 
357 static void swap_buffer2(void *dst_buf, void *src_buf, int len)
358 {
359 	int i;
360 	unsigned int *src = src_buf;
361 	unsigned int *dst = dst_buf;
362 
363 	for (i = 0; i < len; i += 4, src++, dst++)
364 		*dst = swab32p(src);
365 }
366 
367 static void fec_dump(struct net_device *ndev)
368 {
369 	struct fec_enet_private *fep = netdev_priv(ndev);
370 	struct bufdesc *bdp;
371 	struct fec_enet_priv_tx_q *txq;
372 	int index = 0;
373 
374 	netdev_info(ndev, "TX ring dump\n");
375 	pr_info("Nr     SC     addr       len  SKB\n");
376 
377 	txq = fep->tx_queue[0];
378 	bdp = txq->bd.base;
379 
380 	do {
381 		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
382 			index,
383 			bdp == txq->bd.cur ? 'S' : ' ',
384 			bdp == txq->dirty_tx ? 'H' : ' ',
385 			fec16_to_cpu(bdp->cbd_sc),
386 			fec32_to_cpu(bdp->cbd_bufaddr),
387 			fec16_to_cpu(bdp->cbd_datlen),
388 			txq->tx_skbuff[index]);
389 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
390 		index++;
391 	} while (bdp != txq->bd.base);
392 }
393 
394 static inline bool is_ipv4_pkt(struct sk_buff *skb)
395 {
396 	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
397 }
398 
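/* Hardware checksum offload (BD_ENET_TX_IINS / BD_ENET_TX_PINS) inserts
 * the IP and protocol checksums, so zero the stale checksum fields of
 * CHECKSUM_PARTIAL packets before handing them to the MAC.
 */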
399 static int
400 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
401 {
402 	/* Only run for packets requiring a checksum. */
403 	if (skb->ip_summed != CHECKSUM_PARTIAL)
404 		return 0;
405 
406 	if (unlikely(skb_cow_head(skb, 0)))
407 		return -1;
408 
409 	if (is_ipv4_pkt(skb))
410 		ip_hdr(skb)->check = 0;
411 	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
412 
413 	return 0;
414 }
415 
416 static struct bufdesc *
417 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
418 			     struct sk_buff *skb,
419 			     struct net_device *ndev)
420 {
421 	struct fec_enet_private *fep = netdev_priv(ndev);
422 	struct bufdesc *bdp = txq->bd.cur;
423 	struct bufdesc_ex *ebdp;
424 	int nr_frags = skb_shinfo(skb)->nr_frags;
425 	int frag, frag_len;
426 	unsigned short status;
427 	unsigned int estatus = 0;
428 	skb_frag_t *this_frag;
429 	unsigned int index;
430 	void *bufaddr;
431 	dma_addr_t addr;
432 	int i;
433 
434 	for (frag = 0; frag < nr_frags; frag++) {
435 		this_frag = &skb_shinfo(skb)->frags[frag];
436 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
437 		ebdp = (struct bufdesc_ex *)bdp;
438 
439 		status = fec16_to_cpu(bdp->cbd_sc);
440 		status &= ~BD_ENET_TX_STATS;
441 		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
442 		frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
443 
444 		/* Handle the last BD specially */
445 		if (frag == nr_frags - 1) {
446 			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
447 			if (fep->bufdesc_ex) {
448 				estatus |= BD_ENET_TX_INT;
449 				if (unlikely(skb_shinfo(skb)->tx_flags &
450 					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
451 					estatus |= BD_ENET_TX_TS;
452 			}
453 		}
454 
455 		if (fep->bufdesc_ex) {
456 			if (fep->quirks & FEC_QUIRK_HAS_AVB)
457 				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
458 			if (skb->ip_summed == CHECKSUM_PARTIAL)
459 				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
460 
461 			ebdp->cbd_bdu = 0;
462 			ebdp->cbd_esc = cpu_to_fec32(estatus);
463 		}
464 
465 		bufaddr = skb_frag_address(this_frag);
466 
467 		index = fec_enet_get_bd_index(bdp, &txq->bd);
468 		if (((unsigned long) bufaddr) & fep->tx_align ||
469 			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
470 			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
471 			bufaddr = txq->tx_bounce[index];
472 
473 			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
474 				swap_buffer(bufaddr, frag_len);
475 		}
476 
477 		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
478 				      DMA_TO_DEVICE);
479 		if (dma_mapping_error(&fep->pdev->dev, addr)) {
480 			if (net_ratelimit())
481 				netdev_err(ndev, "Tx DMA memory map failed\n");
482 			goto dma_mapping_error;
483 		}
484 
485 		bdp->cbd_bufaddr = cpu_to_fec32(addr);
486 		bdp->cbd_datlen = cpu_to_fec16(frag_len);
487 		/* Make sure the updates to rest of the descriptor are
488 		 * performed before transferring ownership.
489 		 */
490 		wmb();
491 		bdp->cbd_sc = cpu_to_fec16(status);
492 	}
493 
494 	return bdp;
495 dma_mapping_error:
496 	bdp = txq->bd.cur;
497 	for (i = 0; i < frag; i++) {
498 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
499 		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
500 				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
501 	}
502 	return ERR_PTR(-ENOMEM);
503 }
504 
505 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
506 				   struct sk_buff *skb, struct net_device *ndev)
507 {
508 	struct fec_enet_private *fep = netdev_priv(ndev);
509 	int nr_frags = skb_shinfo(skb)->nr_frags;
510 	struct bufdesc *bdp, *last_bdp;
511 	void *bufaddr;
512 	dma_addr_t addr;
513 	unsigned short status;
514 	unsigned short buflen;
515 	unsigned int estatus = 0;
516 	unsigned int index;
517 	int entries_free;
518 
519 	entries_free = fec_enet_get_free_txdesc_num(txq);
520 	if (entries_free < MAX_SKB_FRAGS + 1) {
521 		dev_kfree_skb_any(skb);
522 		if (net_ratelimit())
523 			netdev_err(ndev, "NOT enough BD for SG!\n");
524 		return NETDEV_TX_OK;
525 	}
526 
527 	/* Protocol checksum off-load for TCP and UDP. */
528 	if (fec_enet_clear_csum(skb, ndev)) {
529 		dev_kfree_skb_any(skb);
530 		return NETDEV_TX_OK;
531 	}
532 
533 	/* Fill in a Tx ring entry */
534 	bdp = txq->bd.cur;
535 	last_bdp = bdp;
536 	status = fec16_to_cpu(bdp->cbd_sc);
537 	status &= ~BD_ENET_TX_STATS;
538 
539 	/* Set buffer length and buffer pointer */
540 	bufaddr = skb->data;
541 	buflen = skb_headlen(skb);
542 
543 	index = fec_enet_get_bd_index(bdp, &txq->bd);
544 	if (((unsigned long) bufaddr) & fep->tx_align ||
545 		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
546 		memcpy(txq->tx_bounce[index], skb->data, buflen);
547 		bufaddr = txq->tx_bounce[index];
548 
549 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
550 			swap_buffer(bufaddr, buflen);
551 	}
552 
553 	/* Push the data cache so the CPM does not get stale memory data. */
554 	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
555 	if (dma_mapping_error(&fep->pdev->dev, addr)) {
556 		dev_kfree_skb_any(skb);
557 		if (net_ratelimit())
558 			netdev_err(ndev, "Tx DMA memory map failed\n");
559 		return NETDEV_TX_OK;
560 	}
561 
562 	if (nr_frags) {
563 		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
564 		if (IS_ERR(last_bdp)) {
565 			dma_unmap_single(&fep->pdev->dev, addr,
566 					 buflen, DMA_TO_DEVICE);
567 			dev_kfree_skb_any(skb);
568 			return NETDEV_TX_OK;
569 		}
570 	} else {
571 		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
572 		if (fep->bufdesc_ex) {
573 			estatus = BD_ENET_TX_INT;
574 			if (unlikely(skb_shinfo(skb)->tx_flags &
575 				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
576 				estatus |= BD_ENET_TX_TS;
577 		}
578 	}
579 	bdp->cbd_bufaddr = cpu_to_fec32(addr);
580 	bdp->cbd_datlen = cpu_to_fec16(buflen);
581 
582 	if (fep->bufdesc_ex) {
583 
584 		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
585 
586 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
587 			fep->hwts_tx_en))
588 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
589 
590 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
591 			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
592 
593 		if (skb->ip_summed == CHECKSUM_PARTIAL)
594 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
595 
596 		ebdp->cbd_bdu = 0;
597 		ebdp->cbd_esc = cpu_to_fec32(estatus);
598 	}
599 
600 	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
601 	/* Save skb pointer */
602 	txq->tx_skbuff[index] = skb;
603 
604 	/* Make sure the updates to rest of the descriptor are performed before
605 	 * transferring ownership.
606 	 */
607 	wmb();
608 
609 	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
610 	 * it's the last BD of the frame, and to put the CRC on the end.
611 	 */
612 	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
613 	bdp->cbd_sc = cpu_to_fec16(status);
614 
615 	/* If this was the last BD in the ring, start at the beginning again. */
616 	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
617 
618 	skb_tx_timestamp(skb);
619 
620 	/* Make sure the update to bdp and tx_skbuff are performed before
621 	 * txq->bd.cur.
622 	 */
623 	wmb();
624 	txq->bd.cur = bdp;
625 
626 	/* Trigger transmission start */
627 	writel(0, txq->bd.reg_desc_active);
628 
629 	return 0;
630 }
631 
632 static int
633 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
634 			  struct net_device *ndev,
635 			  struct bufdesc *bdp, int index, char *data,
636 			  int size, bool last_tcp, bool is_last)
637 {
638 	struct fec_enet_private *fep = netdev_priv(ndev);
639 	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
640 	unsigned short status;
641 	unsigned int estatus = 0;
642 	dma_addr_t addr;
643 
644 	status = fec16_to_cpu(bdp->cbd_sc);
645 	status &= ~BD_ENET_TX_STATS;
646 
647 	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
648 
649 	if (((unsigned long) data) & fep->tx_align ||
650 		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
651 		memcpy(txq->tx_bounce[index], data, size);
652 		data = txq->tx_bounce[index];
653 
654 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
655 			swap_buffer(data, size);
656 	}
657 
658 	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
659 	if (dma_mapping_error(&fep->pdev->dev, addr)) {
660 		dev_kfree_skb_any(skb);
661 		if (net_ratelimit())
662 			netdev_err(ndev, "Tx DMA memory map failed\n");
663 		return NETDEV_TX_BUSY;
664 	}
665 
666 	bdp->cbd_datlen = cpu_to_fec16(size);
667 	bdp->cbd_bufaddr = cpu_to_fec32(addr);
668 
669 	if (fep->bufdesc_ex) {
670 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
671 			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
672 		if (skb->ip_summed == CHECKSUM_PARTIAL)
673 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
674 		ebdp->cbd_bdu = 0;
675 		ebdp->cbd_esc = cpu_to_fec32(estatus);
676 	}
677 
678 	/* Handle the last BD specially */
679 	if (last_tcp)
680 		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
681 	if (is_last) {
682 		status |= BD_ENET_TX_INTR;
683 		if (fep->bufdesc_ex)
684 			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
685 	}
686 
687 	bdp->cbd_sc = cpu_to_fec16(status);
688 
689 	return 0;
690 }
691 
692 static int
693 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
694 			 struct sk_buff *skb, struct net_device *ndev,
695 			 struct bufdesc *bdp, int index)
696 {
697 	struct fec_enet_private *fep = netdev_priv(ndev);
698 	int hdr_len = skb_tcp_all_headers(skb);
699 	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
700 	void *bufaddr;
701 	unsigned long dmabuf;
702 	unsigned short status;
703 	unsigned int estatus = 0;
704 
705 	status = fec16_to_cpu(bdp->cbd_sc);
706 	status &= ~BD_ENET_TX_STATS;
707 	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
708 
709 	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
710 	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
711 	if (((unsigned long)bufaddr) & fep->tx_align ||
712 		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
713 		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
714 		bufaddr = txq->tx_bounce[index];
715 
716 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
717 			swap_buffer(bufaddr, hdr_len);
718 
719 		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
720 					hdr_len, DMA_TO_DEVICE);
721 		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
722 			dev_kfree_skb_any(skb);
723 			if (net_ratelimit())
724 				netdev_err(ndev, "Tx DMA memory map failed\n");
725 			return NETDEV_TX_BUSY;
726 		}
727 	}
728 
729 	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
730 	bdp->cbd_datlen = cpu_to_fec16(hdr_len);
731 
732 	if (fep->bufdesc_ex) {
733 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
734 			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
735 		if (skb->ip_summed == CHECKSUM_PARTIAL)
736 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
737 		ebdp->cbd_bdu = 0;
738 		ebdp->cbd_esc = cpu_to_fec32(estatus);
739 	}
740 
741 	bdp->cbd_sc = cpu_to_fec16(status);
742 
743 	return 0;
744 }
745 
746 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
747 				   struct sk_buff *skb,
748 				   struct net_device *ndev)
749 {
750 	struct fec_enet_private *fep = netdev_priv(ndev);
751 	int hdr_len, total_len, data_left;
752 	struct bufdesc *bdp = txq->bd.cur;
753 	struct tso_t tso;
754 	unsigned int index = 0;
755 	int ret;
756 
757 	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
758 		dev_kfree_skb_any(skb);
759 		if (net_ratelimit())
760 			netdev_err(ndev, "NOT enough BD for TSO!\n");
761 		return NETDEV_TX_OK;
762 	}
763 
764 	/* Protocol checksum off-load for TCP and UDP. */
765 	if (fec_enet_clear_csum(skb, ndev)) {
766 		dev_kfree_skb_any(skb);
767 		return NETDEV_TX_OK;
768 	}
769 
770 	/* Initialize the TSO handler, and prepare the first payload */
771 	hdr_len = tso_start(skb, &tso);
772 
773 	total_len = skb->len - hdr_len;
774 	while (total_len > 0) {
775 		char *hdr;
776 
777 		index = fec_enet_get_bd_index(bdp, &txq->bd);
778 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
779 		total_len -= data_left;
780 
781 		/* prepare packet headers: MAC + IP + TCP */
782 		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
783 		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
784 		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
785 		if (ret)
786 			goto err_release;
787 
788 		while (data_left > 0) {
789 			int size;
790 
791 			size = min_t(int, tso.size, data_left);
792 			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
793 			index = fec_enet_get_bd_index(bdp, &txq->bd);
794 			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
795 							bdp, index,
796 							tso.data, size,
797 							size == data_left,
798 							total_len == 0);
799 			if (ret)
800 				goto err_release;
801 
802 			data_left -= size;
803 			tso_build_data(skb, &tso, size);
804 		}
805 
806 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
807 	}
808 
809 	/* Save skb pointer */
810 	txq->tx_skbuff[index] = skb;
811 
812 	skb_tx_timestamp(skb);
813 	txq->bd.cur = bdp;
814 
815 	/* Trigger transmission start */
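	/* ERR007885 workaround: skip the write if the descriptor active
	 * register still reads as set in all of several samples, i.e. the
	 * hardware is already fetching descriptors.
	 */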
816 	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
817 	    !readl(txq->bd.reg_desc_active) ||
818 	    !readl(txq->bd.reg_desc_active) ||
819 	    !readl(txq->bd.reg_desc_active) ||
820 	    !readl(txq->bd.reg_desc_active))
821 		writel(0, txq->bd.reg_desc_active);
822 
823 	return 0;
824 
825 err_release:
826 	/* TODO: Release all used data descriptors for TSO */
827 	return ret;
828 }
829 
830 static netdev_tx_t
831 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
832 {
833 	struct fec_enet_private *fep = netdev_priv(ndev);
834 	int entries_free;
835 	unsigned short queue;
836 	struct fec_enet_priv_tx_q *txq;
837 	struct netdev_queue *nq;
838 	int ret;
839 
840 	queue = skb_get_queue_mapping(skb);
841 	txq = fep->tx_queue[queue];
842 	nq = netdev_get_tx_queue(ndev, queue);
843 
844 	if (skb_is_gso(skb))
845 		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
846 	else
847 		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
848 	if (ret)
849 		return ret;
850 
851 	entries_free = fec_enet_get_free_txdesc_num(txq);
852 	if (entries_free <= txq->tx_stop_threshold)
853 		netif_tx_stop_queue(nq);
854 
855 	return NETDEV_TX_OK;
856 }
857 
/* Init RX & TX buffer descriptors */
860 static void fec_enet_bd_init(struct net_device *dev)
861 {
862 	struct fec_enet_private *fep = netdev_priv(dev);
863 	struct fec_enet_priv_tx_q *txq;
864 	struct fec_enet_priv_rx_q *rxq;
865 	struct bufdesc *bdp;
866 	unsigned int i;
867 	unsigned int q;
868 
869 	for (q = 0; q < fep->num_rx_queues; q++) {
870 		/* Initialize the receive buffer descriptors. */
871 		rxq = fep->rx_queue[q];
872 		bdp = rxq->bd.base;
873 
874 		for (i = 0; i < rxq->bd.ring_size; i++) {
875 
876 			/* Initialize the BD for every fragment in the page. */
877 			if (bdp->cbd_bufaddr)
878 				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
879 			else
880 				bdp->cbd_sc = cpu_to_fec16(0);
881 			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
882 		}
883 
884 		/* Set the last buffer to wrap */
885 		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
886 		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
887 
888 		rxq->bd.cur = rxq->bd.base;
889 	}
890 
891 	for (q = 0; q < fep->num_tx_queues; q++) {
892 		/* ...and the same for transmit */
893 		txq = fep->tx_queue[q];
894 		bdp = txq->bd.base;
895 		txq->bd.cur = bdp;
896 
897 		for (i = 0; i < txq->bd.ring_size; i++) {
898 			/* Initialize the BD for every fragment in the page. */
899 			bdp->cbd_sc = cpu_to_fec16(0);
900 			if (bdp->cbd_bufaddr &&
901 			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
902 				dma_unmap_single(&fep->pdev->dev,
903 						 fec32_to_cpu(bdp->cbd_bufaddr),
904 						 fec16_to_cpu(bdp->cbd_datlen),
905 						 DMA_TO_DEVICE);
906 			if (txq->tx_skbuff[i]) {
907 				dev_kfree_skb_any(txq->tx_skbuff[i]);
908 				txq->tx_skbuff[i] = NULL;
909 			}
910 			bdp->cbd_bufaddr = cpu_to_fec32(0);
911 			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
912 		}
913 
914 		/* Set the last buffer to wrap */
915 		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
916 		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
917 		txq->dirty_tx = bdp;
918 	}
919 }
920 
921 static void fec_enet_active_rxring(struct net_device *ndev)
922 {
923 	struct fec_enet_private *fep = netdev_priv(ndev);
924 	int i;
925 
926 	for (i = 0; i < fep->num_rx_queues; i++)
927 		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
928 }
929 
930 static void fec_enet_enable_ring(struct net_device *ndev)
931 {
932 	struct fec_enet_private *fep = netdev_priv(ndev);
933 	struct fec_enet_priv_tx_q *txq;
934 	struct fec_enet_priv_rx_q *rxq;
935 	int i;
936 
937 	for (i = 0; i < fep->num_rx_queues; i++) {
938 		rxq = fep->rx_queue[i];
939 		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
940 		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
941 
942 		/* enable DMA1/2 */
943 		if (i)
944 			writel(RCMR_MATCHEN | RCMR_CMP(i),
945 			       fep->hwp + FEC_RCMR(i));
946 	}
947 
948 	for (i = 0; i < fep->num_tx_queues; i++) {
949 		txq = fep->tx_queue[i];
950 		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
951 
952 		/* enable DMA1/2 */
953 		if (i)
954 			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
955 			       fep->hwp + FEC_DMA_CFG(i));
956 	}
957 }
958 
959 static void fec_enet_reset_skb(struct net_device *ndev)
960 {
961 	struct fec_enet_private *fep = netdev_priv(ndev);
962 	struct fec_enet_priv_tx_q *txq;
963 	int i, j;
964 
965 	for (i = 0; i < fep->num_tx_queues; i++) {
966 		txq = fep->tx_queue[i];
967 
968 		for (j = 0; j < txq->bd.ring_size; j++) {
969 			if (txq->tx_skbuff[j]) {
970 				dev_kfree_skb_any(txq->tx_skbuff[j]);
971 				txq->tx_skbuff[j] = NULL;
972 			}
973 		}
974 	}
975 }
976 
977 /*
978  * This function is called to start or restart the FEC during a link
979  * change, transmit timeout, or to reconfigure the FEC.  The network
980  * packet processing for this device must be stopped before this call.
981  */
982 static void
983 fec_restart(struct net_device *ndev)
984 {
985 	struct fec_enet_private *fep = netdev_priv(ndev);
986 	u32 temp_mac[2];
987 	u32 rcntl = OPT_FRAME_SIZE | 0x04;
988 	u32 ecntl = 0x2; /* ETHEREN */
989 	struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
990 
991 	fec_ptp_save_state(fep);
992 
993 	/* Whack a reset.  We should wait for this.
	 * On the i.MX6SX SoC, the ENET block is on the AXI bus; we disable
	 * the MAC instead of resetting it.
996 	 */
997 	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
998 	    ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
999 		writel(0, fep->hwp + FEC_ECNTRL);
1000 	} else {
1001 		writel(1, fep->hwp + FEC_ECNTRL);
1002 		udelay(10);
1003 	}
1004 
1005 	/*
	 * An enet-mac reset also clears the MAC address registers,
	 * so we need to reconfigure them.
1008 	 */
1009 	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
1010 	writel((__force u32)cpu_to_be32(temp_mac[0]),
1011 	       fep->hwp + FEC_ADDR_LOW);
1012 	writel((__force u32)cpu_to_be32(temp_mac[1]),
1013 	       fep->hwp + FEC_ADDR_HIGH);
1014 
1015 	/* Clear any outstanding interrupt, except MDIO. */
1016 	writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
1017 
1018 	fec_enet_bd_init(ndev);
1019 
1020 	fec_enet_enable_ring(ndev);
1021 
1022 	/* Reset tx SKB buffers. */
1023 	fec_enet_reset_skb(ndev);
1024 
1025 	/* Enable MII mode */
1026 	if (fep->full_duplex == DUPLEX_FULL) {
1027 		/* FD enable */
1028 		writel(0x04, fep->hwp + FEC_X_CNTRL);
1029 	} else {
1030 		/* No Rcv on Xmit */
1031 		rcntl |= 0x02;
1032 		writel(0x0, fep->hwp + FEC_X_CNTRL);
1033 	}
1034 
1035 	/* Set MII speed */
1036 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1037 
1038 #if !defined(CONFIG_M5272)
1039 	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1040 		u32 val = readl(fep->hwp + FEC_RACC);
1041 
1042 		/* align IP header */
1043 		val |= FEC_RACC_SHIFT16;
1044 		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
1045 			/* set RX checksum */
1046 			val |= FEC_RACC_OPTIONS;
1047 		else
1048 			val &= ~FEC_RACC_OPTIONS;
1049 		writel(val, fep->hwp + FEC_RACC);
1050 		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
1051 	}
1052 #endif
1053 
1054 	/*
	 * The PHY interface and speed need to be configured
	 * differently on enet-mac.
1057 	 */
1058 	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1059 		/* Enable flow control and length check */
1060 		rcntl |= 0x40000000 | 0x00000020;
1061 
1062 		/* RGMII, RMII or MII */
1063 		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
1064 		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
1065 		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
1066 		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
1067 			rcntl |= (1 << 6);
1068 		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1069 			rcntl |= (1 << 8);
1070 		else
1071 			rcntl &= ~(1 << 8);
1072 
1073 		/* 1G, 100M or 10M */
1074 		if (ndev->phydev) {
1075 			if (ndev->phydev->speed == SPEED_1000)
1076 				ecntl |= (1 << 5);
1077 			else if (ndev->phydev->speed == SPEED_100)
1078 				rcntl &= ~(1 << 9);
1079 			else
1080 				rcntl |= (1 << 9);
1081 		}
1082 	} else {
1083 #ifdef FEC_MIIGSK_ENR
1084 		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
1085 			u32 cfgr;
1086 			/* disable the gasket and wait */
1087 			writel(0, fep->hwp + FEC_MIIGSK_ENR);
1088 			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1089 				udelay(1);
1090 
1091 			/*
1092 			 * configure the gasket:
1093 			 *   RMII, 50 MHz, no loopback, no echo
1094 			 *   MII, 25 MHz, no loopback, no echo
1095 			 */
1096 			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1097 				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
1098 			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
1099 				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
1100 			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
1101 
1102 			/* re-enable the gasket */
1103 			writel(2, fep->hwp + FEC_MIIGSK_ENR);
1104 		}
1105 #endif
1106 	}
1107 
1108 #if !defined(CONFIG_M5272)
	/* enable pause frame */
1110 	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
1111 	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
1112 	     ndev->phydev && ndev->phydev->pause)) {
1113 		rcntl |= FEC_ENET_FCE;
1114 
1115 		/* set FIFO threshold parameter to reduce overrun */
1116 		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
1117 		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
1118 		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
1119 		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
1120 
1121 		/* OPD */
1122 		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
1123 	} else {
1124 		rcntl &= ~FEC_ENET_FCE;
1125 	}
1126 #endif /* !defined(CONFIG_M5272) */
1127 
1128 	writel(rcntl, fep->hwp + FEC_R_CNTRL);
1129 
1130 	/* Setup multicast filter. */
1131 	set_multicast_list(ndev);
1132 #ifndef CONFIG_M5272
1133 	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1134 	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1135 #endif
1136 
1137 	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1138 		/* enable ENET endian swap */
1139 		ecntl |= (1 << 8);
1140 		/* enable ENET store and forward mode */
1141 		writel(1 << 8, fep->hwp + FEC_X_WMRK);
1142 	}
1143 
1144 	if (fep->bufdesc_ex)
1145 		ecntl |= FEC_ECR_EN1588;
1146 
1147 	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
1148 	    fep->rgmii_txc_dly)
1149 		ecntl |= FEC_ENET_TXC_DLY;
1150 	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
1151 	    fep->rgmii_rxc_dly)
1152 		ecntl |= FEC_ENET_RXC_DLY;
1153 
1154 #ifndef CONFIG_M5272
1155 	/* Enable the MIB statistic event counters */
1156 	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
1157 #endif
1158 
1159 	/* And last, enable the transmit and receive processing */
1160 	writel(ecntl, fep->hwp + FEC_ECNTRL);
1161 	fec_enet_active_rxring(ndev);
1162 
1163 	if (fep->bufdesc_ex)
1164 		fec_ptp_start_cyclecounter(ndev);
1165 
1166 	/* Restart PPS if needed */
1167 	if (fep->pps_enable) {
1168 		/* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
1169 		fep->pps_enable = 0;
1170 		fec_ptp_restore_state(fep);
1171 		fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
1172 	}
1173 
1174 	/* Enable interrupts we wish to service */
1175 	if (fep->link)
1176 		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1177 	else
1178 		writel(0, fep->hwp + FEC_IMASK);
1179 
1180 	/* Init the interrupt coalescing */
1181 	fec_enet_itr_coal_init(ndev);
1182 
1183 }
1184 
1185 static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
1186 {
1187 	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
1188 	struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
1189 
1190 	if (stop_gpr->gpr) {
1191 		if (enabled)
1192 			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
1193 					   BIT(stop_gpr->bit),
1194 					   BIT(stop_gpr->bit));
1195 		else
1196 			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
1197 					   BIT(stop_gpr->bit), 0);
1198 	} else if (pdata && pdata->sleep_mode_enable) {
1199 		pdata->sleep_mode_enable(enabled);
1200 	}
1201 }
1202 
1203 static void fec_irqs_disable(struct net_device *ndev)
1204 {
1205 	struct fec_enet_private *fep = netdev_priv(ndev);
1206 
1207 	writel(0, fep->hwp + FEC_IMASK);
1208 }
1209 
1210 static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
1211 {
1212 	struct fec_enet_private *fep = netdev_priv(ndev);
1213 
1214 	writel(0, fep->hwp + FEC_IMASK);
1215 	writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
1216 }
1217 
1218 static void
1219 fec_stop(struct net_device *ndev)
1220 {
1221 	struct fec_enet_private *fep = netdev_priv(ndev);
1222 	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
1223 	u32 val;
1224 	struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
1225 	u32 ecntl = 0;
1226 
1227 	/* We cannot expect a graceful transmit stop without link !!! */
1228 	if (fep->link) {
1229 		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1230 		udelay(10);
1231 		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1232 			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
1233 	}
1234 
1235 	fec_ptp_save_state(fep);
1236 
1237 	/* Whack a reset.  We should wait for this.
	 * On the i.MX6SX SoC, the ENET block is on the AXI bus; we disable
	 * the MAC instead of resetting it.
1240 	 */
1241 	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1242 		if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
1243 			writel(0, fep->hwp + FEC_ECNTRL);
1244 		} else {
1245 			writel(1, fep->hwp + FEC_ECNTRL);
1246 			udelay(10);
1247 		}
1248 	} else {
1249 		val = readl(fep->hwp + FEC_ECNTRL);
1250 		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
1251 		writel(val, fep->hwp + FEC_ECNTRL);
1252 	}
1253 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1254 	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1255 
1256 	if (fep->bufdesc_ex)
1257 		ecntl |= FEC_ECR_EN1588;
1258 
	/* We have to keep ENET enabled to keep the MII interrupt working */
1260 	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
1261 		!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1262 		ecntl |= FEC_ECR_ETHEREN;
1263 		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
1264 	}
1265 
1266 	writel(ecntl, fep->hwp + FEC_ECNTRL);
1267 
1268 	if (fep->bufdesc_ex)
1269 		fec_ptp_start_cyclecounter(ndev);
1270 
1271 	/* Restart PPS if needed */
1272 	if (fep->pps_enable) {
1273 		/* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
1274 		fep->pps_enable = 0;
1275 		fec_ptp_restore_state(fep);
1276 		fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
1277 	}
1278 }
1279 
1280 
1281 static void
1282 fec_timeout(struct net_device *ndev, unsigned int txqueue)
1283 {
1284 	struct fec_enet_private *fep = netdev_priv(ndev);
1285 
1286 	fec_dump(ndev);
1287 
1288 	ndev->stats.tx_errors++;
1289 
1290 	schedule_work(&fep->tx_timeout_work);
1291 }
1292 
1293 static void fec_enet_timeout_work(struct work_struct *work)
1294 {
1295 	struct fec_enet_private *fep =
1296 		container_of(work, struct fec_enet_private, tx_timeout_work);
1297 	struct net_device *ndev = fep->netdev;
1298 
1299 	rtnl_lock();
1300 	if (netif_device_present(ndev) || netif_running(ndev)) {
1301 		napi_disable(&fep->napi);
1302 		netif_tx_lock_bh(ndev);
1303 		fec_restart(ndev);
1304 		netif_tx_wake_all_queues(ndev);
1305 		netif_tx_unlock_bh(ndev);
1306 		napi_enable(&fep->napi);
1307 	}
1308 	rtnl_unlock();
1309 }
1310 
1311 static void
1312 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
1313 	struct skb_shared_hwtstamps *hwtstamps)
1314 {
1315 	unsigned long flags;
1316 	u64 ns;
1317 
1318 	spin_lock_irqsave(&fep->tmreg_lock, flags);
1319 	ns = timecounter_cyc2time(&fep->tc, ts);
1320 	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
1321 
1322 	memset(hwtstamps, 0, sizeof(*hwtstamps));
1323 	hwtstamps->hwtstamp = ns_to_ktime(ns);
1324 }
1325 
1326 static void
1327 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1328 {
1329 	struct	fec_enet_private *fep;
1330 	struct bufdesc *bdp;
1331 	unsigned short status;
1332 	struct	sk_buff	*skb;
1333 	struct fec_enet_priv_tx_q *txq;
1334 	struct netdev_queue *nq;
1335 	int	index = 0;
1336 	int	entries_free;
1337 
1338 	fep = netdev_priv(ndev);
1339 
1340 	txq = fep->tx_queue[queue_id];
1342 	nq = netdev_get_tx_queue(ndev, queue_id);
1343 	bdp = txq->dirty_tx;
1344 
1345 	/* get next bdp of dirty_tx */
1346 	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1347 
1348 	while (bdp != READ_ONCE(txq->bd.cur)) {
1349 		/* Order the load of bd.cur and cbd_sc */
1350 		rmb();
1351 		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
1352 		if (status & BD_ENET_TX_READY)
1353 			break;
1354 
1355 		index = fec_enet_get_bd_index(bdp, &txq->bd);
1356 
1357 		skb = txq->tx_skbuff[index];
1358 		txq->tx_skbuff[index] = NULL;
1359 		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
1360 			dma_unmap_single(&fep->pdev->dev,
1361 					 fec32_to_cpu(bdp->cbd_bufaddr),
1362 					 fec16_to_cpu(bdp->cbd_datlen),
1363 					 DMA_TO_DEVICE);
1364 		bdp->cbd_bufaddr = cpu_to_fec32(0);
1365 		if (!skb)
1366 			goto skb_done;
1367 
1368 		/* Check for errors. */
1369 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
1370 				   BD_ENET_TX_RL | BD_ENET_TX_UN |
1371 				   BD_ENET_TX_CSL)) {
1372 			ndev->stats.tx_errors++;
1373 			if (status & BD_ENET_TX_HB)  /* No heartbeat */
1374 				ndev->stats.tx_heartbeat_errors++;
1375 			if (status & BD_ENET_TX_LC)  /* Late collision */
1376 				ndev->stats.tx_window_errors++;
1377 			if (status & BD_ENET_TX_RL)  /* Retrans limit */
1378 				ndev->stats.tx_aborted_errors++;
1379 			if (status & BD_ENET_TX_UN)  /* Underrun */
1380 				ndev->stats.tx_fifo_errors++;
1381 			if (status & BD_ENET_TX_CSL) /* Carrier lost */
1382 				ndev->stats.tx_carrier_errors++;
1383 		} else {
1384 			ndev->stats.tx_packets++;
1385 			ndev->stats.tx_bytes += skb->len;
1386 		}
1387 
		/* NOTE: SKBTX_IN_PROGRESS being set does not imply that we are
		 * the ones who should time stamp the packet, so we still need
		 * to check the time stamping enabled flag.
1391 		 */
1392 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
1393 			     fep->hwts_tx_en) &&
1394 		    fep->bufdesc_ex) {
1395 			struct skb_shared_hwtstamps shhwtstamps;
1396 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1397 
1398 			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1399 			skb_tstamp_tx(skb, &shhwtstamps);
1400 		}
1401 
1402 		/* Deferred means some collisions occurred during transmit,
1403 		 * but we eventually sent the packet OK.
1404 		 */
1405 		if (status & BD_ENET_TX_DEF)
1406 			ndev->stats.collisions++;
1407 
1408 		/* Free the sk buffer associated with this last transmit */
1409 		dev_kfree_skb_any(skb);
1410 skb_done:
1411 		/* Make sure the update to bdp and tx_skbuff are performed
1412 		 * before dirty_tx
1413 		 */
1414 		wmb();
1415 		txq->dirty_tx = bdp;
1416 
1417 		/* Update pointer to next buffer descriptor to be transmitted */
1418 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1419 
1420 		/* Since we have freed up a buffer, the ring is no longer full
1421 		 */
1422 		if (netif_tx_queue_stopped(nq)) {
1423 			entries_free = fec_enet_get_free_txdesc_num(txq);
1424 			if (entries_free >= txq->tx_wake_threshold)
1425 				netif_tx_wake_queue(nq);
1426 		}
1427 	}
1428 
1429 	/* ERR006358: Keep the transmitter going */
1430 	if (bdp != txq->bd.cur &&
1431 	    readl(txq->bd.reg_desc_active) == 0)
1432 		writel(0, txq->bd.reg_desc_active);
1433 }
1434 
1435 static void fec_enet_tx(struct net_device *ndev)
1436 {
1437 	struct fec_enet_private *fep = netdev_priv(ndev);
1438 	int i;
1439 
1440 	/* Make sure that AVB queues are processed first. */
1441 	for (i = fep->num_tx_queues - 1; i >= 0; i--)
1442 		fec_enet_tx_queue(ndev, i);
1443 }
1444 
1445 static int
1446 fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
1447 {
1448 	struct  fec_enet_private *fep = netdev_priv(ndev);
1449 	int off;
1450 
1451 	off = ((unsigned long)skb->data) & fep->rx_align;
1452 	if (off)
1453 		skb_reserve(skb, fep->rx_align + 1 - off);
1454 
1455 	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
1456 	if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
1457 		if (net_ratelimit())
1458 			netdev_err(ndev, "Rx DMA memory map failed\n");
1459 		return -ENOMEM;
1460 	}
1461 
1462 	return 0;
1463 }
1464 
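/* For frames up to rx_copybreak bytes, copy the payload into a freshly
 * allocated skb so the original receive buffer (and its DMA mapping) can
 * be handed straight back to the hardware.
 */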
1465 static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
1466 			       struct bufdesc *bdp, u32 length, bool swap)
1467 {
1468 	struct  fec_enet_private *fep = netdev_priv(ndev);
1469 	struct sk_buff *new_skb;
1470 
1471 	if (length > fep->rx_copybreak)
1472 		return false;
1473 
1474 	new_skb = netdev_alloc_skb(ndev, length);
1475 	if (!new_skb)
1476 		return false;
1477 
1478 	dma_sync_single_for_cpu(&fep->pdev->dev,
1479 				fec32_to_cpu(bdp->cbd_bufaddr),
1480 				FEC_ENET_RX_FRSIZE - fep->rx_align,
1481 				DMA_FROM_DEVICE);
1482 	if (!swap)
1483 		memcpy(new_skb->data, (*skb)->data, length);
1484 	else
1485 		swap_buffer2(new_skb->data, (*skb)->data, length);
1486 	*skb = new_skb;
1487 
1488 	return true;
1489 }
1490 
1491 /* During a receive, the bd_rx.cur points to the current incoming buffer.
1492  * When we update through the ring, if the next incoming buffer has
1493  * not been given to the system, we just set the empty indicator,
1494  * effectively tossing the packet.
1495  */
1496 static int
1497 fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1498 {
1499 	struct fec_enet_private *fep = netdev_priv(ndev);
1500 	struct fec_enet_priv_rx_q *rxq;
1501 	struct bufdesc *bdp;
1502 	unsigned short status;
1503 	struct  sk_buff *skb_new = NULL;
1504 	struct  sk_buff *skb;
1505 	ushort	pkt_len;
1506 	__u8 *data;
1507 	int	pkt_received = 0;
1508 	struct	bufdesc_ex *ebdp = NULL;
1509 	bool	vlan_packet_rcvd = false;
1510 	u16	vlan_tag;
1511 	int	index = 0;
1512 	bool	is_copybreak;
1513 	bool	need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
1514 
1515 #ifdef CONFIG_M532x
1516 	flush_cache_all();
1517 #endif
1518 	rxq = fep->rx_queue[queue_id];
1519 
1520 	/* First, grab all of the stats for the incoming packet.
1521 	 * These get messed up if we get called due to a busy condition.
1522 	 */
1523 	bdp = rxq->bd.cur;
1524 
1525 	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
1526 
1527 		if (pkt_received >= budget)
1528 			break;
1529 		pkt_received++;
1530 
1531 		writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
1532 
1533 		/* Check for errors. */
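		/* BD_ENET_RX_LAST is inverted below so that a descriptor
		 * without the LAST bit set (a frame spanning more than one
		 * buffer) is counted as an error.
		 */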
1534 		status ^= BD_ENET_RX_LAST;
1535 		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
1536 			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
1537 			   BD_ENET_RX_CL)) {
1538 			ndev->stats.rx_errors++;
1539 			if (status & BD_ENET_RX_OV) {
1540 				/* FIFO overrun */
1541 				ndev->stats.rx_fifo_errors++;
1542 				goto rx_processing_done;
1543 			}
1544 			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
1545 						| BD_ENET_RX_LAST)) {
1546 				/* Frame too long or too short. */
1547 				ndev->stats.rx_length_errors++;
1548 				if (status & BD_ENET_RX_LAST)
1549 					netdev_err(ndev, "rcv is not +last\n");
1550 			}
1551 			if (status & BD_ENET_RX_CR)	/* CRC Error */
1552 				ndev->stats.rx_crc_errors++;
1553 			/* Report late collisions as a frame error. */
1554 			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
1555 				ndev->stats.rx_frame_errors++;
1556 			goto rx_processing_done;
1557 		}
1558 
1559 		/* Process the incoming frame. */
1560 		ndev->stats.rx_packets++;
1561 		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
1562 		ndev->stats.rx_bytes += pkt_len;
1563 
1564 		index = fec_enet_get_bd_index(bdp, &rxq->bd);
1565 		skb = rxq->rx_skbuff[index];
1566 
1567 		/* The packet length includes FCS, but we don't want to
1568 		 * include that when passing upstream as it messes up
1569 		 * bridging applications.
1570 		 */
1571 		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
1572 						  need_swap);
1573 		if (!is_copybreak) {
1574 			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
1575 			if (unlikely(!skb_new)) {
1576 				ndev->stats.rx_dropped++;
1577 				goto rx_processing_done;
1578 			}
1579 			dma_unmap_single(&fep->pdev->dev,
1580 					 fec32_to_cpu(bdp->cbd_bufaddr),
1581 					 FEC_ENET_RX_FRSIZE - fep->rx_align,
1582 					 DMA_FROM_DEVICE);
1583 		}
1584 
1585 		prefetch(skb->data - NET_IP_ALIGN);
1586 		skb_put(skb, pkt_len - 4);
1587 		data = skb->data;
1588 
1589 		if (!is_copybreak && need_swap)
1590 			swap_buffer(data, pkt_len);
1591 
1592 #if !defined(CONFIG_M5272)
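		/* The RACC SHIFT16 option enabled in fec_restart() makes the
		 * controller prepend two padding bytes so the IP header is
		 * 32-bit aligned; strip them before passing the frame up.
		 */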
1593 		if (fep->quirks & FEC_QUIRK_HAS_RACC)
1594 			data = skb_pull_inline(skb, 2);
1595 #endif
1596 
1597 		/* Extract the enhanced buffer descriptor */
1598 		ebdp = NULL;
1599 		if (fep->bufdesc_ex)
1600 			ebdp = (struct bufdesc_ex *)bdp;
1601 
1602 		/* If this is a VLAN packet remove the VLAN Tag */
1603 		vlan_packet_rcvd = false;
1604 		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1605 		    fep->bufdesc_ex &&
1606 		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
1607 			/* Push and remove the vlan tag */
1608 			struct vlan_hdr *vlan_header =
1609 					(struct vlan_hdr *) (data + ETH_HLEN);
1610 			vlan_tag = ntohs(vlan_header->h_vlan_TCI);
1611 
1612 			vlan_packet_rcvd = true;
1613 
1614 			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
1615 			skb_pull(skb, VLAN_HLEN);
1616 		}
1617 
1618 		skb->protocol = eth_type_trans(skb, ndev);
1619 
1620 		/* Get receive timestamp from the skb */
1621 		if (fep->hwts_rx_en && fep->bufdesc_ex)
1622 			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
1623 					  skb_hwtstamps(skb));
1624 
1625 		if (fep->bufdesc_ex &&
1626 		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1627 			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
1628 				/* don't check it */
1629 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1630 			} else {
1631 				skb_checksum_none_assert(skb);
1632 			}
1633 		}
1634 
1635 		/* Handle received VLAN packets */
1636 		if (vlan_packet_rcvd)
1637 			__vlan_hwaccel_put_tag(skb,
1638 					       htons(ETH_P_8021Q),
1639 					       vlan_tag);
1640 
1641 		skb_record_rx_queue(skb, queue_id);
1642 		napi_gro_receive(&fep->napi, skb);
1643 
1644 		if (is_copybreak) {
1645 			dma_sync_single_for_device(&fep->pdev->dev,
1646 						   fec32_to_cpu(bdp->cbd_bufaddr),
1647 						   FEC_ENET_RX_FRSIZE - fep->rx_align,
1648 						   DMA_FROM_DEVICE);
1649 		} else {
1650 			rxq->rx_skbuff[index] = skb_new;
1651 			fec_enet_new_rxbdp(ndev, bdp, skb_new);
1652 		}
1653 
1654 rx_processing_done:
1655 		/* Clear the status flags for this buffer */
1656 		status &= ~BD_ENET_RX_STATS;
1657 
1658 		/* Mark the buffer empty */
1659 		status |= BD_ENET_RX_EMPTY;
1660 
1661 		if (fep->bufdesc_ex) {
1662 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1663 
1664 			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
1665 			ebdp->cbd_prot = 0;
1666 			ebdp->cbd_bdu = 0;
1667 		}
1668 		/* Make sure the updates to rest of the descriptor are
1669 		 * performed before transferring ownership.
1670 		 */
1671 		wmb();
1672 		bdp->cbd_sc = cpu_to_fec16(status);
1673 
1674 		/* Update BD pointer to next entry */
1675 		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1676 
1677 		/* Doing this here will keep the FEC running while we process
1678 		 * incoming frames.  On a heavily loaded network, we should be
1679 		 * able to keep up at the expense of system resources.
1680 		 */
1681 		writel(0, rxq->bd.reg_desc_active);
1682 	}
1683 	rxq->bd.cur = bdp;
1684 	return pkt_received;
1685 }
1686 
1687 static int fec_enet_rx(struct net_device *ndev, int budget)
1688 {
1689 	struct fec_enet_private *fep = netdev_priv(ndev);
1690 	int i, done = 0;
1691 
1692 	/* Make sure that AVB queues are processed first. */
1693 	for (i = fep->num_rx_queues - 1; i >= 0; i--)
1694 		done += fec_enet_rx_queue(ndev, budget - done, i);
1695 
1696 	return done;
1697 }
1698 
1699 static bool fec_enet_collect_events(struct fec_enet_private *fep)
1700 {
1701 	uint int_events;
1702 
1703 	int_events = readl(fep->hwp + FEC_IEVENT);
1704 
1705 	/* Don't clear MDIO events, we poll for those */
1706 	int_events &= ~FEC_ENET_MII;
1707 
1708 	writel(int_events, fep->hwp + FEC_IEVENT);
1709 
1710 	return int_events != 0;
1711 }
1712 
1713 static irqreturn_t
1714 fec_enet_interrupt(int irq, void *dev_id)
1715 {
1716 	struct net_device *ndev = dev_id;
1717 	struct fec_enet_private *fep = netdev_priv(ndev);
1718 	irqreturn_t ret = IRQ_NONE;
1719 
1720 	if (fec_enet_collect_events(fep) && fep->link) {
1721 		ret = IRQ_HANDLED;
1722 
1723 		if (napi_schedule_prep(&fep->napi)) {
1724 			/* Disable interrupts */
1725 			writel(0, fep->hwp + FEC_IMASK);
1726 			__napi_schedule(&fep->napi);
1727 		}
1728 	}
1729 
1730 	return ret;
1731 }
1732 
1733 static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
1734 {
1735 	struct net_device *ndev = napi->dev;
1736 	struct fec_enet_private *fep = netdev_priv(ndev);
1737 	int done = 0;
1738 
1739 	do {
1740 		done += fec_enet_rx(ndev, budget - done);
1741 		fec_enet_tx(ndev);
1742 	} while ((done < budget) && fec_enet_collect_events(fep));
1743 
1744 	if (done < budget) {
1745 		napi_complete_done(napi, done);
1746 		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1747 	}
1748 
1749 	return done;
1750 }
1751 
1752 /* ------------------------------------------------------------------------- */
1753 static int fec_get_mac(struct net_device *ndev)
1754 {
1755 	struct fec_enet_private *fep = netdev_priv(ndev);
1756 	unsigned char *iap, tmpaddr[ETH_ALEN];
1757 	int ret;
1758 
1759 	/*
	 * try to get the MAC address in the following order:
1761 	 *
1762 	 * 1) module parameter via kernel command line in form
1763 	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
1764 	 */
1765 	iap = macaddr;
1766 
1767 	/*
1768 	 * 2) from device tree data
1769 	 */
1770 	if (!is_valid_ether_addr(iap)) {
1771 		struct device_node *np = fep->pdev->dev.of_node;
1772 		if (np) {
1773 			ret = of_get_mac_address(np, tmpaddr);
1774 			if (!ret)
1775 				iap = tmpaddr;
1776 			else if (ret == -EPROBE_DEFER)
1777 				return ret;
1778 		}
1779 	}
1780 
1781 	/*
1782 	 * 3) from flash or fuse (via platform data)
1783 	 */
1784 	if (!is_valid_ether_addr(iap)) {
1785 #ifdef CONFIG_M5272
1786 		if (FEC_FLASHMAC)
1787 			iap = (unsigned char *)FEC_FLASHMAC;
1788 #else
1789 		struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
1790 
1791 		if (pdata)
1792 			iap = (unsigned char *)&pdata->mac;
1793 #endif
1794 	}
1795 
1796 	/*
1797 	 * 4) FEC mac registers set by bootloader
1798 	 */
1799 	if (!is_valid_ether_addr(iap)) {
1800 		*((__be32 *) &tmpaddr[0]) =
1801 			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
1802 		*((__be16 *) &tmpaddr[4]) =
1803 			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
1804 		iap = &tmpaddr[0];
1805 	}
1806 
1807 	/*
1808 	 * 5) random mac address
1809 	 */
1810 	if (!is_valid_ether_addr(iap)) {
1811 		/* Report it and use a random ethernet address instead */
1812 		dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
1813 		eth_hw_addr_random(ndev);
1814 		dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
1815 			 ndev->dev_addr);
1816 		return 0;
1817 	}
1818 
1819 	/* Adjust MAC if using macaddr */
1820 	eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);
1821 
1822 	return 0;
1823 }
1824 
1825 /* ------------------------------------------------------------------------- */
1826 
1827 /*
1828  * Phy section
1829  */
1830 static void fec_enet_adjust_link(struct net_device *ndev)
1831 {
1832 	struct fec_enet_private *fep = netdev_priv(ndev);
1833 	struct phy_device *phy_dev = ndev->phydev;
1834 	int status_change = 0;
1835 
1836 	/*
1837 	 * If the netdev is down, or is going down, we're not interested
1838 	 * in link state events, so just mark our idea of the link as down
1839 	 * and ignore the event.
1840 	 */
1841 	if (!netif_running(ndev) || !netif_device_present(ndev)) {
1842 		fep->link = 0;
1843 	} else if (phy_dev->link) {
1844 		if (!fep->link) {
1845 			fep->link = phy_dev->link;
1846 			status_change = 1;
1847 		}
1848 
1849 		if (fep->full_duplex != phy_dev->duplex) {
1850 			fep->full_duplex = phy_dev->duplex;
1851 			status_change = 1;
1852 		}
1853 
1854 		if (phy_dev->speed != fep->speed) {
1855 			fep->speed = phy_dev->speed;
1856 			status_change = 1;
1857 		}
1858 
1859 		/* if any of the above changed restart the FEC */
1860 		if (status_change) {
1861 			napi_disable(&fep->napi);
1862 			netif_tx_lock_bh(ndev);
1863 			fec_restart(ndev);
1864 			netif_tx_wake_all_queues(ndev);
1865 			netif_tx_unlock_bh(ndev);
1866 			napi_enable(&fep->napi);
1867 		}
1868 	} else {
1869 		if (fep->link) {
1870 			napi_disable(&fep->napi);
1871 			netif_tx_lock_bh(ndev);
1872 			fec_stop(ndev);
1873 			netif_tx_unlock_bh(ndev);
1874 			napi_enable(&fep->napi);
1875 			fep->link = phy_dev->link;
1876 			status_change = 1;
1877 		}
1878 	}
1879 
1880 	if (status_change)
1881 		phy_print_status(phy_dev);
1882 }
1883 
1884 static int fec_enet_mdio_wait(struct fec_enet_private *fep)
1885 {
1886 	uint ievent;
1887 	int ret;
1888 
1889 	ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
1890 					ievent & FEC_ENET_MII, 2, 30000);
1891 
1892 	if (!ret)
1893 		writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
1894 
1895 	return ret;
1896 }
1897 
1898 static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1899 {
1900 	struct fec_enet_private *fep = bus->priv;
1901 	struct device *dev = &fep->pdev->dev;
1902 	int ret = 0, frame_start, frame_addr, frame_op;
1903 	bool is_c45 = !!(regnum & MII_ADDR_C45);
1904 
1905 	ret = pm_runtime_resume_and_get(dev);
1906 	if (ret < 0)
1907 		return ret;
1908 
1909 	if (is_c45) {
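		/* For a C45 access the MDIO core encodes the request as
		 * MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff), so
		 * regnum >> 16 below carries the device address (FEC_MMFR_RA()
		 * masks off the C45 flag bit) and the low 16 bits are the
		 * register number written during the address cycle.
		 */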
1910 		frame_start = FEC_MMFR_ST_C45;
1911 
1912 		/* write address */
1913 		frame_addr = (regnum >> 16);
1914 		writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
1915 		       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
1916 		       FEC_MMFR_TA | (regnum & 0xFFFF),
1917 		       fep->hwp + FEC_MII_DATA);
1918 
1919 		/* wait for end of transfer */
1920 		ret = fec_enet_mdio_wait(fep);
1921 		if (ret) {
1922 			netdev_err(fep->netdev, "MDIO address write timeout\n");
1923 			goto out;
1924 		}
1925 
1926 		frame_op = FEC_MMFR_OP_READ_C45;
1927 
1928 	} else {
1929 		/* C22 read */
1930 		frame_op = FEC_MMFR_OP_READ;
1931 		frame_start = FEC_MMFR_ST;
1932 		frame_addr = regnum;
1933 	}
1934 
1935 	/* start a read op */
1936 	writel(frame_start | frame_op |
1937 		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
1938 		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
1939 
1940 	/* wait for end of transfer */
1941 	ret = fec_enet_mdio_wait(fep);
1942 	if (ret) {
1943 		netdev_err(fep->netdev, "MDIO read timeout\n");
1944 		goto out;
1945 	}
1946 
1947 	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
1948 
1949 out:
1950 	pm_runtime_mark_last_busy(dev);
1951 	pm_runtime_put_autosuspend(dev);
1952 
1953 	return ret;
1954 }
1955 
1956 static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1957 			   u16 value)
1958 {
1959 	struct fec_enet_private *fep = bus->priv;
1960 	struct device *dev = &fep->pdev->dev;
1961 	int ret, frame_start, frame_addr;
1962 	bool is_c45 = !!(regnum & MII_ADDR_C45);
1963 
1964 	ret = pm_runtime_resume_and_get(dev);
1965 	if (ret < 0)
1966 		return ret;
1967 
1968 	if (is_c45) {
1969 		frame_start = FEC_MMFR_ST_C45;
1970 
1971 		/* write address */
1972 		frame_addr = (regnum >> 16);
1973 		writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
1974 		       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
1975 		       FEC_MMFR_TA | (regnum & 0xFFFF),
1976 		       fep->hwp + FEC_MII_DATA);
1977 
1978 		/* wait for end of transfer */
1979 		ret = fec_enet_mdio_wait(fep);
1980 		if (ret) {
1981 			netdev_err(fep->netdev, "MDIO address write timeout\n");
1982 			goto out;
1983 		}
1984 	} else {
1985 		/* C22 write */
1986 		frame_start = FEC_MMFR_ST;
1987 		frame_addr = regnum;
1988 	}
1989 
1990 	/* start a write op */
1991 	writel(frame_start | FEC_MMFR_OP_WRITE |
1992 		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
1993 		FEC_MMFR_TA | FEC_MMFR_DATA(value),
1994 		fep->hwp + FEC_MII_DATA);
1995 
1996 	/* wait for end of transfer */
1997 	ret = fec_enet_mdio_wait(fep);
1998 	if (ret)
1999 		netdev_err(fep->netdev, "MDIO write timeout\n");
2000 
2001 out:
2002 	pm_runtime_mark_last_busy(dev);
2003 	pm_runtime_put_autosuspend(dev);
2004 
2005 	return ret;
2006 }
2007 
2008 static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
2009 {
2010 	struct fec_enet_private *fep = netdev_priv(ndev);
2011 	struct phy_device *phy_dev = ndev->phydev;
2012 
2013 	if (phy_dev) {
2014 		phy_reset_after_clk_enable(phy_dev);
2015 	} else if (fep->phy_node) {
2016 		/*
		 * If the PHY is not yet bound to the MAC, but an OF PHY node
		 * and a matching PHY device instance already exist, use the
		 * OF node to look up the PHY device and use that instance
		 * when triggering the PHY reset.
2022 		 */
2023 		phy_dev = of_phy_find_device(fep->phy_node);
2024 		phy_reset_after_clk_enable(phy_dev);
2025 		put_device(&phy_dev->mdio.dev);
2026 	}
2027 }
2028 
2029 static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
2030 {
2031 	struct fec_enet_private *fep = netdev_priv(ndev);
2032 	unsigned long flags;
2033 	int ret;
2034 
2035 	if (enable) {
2036 		ret = clk_prepare_enable(fep->clk_enet_out);
2037 		if (ret)
2038 			return ret;
2039 
2040 		if (fep->clk_ptp) {
2041 			spin_lock_irqsave(&fep->tmreg_lock, flags);
2042 			ret = clk_prepare_enable(fep->clk_ptp);
2043 			if (ret) {
2044 				spin_unlock_irqrestore(&fep->tmreg_lock, flags);
2045 				goto failed_clk_ptp;
2046 			} else {
2047 				fep->ptp_clk_on = true;
2048 			}
2049 			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
2050 		}
2051 
2052 		ret = clk_prepare_enable(fep->clk_ref);
2053 		if (ret)
2054 			goto failed_clk_ref;
2055 
2056 		ret = clk_prepare_enable(fep->clk_2x_txclk);
2057 		if (ret)
2058 			goto failed_clk_2x_txclk;
2059 
2060 		fec_enet_phy_reset_after_clk_enable(ndev);
2061 	} else {
2062 		clk_disable_unprepare(fep->clk_enet_out);
2063 		if (fep->clk_ptp) {
2064 			spin_lock_irqsave(&fep->tmreg_lock, flags);
2065 			clk_disable_unprepare(fep->clk_ptp);
2066 			fep->ptp_clk_on = false;
2067 			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
2068 		}
2069 		clk_disable_unprepare(fep->clk_ref);
2070 		clk_disable_unprepare(fep->clk_2x_txclk);
2071 	}
2072 
2073 	return 0;
2074 
2075 failed_clk_2x_txclk:
2076 	if (fep->clk_ref)
2077 		clk_disable_unprepare(fep->clk_ref);
2078 failed_clk_ref:
2079 	if (fep->clk_ptp) {
2080 		spin_lock_irqsave(&fep->tmreg_lock, flags);
2081 		clk_disable_unprepare(fep->clk_ptp);
2082 		fep->ptp_clk_on = false;
2083 		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
2084 	}
2085 failed_clk_ptp:
2086 	clk_disable_unprepare(fep->clk_enet_out);
2087 
2088 	return ret;
2089 }
2090 
2091 static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep,
2092 				      struct device_node *np)
2093 {
2094 	u32 rgmii_tx_delay, rgmii_rx_delay;
2095 
2096 	/* For rgmii tx internal delay, valid values are 0ps and 2000ps */
2097 	if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) {
2098 		if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) {
2099 			dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps");
2100 			return -EINVAL;
2101 		} else if (rgmii_tx_delay == 2000) {
2102 			fep->rgmii_txc_dly = true;
2103 		}
2104 	}
2105 
2106 	/* For rgmii rx internal delay, valid values are 0ps and 2000ps */
2107 	if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) {
2108 		if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) {
2109 			dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps");
2110 			return -EINVAL;
2111 		} else if (rgmii_rx_delay == 2000) {
2112 			fep->rgmii_rxc_dly = true;
2113 		}
2114 	}
2115 
2116 	return 0;
2117 }
2118 
2119 static int fec_enet_mii_probe(struct net_device *ndev)
2120 {
2121 	struct fec_enet_private *fep = netdev_priv(ndev);
2122 	struct phy_device *phy_dev = NULL;
2123 	char mdio_bus_id[MII_BUS_ID_SIZE];
2124 	char phy_name[MII_BUS_ID_SIZE + 3];
2125 	int phy_id;
2126 	int dev_id = fep->dev_id;
2127 
2128 	if (fep->phy_node) {
2129 		phy_dev = of_phy_connect(ndev, fep->phy_node,
2130 					 &fec_enet_adjust_link, 0,
2131 					 fep->phy_interface);
2132 		if (!phy_dev) {
2133 			netdev_err(ndev, "Unable to connect to phy\n");
2134 			return -ENODEV;
2135 		}
2136 	} else {
2137 		/* check for attached phy */
2138 		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
2139 			if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
2140 				continue;
2141 			if (dev_id--)
2142 				continue;
2143 			strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
2144 			break;
2145 		}
2146 
2147 		if (phy_id >= PHY_MAX_ADDR) {
2148 			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
2149 			strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
2150 			phy_id = 0;
2151 		}
2152 
2153 		snprintf(phy_name, sizeof(phy_name),
2154 			 PHY_ID_FMT, mdio_bus_id, phy_id);
2155 		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
2156 				      fep->phy_interface);
2157 	}
2158 
2159 	if (IS_ERR(phy_dev)) {
2160 		netdev_err(ndev, "could not attach to PHY\n");
2161 		return PTR_ERR(phy_dev);
2162 	}
2163 
2164 	/* mask with MAC supported features */
2165 	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
2166 		phy_set_max_speed(phy_dev, 1000);
2167 		phy_remove_link_mode(phy_dev,
2168 				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2169 #if !defined(CONFIG_M5272)
2170 		phy_support_sym_pause(phy_dev);
2171 #endif
2172 	}
2173 	else
2174 		phy_set_max_speed(phy_dev, 100);
2175 
2176 	fep->link = 0;
2177 	fep->full_duplex = 0;
2178 
2179 	phy_dev->mac_managed_pm = 1;
2180 
2181 	phy_attached_info(phy_dev);
2182 
2183 	return 0;
2184 }
2185 
2186 static int fec_enet_mii_init(struct platform_device *pdev)
2187 {
2188 	static struct mii_bus *fec0_mii_bus;
2189 	struct net_device *ndev = platform_get_drvdata(pdev);
2190 	struct fec_enet_private *fep = netdev_priv(ndev);
2191 	bool suppress_preamble = false;
2192 	struct device_node *node;
2193 	int err = -ENXIO;
2194 	u32 mii_speed, holdtime;
2195 	u32 bus_freq;
2196 
2197 	/*
2198 	 * The i.MX28 dual fec interfaces are not equal.
2199 	 * Here are the differences:
2200 	 *
2201 	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
2202 	 *  - fec0 acts as the 1588 time master while fec1 is slave
2203 	 *  - external phys can only be configured by fec0
2204 	 *
	 * That is to say, fec1 cannot work independently; it only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both PHYs are attached to the
	 * fec0 MDIO interface in the board design and need to be configured
	 * through the fec0 mii_bus.
2212 	 */
2213 	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
2214 		/* fec1 uses fec0 mii_bus */
2215 		if (mii_cnt && fec0_mii_bus) {
2216 			fep->mii_bus = fec0_mii_bus;
2217 			mii_cnt++;
2218 			return 0;
2219 		}
2220 		return -ENOENT;
2221 	}
2222 
2223 	bus_freq = 2500000; /* 2.5MHz by default */
2224 	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
2225 	if (node) {
2226 		of_property_read_u32(node, "clock-frequency", &bus_freq);
2227 		suppress_preamble = of_property_read_bool(node,
2228 							  "suppress-preamble");
2229 	}
2230 
2231 	/*
2232 	 * Set MII speed (= clk_get_rate() / 2 * phy_speed)
2233 	 *
	 * The formula for the FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
	 * Reference Manual documents this incorrectly; the i.MX6Q
	 * documentation corrects it.
2238 	 */
2239 	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
2240 	if (fep->quirks & FEC_QUIRK_ENET_MAC)
2241 		mii_speed--;
2242 	if (mii_speed > 63) {
2243 		dev_err(&pdev->dev,
2244 			"fec clock (%lu) too fast to get right mii speed\n",
2245 			clk_get_rate(fep->clk_ipg));
2246 		err = -EINVAL;
2247 		goto err_out;
2248 	}
2249 
2250 	/*
	 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
	 * versions are RAZ there, so just ignore the difference and always
	 * write the register.
	 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
	 * output.
2258 	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
2259 	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
2260 	 * holdtime cannot result in a value greater than 3.
2261 	 */
2262 	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
2263 
2264 	fep->phy_speed = mii_speed << 1 | holdtime << 8;
2265 
2266 	if (suppress_preamble)
2267 		fep->phy_speed |= BIT(7);
2268 
2269 	if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
		/* Clear MMFR to avoid generating an MII event when writing MSCR.
2271 		 * MII event generation condition:
2272 		 * - writing MSCR:
2273 		 *	- mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
2274 		 *	  mscr_reg_data_in[7:0] != 0
2275 		 * - writing MMFR:
2276 		 *	- mscr[7:0]_not_zero
2277 		 */
2278 		writel(0, fep->hwp + FEC_MII_DATA);
2279 	}
2280 
2281 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
2282 
2283 	/* Clear any pending transaction complete indication */
2284 	writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2285 
2286 	fep->mii_bus = mdiobus_alloc();
2287 	if (fep->mii_bus == NULL) {
2288 		err = -ENOMEM;
2289 		goto err_out;
2290 	}
2291 
2292 	fep->mii_bus->name = "fec_enet_mii_bus";
2293 	fep->mii_bus->read = fec_enet_mdio_read;
2294 	fep->mii_bus->write = fec_enet_mdio_write;
2295 	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2296 		pdev->name, fep->dev_id + 1);
2297 	fep->mii_bus->priv = fep;
2298 	fep->mii_bus->parent = &pdev->dev;
2299 
2300 	err = of_mdiobus_register(fep->mii_bus, node);
2301 	if (err)
2302 		goto err_out_free_mdiobus;
2303 	of_node_put(node);
2304 
2305 	mii_cnt++;
2306 
2307 	/* save fec0 mii_bus */
2308 	if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2309 		fec0_mii_bus = fep->mii_bus;
2310 
2311 	return 0;
2312 
2313 err_out_free_mdiobus:
2314 	mdiobus_free(fep->mii_bus);
2315 err_out:
2316 	of_node_put(node);
2317 	return err;
2318 }
2319 
2320 static void fec_enet_mii_remove(struct fec_enet_private *fep)
2321 {
2322 	if (--mii_cnt == 0) {
2323 		mdiobus_unregister(fep->mii_bus);
2324 		mdiobus_free(fep->mii_bus);
2325 	}
2326 }
2327 
2328 static void fec_enet_get_drvinfo(struct net_device *ndev,
2329 				 struct ethtool_drvinfo *info)
2330 {
2331 	struct fec_enet_private *fep = netdev_priv(ndev);
2332 
2333 	strlcpy(info->driver, fep->pdev->dev.driver->name,
2334 		sizeof(info->driver));
2335 	strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
2336 }
2337 
2338 static int fec_enet_get_regs_len(struct net_device *ndev)
2339 {
2340 	struct fec_enet_private *fep = netdev_priv(ndev);
2341 	struct resource *r;
2342 	int s = 0;
2343 
2344 	r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
2345 	if (r)
2346 		s = resource_size(r);
2347 
2348 	return s;
2349 }
2350 
/* List of registers that can safely be read to dump them with ethtool */
2352 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2353 	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2354 	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2355 static __u32 fec_enet_register_version = 2;
2356 static u32 fec_enet_register_offset[] = {
2357 	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2358 	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2359 	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
2360 	FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
2361 	FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
2362 	FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
2363 	FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
2364 	FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
2365 	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2366 	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
2367 	FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
2368 	FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
2369 	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2370 	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2371 	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2372 	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2373 	RMON_T_P_GTE2048, RMON_T_OCTETS,
2374 	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2375 	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2376 	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2377 	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2378 	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2379 	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2380 	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2381 	RMON_R_P_GTE2048, RMON_R_OCTETS,
2382 	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2383 	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2384 };
2385 #else
2386 static __u32 fec_enet_register_version = 1;
2387 static u32 fec_enet_register_offset[] = {
2388 	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
2389 	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
2390 	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
2391 	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
2392 	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
2393 	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
2394 	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
2395 	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
2396 	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
2397 };
2398 #endif
2399 
2400 static void fec_enet_get_regs(struct net_device *ndev,
2401 			      struct ethtool_regs *regs, void *regbuf)
2402 {
2403 	struct fec_enet_private *fep = netdev_priv(ndev);
2404 	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
2405 	struct device *dev = &fep->pdev->dev;
2406 	u32 *buf = (u32 *)regbuf;
2407 	u32 i, off;
2408 	int ret;
2409 
2410 	ret = pm_runtime_resume_and_get(dev);
2411 	if (ret < 0)
2412 		return;
2413 
2414 	regs->version = fec_enet_register_version;
2415 
2416 	memset(buf, 0, regs->len);
2417 
2418 	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
2419 		off = fec_enet_register_offset[i];
2420 
2421 		if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
2422 		    !(fep->quirks & FEC_QUIRK_HAS_FRREG))
2423 			continue;
2424 
2425 		off >>= 2;
2426 		buf[off] = readl(&theregs[off]);
2427 	}
2428 
2429 	pm_runtime_mark_last_busy(dev);
2430 	pm_runtime_put_autosuspend(dev);
2431 }
2432 
2433 static int fec_enet_get_ts_info(struct net_device *ndev,
2434 				struct ethtool_ts_info *info)
2435 {
2436 	struct fec_enet_private *fep = netdev_priv(ndev);
2437 
2438 	if (fep->bufdesc_ex) {
2439 
2440 		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
2441 					SOF_TIMESTAMPING_RX_SOFTWARE |
2442 					SOF_TIMESTAMPING_SOFTWARE |
2443 					SOF_TIMESTAMPING_TX_HARDWARE |
2444 					SOF_TIMESTAMPING_RX_HARDWARE |
2445 					SOF_TIMESTAMPING_RAW_HARDWARE;
2446 		if (fep->ptp_clock)
2447 			info->phc_index = ptp_clock_index(fep->ptp_clock);
2448 		else
2449 			info->phc_index = -1;
2450 
2451 		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
2452 				 (1 << HWTSTAMP_TX_ON);
2453 
2454 		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2455 				   (1 << HWTSTAMP_FILTER_ALL);
2456 		return 0;
2457 	} else {
2458 		return ethtool_op_get_ts_info(ndev, info);
2459 	}
2460 }
2461 
2462 #if !defined(CONFIG_M5272)
2463 
2464 static void fec_enet_get_pauseparam(struct net_device *ndev,
2465 				    struct ethtool_pauseparam *pause)
2466 {
2467 	struct fec_enet_private *fep = netdev_priv(ndev);
2468 
2469 	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
2470 	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
2471 	pause->rx_pause = pause->tx_pause;
2472 }
2473 
2474 static int fec_enet_set_pauseparam(struct net_device *ndev,
2475 				   struct ethtool_pauseparam *pause)
2476 {
2477 	struct fec_enet_private *fep = netdev_priv(ndev);
2478 
2479 	if (!ndev->phydev)
2480 		return -ENODEV;
2481 
2482 	if (pause->tx_pause != pause->rx_pause) {
2483 		netdev_info(ndev,
2484 			"hardware only support enable/disable both tx and rx");
2485 		return -EINVAL;
2486 	}
2487 
2488 	fep->pause_flag = 0;
2489 
	/* tx pause must be the same as rx pause */
2491 	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
2492 	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
2493 
2494 	phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
2495 			  pause->autoneg);
2496 
2497 	if (pause->autoneg) {
2498 		if (netif_running(ndev))
2499 			fec_stop(ndev);
2500 		phy_start_aneg(ndev->phydev);
2501 	}
2502 	if (netif_running(ndev)) {
2503 		napi_disable(&fep->napi);
2504 		netif_tx_lock_bh(ndev);
2505 		fec_restart(ndev);
2506 		netif_tx_wake_all_queues(ndev);
2507 		netif_tx_unlock_bh(ndev);
2508 		napi_enable(&fep->napi);
2509 	}
2510 
2511 	return 0;
2512 }
2513 
2514 static const struct fec_stat {
2515 	char name[ETH_GSTRING_LEN];
2516 	u16 offset;
2517 } fec_stats[] = {
2518 	/* RMON TX */
2519 	{ "tx_dropped", RMON_T_DROP },
2520 	{ "tx_packets", RMON_T_PACKETS },
2521 	{ "tx_broadcast", RMON_T_BC_PKT },
2522 	{ "tx_multicast", RMON_T_MC_PKT },
2523 	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
2524 	{ "tx_undersize", RMON_T_UNDERSIZE },
2525 	{ "tx_oversize", RMON_T_OVERSIZE },
2526 	{ "tx_fragment", RMON_T_FRAG },
2527 	{ "tx_jabber", RMON_T_JAB },
2528 	{ "tx_collision", RMON_T_COL },
2529 	{ "tx_64byte", RMON_T_P64 },
2530 	{ "tx_65to127byte", RMON_T_P65TO127 },
2531 	{ "tx_128to255byte", RMON_T_P128TO255 },
2532 	{ "tx_256to511byte", RMON_T_P256TO511 },
2533 	{ "tx_512to1023byte", RMON_T_P512TO1023 },
2534 	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
2535 	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
2536 	{ "tx_octets", RMON_T_OCTETS },
2537 
2538 	/* IEEE TX */
2539 	{ "IEEE_tx_drop", IEEE_T_DROP },
2540 	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2541 	{ "IEEE_tx_1col", IEEE_T_1COL },
2542 	{ "IEEE_tx_mcol", IEEE_T_MCOL },
2543 	{ "IEEE_tx_def", IEEE_T_DEF },
2544 	{ "IEEE_tx_lcol", IEEE_T_LCOL },
2545 	{ "IEEE_tx_excol", IEEE_T_EXCOL },
2546 	{ "IEEE_tx_macerr", IEEE_T_MACERR },
2547 	{ "IEEE_tx_cserr", IEEE_T_CSERR },
2548 	{ "IEEE_tx_sqe", IEEE_T_SQE },
2549 	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2550 	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2551 
2552 	/* RMON RX */
2553 	{ "rx_packets", RMON_R_PACKETS },
2554 	{ "rx_broadcast", RMON_R_BC_PKT },
2555 	{ "rx_multicast", RMON_R_MC_PKT },
2556 	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
2557 	{ "rx_undersize", RMON_R_UNDERSIZE },
2558 	{ "rx_oversize", RMON_R_OVERSIZE },
2559 	{ "rx_fragment", RMON_R_FRAG },
2560 	{ "rx_jabber", RMON_R_JAB },
2561 	{ "rx_64byte", RMON_R_P64 },
2562 	{ "rx_65to127byte", RMON_R_P65TO127 },
2563 	{ "rx_128to255byte", RMON_R_P128TO255 },
2564 	{ "rx_256to511byte", RMON_R_P256TO511 },
2565 	{ "rx_512to1023byte", RMON_R_P512TO1023 },
2566 	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
2567 	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
2568 	{ "rx_octets", RMON_R_OCTETS },
2569 
2570 	/* IEEE RX */
2571 	{ "IEEE_rx_drop", IEEE_R_DROP },
2572 	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2573 	{ "IEEE_rx_crc", IEEE_R_CRC },
2574 	{ "IEEE_rx_align", IEEE_R_ALIGN },
2575 	{ "IEEE_rx_macerr", IEEE_R_MACERR },
2576 	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2577 	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2578 };
2579 
2580 #define FEC_STATS_SIZE		(ARRAY_SIZE(fec_stats) * sizeof(u64))
2581 
2582 static void fec_enet_update_ethtool_stats(struct net_device *dev)
2583 {
2584 	struct fec_enet_private *fep = netdev_priv(dev);
2585 	int i;
2586 
2587 	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2588 		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
2589 }
2590 
2591 static void fec_enet_get_ethtool_stats(struct net_device *dev,
2592 				       struct ethtool_stats *stats, u64 *data)
2593 {
2594 	struct fec_enet_private *fep = netdev_priv(dev);
2595 
2596 	if (netif_running(dev))
2597 		fec_enet_update_ethtool_stats(dev);
2598 
2599 	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
2600 }
2601 
2602 static void fec_enet_get_strings(struct net_device *netdev,
2603 	u32 stringset, u8 *data)
2604 {
2605 	int i;
2606 	switch (stringset) {
2607 	case ETH_SS_STATS:
2608 		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2609 			memcpy(data + i * ETH_GSTRING_LEN,
2610 				fec_stats[i].name, ETH_GSTRING_LEN);
2611 		break;
2612 	case ETH_SS_TEST:
2613 		net_selftest_get_strings(data);
2614 		break;
2615 	}
2616 }
2617 
2618 static int fec_enet_get_sset_count(struct net_device *dev, int sset)
2619 {
2620 	switch (sset) {
2621 	case ETH_SS_STATS:
2622 		return ARRAY_SIZE(fec_stats);
2623 	case ETH_SS_TEST:
2624 		return net_selftest_get_count();
2625 	default:
2626 		return -EOPNOTSUPP;
2627 	}
2628 }
2629 
2630 static void fec_enet_clear_ethtool_stats(struct net_device *dev)
2631 {
2632 	struct fec_enet_private *fep = netdev_priv(dev);
2633 	int i;
2634 
2635 	/* Disable MIB statistics counters */
2636 	writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
2637 
2638 	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2639 		writel(0, fep->hwp + fec_stats[i].offset);
2640 
2641 	/* Don't disable MIB statistics counters */
2642 	writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
2643 }
2644 
2645 #else	/* !defined(CONFIG_M5272) */
2646 #define FEC_STATS_SIZE	0
2647 static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
2648 {
2649 }
2650 
2651 static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
2652 {
2653 }
2654 #endif /* !defined(CONFIG_M5272) */
2655 
/* The ITR clock source is the enet system clock (clk_ahb).
 * One TCTT unit is 64 clock cycles (cycle_ns * 64), so the
 * ICTT value = X us / (cycle_ns * 64).
2659  */
2660 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
2661 {
2662 	struct fec_enet_private *fep = netdev_priv(ndev);
2663 
2664 	return us * (fep->itr_clk_rate / 64000) / 1000;
2665 }
2666 
2667 /* Set threshold for interrupt coalescing */
2668 static void fec_enet_itr_coal_set(struct net_device *ndev)
2669 {
2670 	struct fec_enet_private *fep = netdev_priv(ndev);
2671 	int rx_itr, tx_itr;
2672 
2673 	/* Must be greater than zero to avoid unpredictable behavior */
2674 	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
2675 	    !fep->tx_time_itr || !fep->tx_pkts_itr)
2676 		return;
2677 
2678 	/* Select enet system clock as Interrupt Coalescing
2679 	 * timer Clock Source
2680 	 */
2681 	rx_itr = FEC_ITR_CLK_SEL;
2682 	tx_itr = FEC_ITR_CLK_SEL;
2683 
2684 	/* set ICFT and ICTT */
2685 	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
2686 	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
2687 	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
2688 	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
2689 
2690 	rx_itr |= FEC_ITR_EN;
2691 	tx_itr |= FEC_ITR_EN;
2692 
2693 	writel(tx_itr, fep->hwp + FEC_TXIC0);
2694 	writel(rx_itr, fep->hwp + FEC_RXIC0);
2695 	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
2696 		writel(tx_itr, fep->hwp + FEC_TXIC1);
2697 		writel(rx_itr, fep->hwp + FEC_RXIC1);
2698 		writel(tx_itr, fep->hwp + FEC_TXIC2);
2699 		writel(rx_itr, fep->hwp + FEC_RXIC2);
2700 	}
2701 }
2702 
2703 static int fec_enet_get_coalesce(struct net_device *ndev,
2704 				 struct ethtool_coalesce *ec,
2705 				 struct kernel_ethtool_coalesce *kernel_coal,
2706 				 struct netlink_ext_ack *extack)
2707 {
2708 	struct fec_enet_private *fep = netdev_priv(ndev);
2709 
2710 	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2711 		return -EOPNOTSUPP;
2712 
2713 	ec->rx_coalesce_usecs = fep->rx_time_itr;
2714 	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
2715 
2716 	ec->tx_coalesce_usecs = fep->tx_time_itr;
2717 	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
2718 
2719 	return 0;
2720 }
2721 
2722 static int fec_enet_set_coalesce(struct net_device *ndev,
2723 				 struct ethtool_coalesce *ec,
2724 				 struct kernel_ethtool_coalesce *kernel_coal,
2725 				 struct netlink_ext_ack *extack)
2726 {
2727 	struct fec_enet_private *fep = netdev_priv(ndev);
2728 	struct device *dev = &fep->pdev->dev;
2729 	unsigned int cycle;
2730 
2731 	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2732 		return -EOPNOTSUPP;
2733 
2734 	if (ec->rx_max_coalesced_frames > 255) {
2735 		dev_err(dev, "Rx coalesced frames exceed hardware limitation\n");
2736 		return -EINVAL;
2737 	}
2738 
2739 	if (ec->tx_max_coalesced_frames > 255) {
2740 		dev_err(dev, "Tx coalesced frame exceed hardware limitation\n");
2741 		return -EINVAL;
2742 	}
2743 
2744 	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
2745 	if (cycle > 0xFFFF) {
2746 		dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
2747 		return -EINVAL;
2748 	}
2749 
2750 	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
2751 	if (cycle > 0xFFFF) {
2752 		dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
2753 		return -EINVAL;
2754 	}
2755 
2756 	fep->rx_time_itr = ec->rx_coalesce_usecs;
2757 	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
2758 
2759 	fep->tx_time_itr = ec->tx_coalesce_usecs;
2760 	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
2761 
2762 	fec_enet_itr_coal_set(ndev);
2763 
2764 	return 0;
2765 }
2766 
2767 static void fec_enet_itr_coal_init(struct net_device *ndev)
2768 {
2769 	struct ethtool_coalesce ec;
2770 
2771 	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
2772 	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
2773 
2774 	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
2775 	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
2776 
2777 	fec_enet_set_coalesce(ndev, &ec, NULL, NULL);
2778 }
2779 
2780 static int fec_enet_get_tunable(struct net_device *netdev,
2781 				const struct ethtool_tunable *tuna,
2782 				void *data)
2783 {
2784 	struct fec_enet_private *fep = netdev_priv(netdev);
2785 	int ret = 0;
2786 
2787 	switch (tuna->id) {
2788 	case ETHTOOL_RX_COPYBREAK:
2789 		*(u32 *)data = fep->rx_copybreak;
2790 		break;
2791 	default:
2792 		ret = -EINVAL;
2793 		break;
2794 	}
2795 
2796 	return ret;
2797 }
2798 
2799 static int fec_enet_set_tunable(struct net_device *netdev,
2800 				const struct ethtool_tunable *tuna,
2801 				const void *data)
2802 {
2803 	struct fec_enet_private *fep = netdev_priv(netdev);
2804 	int ret = 0;
2805 
2806 	switch (tuna->id) {
2807 	case ETHTOOL_RX_COPYBREAK:
2808 		fep->rx_copybreak = *(u32 *)data;
2809 		break;
2810 	default:
2811 		ret = -EINVAL;
2812 		break;
2813 	}
2814 
2815 	return ret;
2816 }
2817 
/* The LPI Sleep Ts count is based on the tx clock (clk_ref).
 * The lpi sleep cnt value = X us / cycle_ns.
2820  */
2821 static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
2822 {
2823 	struct fec_enet_private *fep = netdev_priv(ndev);
2824 
2825 	return us * (fep->clk_ref_rate / 1000) / 1000;
2826 }
2827 
2828 static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
2829 {
2830 	struct fec_enet_private *fep = netdev_priv(ndev);
2831 	struct ethtool_eee *p = &fep->eee;
2832 	unsigned int sleep_cycle, wake_cycle;
2833 	int ret = 0;
2834 
2835 	if (enable) {
2836 		ret = phy_init_eee(ndev->phydev, false);
2837 		if (ret)
2838 			return ret;
2839 
2840 		sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
2841 		wake_cycle = sleep_cycle;
2842 	} else {
2843 		sleep_cycle = 0;
2844 		wake_cycle = 0;
2845 	}
2846 
2847 	p->tx_lpi_enabled = enable;
2848 	p->eee_enabled = enable;
2849 	p->eee_active = enable;
2850 
2851 	writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
2852 	writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
2853 
2854 	return 0;
2855 }
2856 
2857 static int
2858 fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
2859 {
2860 	struct fec_enet_private *fep = netdev_priv(ndev);
2861 	struct ethtool_eee *p = &fep->eee;
2862 
2863 	if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
2864 		return -EOPNOTSUPP;
2865 
2866 	if (!netif_running(ndev))
2867 		return -ENETDOWN;
2868 
2869 	edata->eee_enabled = p->eee_enabled;
2870 	edata->eee_active = p->eee_active;
2871 	edata->tx_lpi_timer = p->tx_lpi_timer;
2872 	edata->tx_lpi_enabled = p->tx_lpi_enabled;
2873 
2874 	return phy_ethtool_get_eee(ndev->phydev, edata);
2875 }
2876 
2877 static int
2878 fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
2879 {
2880 	struct fec_enet_private *fep = netdev_priv(ndev);
2881 	struct ethtool_eee *p = &fep->eee;
2882 	int ret = 0;
2883 
2884 	if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
2885 		return -EOPNOTSUPP;
2886 
2887 	if (!netif_running(ndev))
2888 		return -ENETDOWN;
2889 
2890 	p->tx_lpi_timer = edata->tx_lpi_timer;
2891 
2892 	if (!edata->eee_enabled || !edata->tx_lpi_enabled ||
2893 	    !edata->tx_lpi_timer)
2894 		ret = fec_enet_eee_mode_set(ndev, false);
2895 	else
2896 		ret = fec_enet_eee_mode_set(ndev, true);
2897 
2898 	if (ret)
2899 		return ret;
2900 
2901 	return phy_ethtool_set_eee(ndev->phydev, edata);
2902 }
2903 
2904 static void
2905 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2906 {
2907 	struct fec_enet_private *fep = netdev_priv(ndev);
2908 
2909 	if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
2910 		wol->supported = WAKE_MAGIC;
2911 		wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
2912 	} else {
2913 		wol->supported = wol->wolopts = 0;
2914 	}
2915 }
2916 
2917 static int
2918 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2919 {
2920 	struct fec_enet_private *fep = netdev_priv(ndev);
2921 
2922 	if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
2923 		return -EINVAL;
2924 
2925 	if (wol->wolopts & ~WAKE_MAGIC)
2926 		return -EINVAL;
2927 
2928 	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
2929 	if (device_may_wakeup(&ndev->dev))
2930 		fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
2931 	else
2932 		fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
2933 
2934 	return 0;
2935 }
2936 
2937 static const struct ethtool_ops fec_enet_ethtool_ops = {
2938 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
2939 				     ETHTOOL_COALESCE_MAX_FRAMES,
2940 	.get_drvinfo		= fec_enet_get_drvinfo,
2941 	.get_regs_len		= fec_enet_get_regs_len,
2942 	.get_regs		= fec_enet_get_regs,
2943 	.nway_reset		= phy_ethtool_nway_reset,
2944 	.get_link		= ethtool_op_get_link,
2945 	.get_coalesce		= fec_enet_get_coalesce,
2946 	.set_coalesce		= fec_enet_set_coalesce,
2947 #ifndef CONFIG_M5272
2948 	.get_pauseparam		= fec_enet_get_pauseparam,
2949 	.set_pauseparam		= fec_enet_set_pauseparam,
2950 	.get_strings		= fec_enet_get_strings,
2951 	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
2952 	.get_sset_count		= fec_enet_get_sset_count,
2953 #endif
2954 	.get_ts_info		= fec_enet_get_ts_info,
2955 	.get_tunable		= fec_enet_get_tunable,
2956 	.set_tunable		= fec_enet_set_tunable,
2957 	.get_wol		= fec_enet_get_wol,
2958 	.set_wol		= fec_enet_set_wol,
2959 	.get_eee		= fec_enet_get_eee,
2960 	.set_eee		= fec_enet_set_eee,
2961 	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
2962 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
2963 	.self_test		= net_selftest,
2964 };
2965 
2966 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2967 {
2968 	struct fec_enet_private *fep = netdev_priv(ndev);
2969 	struct phy_device *phydev = ndev->phydev;
2970 
2971 	if (!netif_running(ndev))
2972 		return -EINVAL;
2973 
2974 	if (!phydev)
2975 		return -ENODEV;
2976 
2977 	if (fep->bufdesc_ex) {
2978 		bool use_fec_hwts = !phy_has_hwtstamp(phydev);
2979 
2980 		if (cmd == SIOCSHWTSTAMP) {
2981 			if (use_fec_hwts)
2982 				return fec_ptp_set(ndev, rq);
2983 			fec_ptp_disable_hwts(ndev);
2984 		} else if (cmd == SIOCGHWTSTAMP) {
2985 			if (use_fec_hwts)
2986 				return fec_ptp_get(ndev, rq);
2987 		}
2988 	}
2989 
2990 	return phy_mii_ioctl(phydev, rq, cmd);
2991 }
2992 
2993 static void fec_enet_free_buffers(struct net_device *ndev)
2994 {
2995 	struct fec_enet_private *fep = netdev_priv(ndev);
2996 	unsigned int i;
2997 	struct sk_buff *skb;
2998 	struct bufdesc	*bdp;
2999 	struct fec_enet_priv_tx_q *txq;
3000 	struct fec_enet_priv_rx_q *rxq;
3001 	unsigned int q;
3002 
3003 	for (q = 0; q < fep->num_rx_queues; q++) {
3004 		rxq = fep->rx_queue[q];
3005 		bdp = rxq->bd.base;
3006 		for (i = 0; i < rxq->bd.ring_size; i++) {
3007 			skb = rxq->rx_skbuff[i];
3008 			rxq->rx_skbuff[i] = NULL;
3009 			if (skb) {
3010 				dma_unmap_single(&fep->pdev->dev,
3011 						 fec32_to_cpu(bdp->cbd_bufaddr),
3012 						 FEC_ENET_RX_FRSIZE - fep->rx_align,
3013 						 DMA_FROM_DEVICE);
3014 				dev_kfree_skb(skb);
3015 			}
3016 			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
3017 		}
3018 	}
3019 
3020 	for (q = 0; q < fep->num_tx_queues; q++) {
3021 		txq = fep->tx_queue[q];
3022 		for (i = 0; i < txq->bd.ring_size; i++) {
3023 			kfree(txq->tx_bounce[i]);
3024 			txq->tx_bounce[i] = NULL;
3025 			skb = txq->tx_skbuff[i];
3026 			txq->tx_skbuff[i] = NULL;
3027 			dev_kfree_skb(skb);
3028 		}
3029 	}
3030 }
3031 
3032 static void fec_enet_free_queue(struct net_device *ndev)
3033 {
3034 	struct fec_enet_private *fep = netdev_priv(ndev);
3035 	int i;
3036 	struct fec_enet_priv_tx_q *txq;
3037 
3038 	for (i = 0; i < fep->num_tx_queues; i++)
3039 		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
3040 			txq = fep->tx_queue[i];
3041 			dma_free_coherent(&fep->pdev->dev,
3042 					  txq->bd.ring_size * TSO_HEADER_SIZE,
3043 					  txq->tso_hdrs,
3044 					  txq->tso_hdrs_dma);
3045 		}
3046 
3047 	for (i = 0; i < fep->num_rx_queues; i++)
3048 		kfree(fep->rx_queue[i]);
3049 	for (i = 0; i < fep->num_tx_queues; i++)
3050 		kfree(fep->tx_queue[i]);
3051 }
3052 
3053 static int fec_enet_alloc_queue(struct net_device *ndev)
3054 {
3055 	struct fec_enet_private *fep = netdev_priv(ndev);
3056 	int i;
3057 	int ret = 0;
3058 	struct fec_enet_priv_tx_q *txq;
3059 
3060 	for (i = 0; i < fep->num_tx_queues; i++) {
3061 		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
3062 		if (!txq) {
3063 			ret = -ENOMEM;
3064 			goto alloc_failed;
3065 		}
3066 
3067 		fep->tx_queue[i] = txq;
3068 		txq->bd.ring_size = TX_RING_SIZE;
3069 		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
3070 
3071 		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
3072 		txq->tx_wake_threshold =
3073 			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;
3074 
3075 		txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
3076 					txq->bd.ring_size * TSO_HEADER_SIZE,
3077 					&txq->tso_hdrs_dma,
3078 					GFP_KERNEL);
3079 		if (!txq->tso_hdrs) {
3080 			ret = -ENOMEM;
3081 			goto alloc_failed;
3082 		}
3083 	}
3084 
3085 	for (i = 0; i < fep->num_rx_queues; i++) {
3086 		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
3087 					   GFP_KERNEL);
3088 		if (!fep->rx_queue[i]) {
3089 			ret = -ENOMEM;
3090 			goto alloc_failed;
3091 		}
3092 
3093 		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
3094 		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
3095 	}
3096 	return ret;
3097 
3098 alloc_failed:
3099 	fec_enet_free_queue(ndev);
3100 	return ret;
3101 }
3102 
3103 static int
3104 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
3105 {
3106 	struct fec_enet_private *fep = netdev_priv(ndev);
3107 	unsigned int i;
3108 	struct sk_buff *skb;
3109 	struct bufdesc	*bdp;
3110 	struct fec_enet_priv_rx_q *rxq;
3111 
3112 	rxq = fep->rx_queue[queue];
3113 	bdp = rxq->bd.base;
3114 	for (i = 0; i < rxq->bd.ring_size; i++) {
3115 		skb = __netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE, GFP_KERNEL);
3116 		if (!skb)
3117 			goto err_alloc;
3118 
3119 		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
3120 			dev_kfree_skb(skb);
3121 			goto err_alloc;
3122 		}
3123 
3124 		rxq->rx_skbuff[i] = skb;
3125 		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
3126 
3127 		if (fep->bufdesc_ex) {
3128 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3129 			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
3130 		}
3131 
3132 		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
3133 	}
3134 
3135 	/* Set the last buffer to wrap. */
3136 	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
3137 	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
3138 	return 0;
3139 
3140  err_alloc:
3141 	fec_enet_free_buffers(ndev);
3142 	return -ENOMEM;
3143 }
3144 
3145 static int
3146 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
3147 {
3148 	struct fec_enet_private *fep = netdev_priv(ndev);
3149 	unsigned int i;
3150 	struct bufdesc  *bdp;
3151 	struct fec_enet_priv_tx_q *txq;
3152 
3153 	txq = fep->tx_queue[queue];
3154 	bdp = txq->bd.base;
3155 	for (i = 0; i < txq->bd.ring_size; i++) {
3156 		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
3157 		if (!txq->tx_bounce[i])
3158 			goto err_alloc;
3159 
3160 		bdp->cbd_sc = cpu_to_fec16(0);
3161 		bdp->cbd_bufaddr = cpu_to_fec32(0);
3162 
3163 		if (fep->bufdesc_ex) {
3164 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3165 			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
3166 		}
3167 
3168 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3169 	}
3170 
3171 	/* Set the last buffer to wrap. */
3172 	bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
3173 	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
3174 
3175 	return 0;
3176 
3177  err_alloc:
3178 	fec_enet_free_buffers(ndev);
3179 	return -ENOMEM;
3180 }
3181 
3182 static int fec_enet_alloc_buffers(struct net_device *ndev)
3183 {
3184 	struct fec_enet_private *fep = netdev_priv(ndev);
3185 	unsigned int i;
3186 
3187 	for (i = 0; i < fep->num_rx_queues; i++)
3188 		if (fec_enet_alloc_rxq_buffers(ndev, i))
3189 			return -ENOMEM;
3190 
3191 	for (i = 0; i < fep->num_tx_queues; i++)
3192 		if (fec_enet_alloc_txq_buffers(ndev, i))
3193 			return -ENOMEM;
3194 	return 0;
3195 }
3196 
3197 static int
3198 fec_enet_open(struct net_device *ndev)
3199 {
3200 	struct fec_enet_private *fep = netdev_priv(ndev);
3201 	int ret;
3202 	bool reset_again;
3203 
3204 	ret = pm_runtime_resume_and_get(&fep->pdev->dev);
3205 	if (ret < 0)
3206 		return ret;
3207 
3208 	pinctrl_pm_select_default_state(&fep->pdev->dev);
3209 	ret = fec_enet_clk_enable(ndev, true);
3210 	if (ret)
3211 		goto clk_enable;
3212 
	/* During the first fec_enet_open call the PHY has not been probed
	 * yet, so the phy_reset_after_clk_enable() call within
	 * fec_enet_clk_enable() fails. As this reset is needed to be sure
	 * the PHY works correctly, check whether the reset must be repeated
	 * later, once the PHY has been probed.
3218 	 */
3219 	if (ndev->phydev && ndev->phydev->drv)
3220 		reset_again = false;
3221 	else
3222 		reset_again = true;
3223 
3224 	/* I should reset the ring buffers here, but I don't yet know
3225 	 * a simple way to do that.
3226 	 */
3227 
3228 	ret = fec_enet_alloc_buffers(ndev);
3229 	if (ret)
3230 		goto err_enet_alloc;
3231 
3232 	/* Init MAC prior to mii bus probe */
3233 	fec_restart(ndev);
3234 
	/* Trigger the PHY reset again if the earlier attempt in
	 * fec_enet_clk_enable() could not do it because the PHY was not yet
	 * probed.
3237 	 */
3238 	if (reset_again)
3239 		fec_enet_phy_reset_after_clk_enable(ndev);
3240 
	/* Probe and connect to the PHY when opening the interface */
3242 	ret = fec_enet_mii_probe(ndev);
3243 	if (ret)
3244 		goto err_enet_mii_probe;
3245 
3246 	if (fep->quirks & FEC_QUIRK_ERR006687)
3247 		imx6q_cpuidle_fec_irqs_used();
3248 
3249 	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3250 		cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
3251 
3252 	napi_enable(&fep->napi);
3253 	phy_start(ndev->phydev);
3254 	netif_tx_start_all_queues(ndev);
3255 
3256 	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
3257 				 FEC_WOL_FLAG_ENABLE);
3258 
3259 	return 0;
3260 
3261 err_enet_mii_probe:
3262 	fec_enet_free_buffers(ndev);
3263 err_enet_alloc:
3264 	fec_enet_clk_enable(ndev, false);
3265 clk_enable:
3266 	pm_runtime_mark_last_busy(&fep->pdev->dev);
3267 	pm_runtime_put_autosuspend(&fep->pdev->dev);
3268 	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3269 	return ret;
3270 }
3271 
3272 static int
3273 fec_enet_close(struct net_device *ndev)
3274 {
3275 	struct fec_enet_private *fep = netdev_priv(ndev);
3276 
3277 	phy_stop(ndev->phydev);
3278 
3279 	if (netif_device_present(ndev)) {
3280 		napi_disable(&fep->napi);
3281 		netif_tx_disable(ndev);
3282 		fec_stop(ndev);
3283 	}
3284 
3285 	phy_disconnect(ndev->phydev);
3286 
3287 	if (fep->quirks & FEC_QUIRK_ERR006687)
3288 		imx6q_cpuidle_fec_irqs_unused();
3289 
3290 	fec_enet_update_ethtool_stats(ndev);
3291 
3292 	fec_enet_clk_enable(ndev, false);
3293 	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3294 		cpu_latency_qos_remove_request(&fep->pm_qos_req);
3295 
3296 	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3297 	pm_runtime_mark_last_busy(&fep->pdev->dev);
3298 	pm_runtime_put_autosuspend(&fep->pdev->dev);
3299 
3300 	fec_enet_free_buffers(ndev);
3301 
3302 	return 0;
3303 }
3304 
3305 /* Set or clear the multicast filter for this adaptor.
3306  * Skeleton taken from sunlance driver.
3307  * The CPM Ethernet implementation allows Multicast as well as individual
3308  * MAC address filtering.  Some of the drivers check to make sure it is
3309  * a group multicast address, and discard those that are not.  I guess I
3310  * will do the same for now, but just remove the test if you want
3311  * individual filtering as well (do the upper net layers want or support
3312  * this kind of feature?).
3313  */
3314 
3315 #define FEC_HASH_BITS	6		/* #bits in hash */
3316 
3317 static void set_multicast_list(struct net_device *ndev)
3318 {
3319 	struct fec_enet_private *fep = netdev_priv(ndev);
3320 	struct netdev_hw_addr *ha;
3321 	unsigned int crc, tmp;
3322 	unsigned char hash;
3323 	unsigned int hash_high = 0, hash_low = 0;
3324 
3325 	if (ndev->flags & IFF_PROMISC) {
3326 		tmp = readl(fep->hwp + FEC_R_CNTRL);
3327 		tmp |= 0x8;
3328 		writel(tmp, fep->hwp + FEC_R_CNTRL);
3329 		return;
3330 	}
3331 
3332 	tmp = readl(fep->hwp + FEC_R_CNTRL);
3333 	tmp &= ~0x8;
3334 	writel(tmp, fep->hwp + FEC_R_CNTRL);
3335 
3336 	if (ndev->flags & IFF_ALLMULTI) {
3337 		/* Catch all multicast addresses, so set the
3338 		 * filter to all 1's
3339 		 */
3340 		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3341 		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3342 
3343 		return;
3344 	}
3345 
3346 	/* Add the addresses in hash register */
3347 	netdev_for_each_mc_addr(ha, ndev) {
3348 		/* calculate crc32 value of mac address */
3349 		crc = ether_crc_le(ndev->addr_len, ha->addr);
3350 
		/* only the upper 6 bits (FEC_HASH_BITS) are used;
		 * they select a specific bit in the hash registers
3353 		 */
3354 		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
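		/* e.g. a CRC whose top six bits are 0b101000 gives hash = 40,
		 * which sets bit 8 (40 - 32) of hash_high below.
		 */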
3355 
3356 		if (hash > 31)
3357 			hash_high |= 1 << (hash - 32);
3358 		else
3359 			hash_low |= 1 << hash;
3360 	}
3361 
3362 	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3363 	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3364 }
3365 
3366 /* Set a MAC change in hardware. */
3367 static int
3368 fec_set_mac_address(struct net_device *ndev, void *p)
3369 {
3370 	struct fec_enet_private *fep = netdev_priv(ndev);
3371 	struct sockaddr *addr = p;
3372 
3373 	if (addr) {
3374 		if (!is_valid_ether_addr(addr->sa_data))
3375 			return -EADDRNOTAVAIL;
3376 		eth_hw_addr_set(ndev, addr->sa_data);
3377 	}
3378 
	/* Check the netif status here to avoid a system hang in the
	 * following case:
	 *   ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx
	 * Once ethx is down, all FEC clocks are gated off and any register
	 * access would hang the system.
3383 	 */
3384 	if (!netif_running(ndev))
3385 		return 0;
3386 
3387 	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
3388 		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
3389 		fep->hwp + FEC_ADDR_LOW);
3390 	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
3391 		fep->hwp + FEC_ADDR_HIGH);
3392 	return 0;
3393 }
3394 
3395 #ifdef CONFIG_NET_POLL_CONTROLLER
3396 /**
3397  * fec_poll_controller - FEC Poll controller function
3398  * @dev: The FEC network adapter
3399  *
 * Polled functionality used by netconsole and others in non-interrupt mode
3401  *
3402  */
3403 static void fec_poll_controller(struct net_device *dev)
3404 {
3405 	int i;
3406 	struct fec_enet_private *fep = netdev_priv(dev);
3407 
3408 	for (i = 0; i < FEC_IRQ_NUM; i++) {
3409 		if (fep->irq[i] > 0) {
3410 			disable_irq(fep->irq[i]);
3411 			fec_enet_interrupt(fep->irq[i], dev);
3412 			enable_irq(fep->irq[i]);
3413 		}
3414 	}
3415 }
3416 #endif
3417 
3418 static inline void fec_enet_set_netdev_features(struct net_device *netdev,
3419 	netdev_features_t features)
3420 {
3421 	struct fec_enet_private *fep = netdev_priv(netdev);
3422 	netdev_features_t changed = features ^ netdev->features;
3423 
3424 	netdev->features = features;
3425 
3426 	/* Receive checksum has been changed */
3427 	if (changed & NETIF_F_RXCSUM) {
3428 		if (features & NETIF_F_RXCSUM)
3429 			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3430 		else
3431 			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
3432 	}
3433 }
3434 
3435 static int fec_set_features(struct net_device *netdev,
3436 	netdev_features_t features)
3437 {
3438 	struct fec_enet_private *fep = netdev_priv(netdev);
3439 	netdev_features_t changed = features ^ netdev->features;
3440 
3441 	if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
3442 		napi_disable(&fep->napi);
3443 		netif_tx_lock_bh(netdev);
3444 		fec_stop(netdev);
3445 		fec_enet_set_netdev_features(netdev, features);
3446 		fec_restart(netdev);
3447 		netif_tx_wake_all_queues(netdev);
3448 		netif_tx_unlock_bh(netdev);
3449 		napi_enable(&fep->napi);
3450 	} else {
3451 		fec_enet_set_netdev_features(netdev, features);
3452 	}
3453 
3454 	return 0;
3455 }
3456 
3457 static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb)
3458 {
3459 	struct vlan_ethhdr *vhdr;
3460 	unsigned short vlan_TCI = 0;
3461 
3462 	if (skb->protocol == htons(ETH_P_ALL)) {
3463 		vhdr = (struct vlan_ethhdr *)(skb->data);
3464 		vlan_TCI = ntohs(vhdr->h_vlan_TCI);
3465 	}
3466 
3467 	return vlan_TCI;
3468 }
3469 
3470 static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
3471 				 struct net_device *sb_dev)
3472 {
3473 	struct fec_enet_private *fep = netdev_priv(ndev);
3474 	u16 vlan_tag;
3475 
3476 	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
3477 		return netdev_pick_tx(ndev, skb, NULL);
3478 
3479 	vlan_tag = fec_enet_get_raw_vlan_tci(skb);
3480 	if (!vlan_tag)
3481 		return vlan_tag;
3482 
3483 	return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
3484 }
3485 
3486 static const struct net_device_ops fec_netdev_ops = {
3487 	.ndo_open		= fec_enet_open,
3488 	.ndo_stop		= fec_enet_close,
3489 	.ndo_start_xmit		= fec_enet_start_xmit,
3490 	.ndo_select_queue       = fec_enet_select_queue,
3491 	.ndo_set_rx_mode	= set_multicast_list,
3492 	.ndo_validate_addr	= eth_validate_addr,
3493 	.ndo_tx_timeout		= fec_timeout,
3494 	.ndo_set_mac_address	= fec_set_mac_address,
3495 	.ndo_eth_ioctl		= fec_enet_ioctl,
3496 #ifdef CONFIG_NET_POLL_CONTROLLER
3497 	.ndo_poll_controller	= fec_poll_controller,
3498 #endif
3499 	.ndo_set_features	= fec_set_features,
3500 };
3501 
3502 static const unsigned short offset_des_active_rxq[] = {
3503 	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
3504 };
3505 
3506 static const unsigned short offset_des_active_txq[] = {
3507 	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
3508 };
3509 
3510  /*
3511   * XXX:  We need to clean up on failure exits here.
3512   *
3513   */
3514 static int fec_enet_init(struct net_device *ndev)
3515 {
3516 	struct fec_enet_private *fep = netdev_priv(ndev);
3517 	struct bufdesc *cbd_base;
3518 	dma_addr_t bd_dma;
3519 	int bd_size;
3520 	unsigned int i;
3521 	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
3522 			sizeof(struct bufdesc);
3523 	unsigned dsize_log2 = __fls(dsize);
3524 	int ret;
3525 
3526 	WARN_ON(dsize != (1 << dsize_log2));
3527 #if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
3528 	fep->rx_align = 0xf;
3529 	fep->tx_align = 0xf;
3530 #else
3531 	fep->rx_align = 0x3;
3532 	fep->tx_align = 0x3;
3533 #endif
3534 
3535 	/* Check mask of the streaming and coherent API */
3536 	ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
3537 	if (ret < 0) {
3538 		dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
3539 		return ret;
3540 	}
3541 
3542 	ret = fec_enet_alloc_queue(ndev);
3543 	if (ret)
3544 		return ret;
3545 
3546 	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
3547 
3548 	/* Allocate memory for buffer descriptors. */
3549 	cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
3550 				       GFP_KERNEL);
3551 	if (!cbd_base) {
3552 		ret = -ENOMEM;
3553 		goto free_queue_mem;
3554 	}
3555 
3556 	/* Get the Ethernet address */
3557 	ret = fec_get_mac(ndev);
3558 	if (ret)
3559 		goto free_queue_mem;
3560 
3561 	/* make sure MAC we just acquired is programmed into the hw */
3562 	fec_set_mac_address(ndev, NULL);
3563 
3564 	/* Set receive and transmit descriptor base. */
3565 	for (i = 0; i < fep->num_rx_queues; i++) {
3566 		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
3567 		unsigned size = dsize * rxq->bd.ring_size;
3568 
3569 		rxq->bd.qid = i;
3570 		rxq->bd.base = cbd_base;
3571 		rxq->bd.cur = cbd_base;
3572 		rxq->bd.dma = bd_dma;
3573 		rxq->bd.dsize = dsize;
3574 		rxq->bd.dsize_log2 = dsize_log2;
3575 		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
3576 		bd_dma += size;
3577 		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
3578 		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
3579 	}
3580 
3581 	for (i = 0; i < fep->num_tx_queues; i++) {
3582 		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
3583 		unsigned size = dsize * txq->bd.ring_size;
3584 
3585 		txq->bd.qid = i;
3586 		txq->bd.base = cbd_base;
3587 		txq->bd.cur = cbd_base;
3588 		txq->bd.dma = bd_dma;
3589 		txq->bd.dsize = dsize;
3590 		txq->bd.dsize_log2 = dsize_log2;
3591 		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
3592 		bd_dma += size;
3593 		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
3594 		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
3595 	}
3596 
3597 
3598 	/* The FEC Ethernet specific entries in the device structure */
3599 	ndev->watchdog_timeo = TX_TIMEOUT;
3600 	ndev->netdev_ops = &fec_netdev_ops;
3601 	ndev->ethtool_ops = &fec_enet_ethtool_ops;
3602 
3603 	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
3604 	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
3605 
3606 	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
3607 		/* enable hw VLAN support */
3608 		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3609 
3610 	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
3611 		netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS);
3612 
		/* enable hw checksum and TSO offload */
3614 		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
3615 				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
3616 		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3617 	}
3618 
3619 	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
3620 		fep->tx_align = 0;
3621 		fep->rx_align = 0x3f;
3622 	}
3623 
3624 	ndev->hw_features = ndev->features;
3625 
3626 	fec_restart(ndev);
3627 
3628 	if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
3629 		fec_enet_clear_ethtool_stats(ndev);
3630 	else
3631 		fec_enet_update_ethtool_stats(ndev);
3632 
3633 	return 0;
3634 
3635 free_queue_mem:
3636 	fec_enet_free_queue(ndev);
3637 	return ret;
3638 }
3639 
3640 #ifdef CONFIG_OF
3641 static int fec_reset_phy(struct platform_device *pdev)
3642 {
3643 	int err, phy_reset;
3644 	bool active_high = false;
3645 	int msec = 1, phy_post_delay = 0;
3646 	struct device_node *np = pdev->dev.of_node;
3647 
3648 	if (!np)
3649 		return 0;
3650 
3651 	err = of_property_read_u32(np, "phy-reset-duration", &msec);
3652 	/* A sane reset duration should not be longer than 1s */
3653 	if (!err && msec > 1000)
3654 		msec = 1;
3655 
3656 	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
3657 	if (phy_reset == -EPROBE_DEFER)
3658 		return phy_reset;
3659 	else if (!gpio_is_valid(phy_reset))
3660 		return 0;
3661 
3662 	err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
	/* A valid post-reset delay should be less than 1s */
3664 	if (!err && phy_post_delay > 1000)
3665 		return -EINVAL;
3666 
3667 	active_high = of_property_read_bool(np, "phy-reset-active-high");
3668 
3669 	err = devm_gpio_request_one(&pdev->dev, phy_reset,
3670 			active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
3671 			"phy-reset");
3672 	if (err) {
3673 		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
3674 		return err;
3675 	}
3676 
3677 	if (msec > 20)
3678 		msleep(msec);
3679 	else
3680 		usleep_range(msec * 1000, msec * 1000 + 1000);
3681 
3682 	gpio_set_value_cansleep(phy_reset, !active_high);
3683 
3684 	if (!phy_post_delay)
3685 		return 0;
3686 
3687 	if (phy_post_delay > 20)
3688 		msleep(phy_post_delay);
3689 	else
3690 		usleep_range(phy_post_delay * 1000,
3691 			     phy_post_delay * 1000 + 1000);
3692 
3693 	return 0;
3694 }
3695 #else /* CONFIG_OF */
3696 static int fec_reset_phy(struct platform_device *pdev)
3697 {
3698 	/*
3699 	 * In case of platform probe, the reset has been done
3700 	 * by machine code.
3701 	 */
3702 	return 0;
3703 }
3704 #endif /* CONFIG_OF */
3705 
3706 static void
3707 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
3708 {
3709 	struct device_node *np = pdev->dev.of_node;
3710 
3711 	*num_tx = *num_rx = 1;
3712 
3713 	if (!np || !of_device_is_available(np))
3714 		return;
3715 
	/* parse the number of tx and rx queues */
3717 	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
3718 
3719 	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
3720 
3721 	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
3722 		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
3723 			 *num_tx);
3724 		*num_tx = 1;
3725 		return;
3726 	}
3727 
3728 	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
3729 		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
3730 			 *num_rx);
3731 		*num_rx = 1;
3732 		return;
3733 	}
3734 
3735 }
3736 
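/* Number of interrupt lines the MAC itself should request; when extra
 * lines are present the last one is reserved for the PTP PPS interrupt.
 */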
3737 static int fec_enet_get_irq_cnt(struct platform_device *pdev)
3738 {
3739 	int irq_cnt = platform_irq_count(pdev);
3740 
3741 	if (irq_cnt > FEC_IRQ_NUM)
3742 		irq_cnt = FEC_IRQ_NUM;	/* last for pps */
3743 	else if (irq_cnt == 2)
3744 		irq_cnt = 1;	/* last for pps */
3745 	else if (irq_cnt <= 0)
3746 		irq_cnt = 1;	/* At least 1 irq is needed */
3747 	return irq_cnt;
3748 }
3749 
3750 static void fec_enet_get_wakeup_irq(struct platform_device *pdev)
3751 {
3752 	struct net_device *ndev = platform_get_drvdata(pdev);
3753 	struct fec_enet_private *fep = netdev_priv(ndev);
3754 
3755 	if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2)
3756 		fep->wake_irq = fep->irq[2];
3757 	else
3758 		fep->wake_irq = fep->irq[0];
3759 }
3760 
3761 static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
3762 				   struct device_node *np)
3763 {
3764 	struct device_node *gpr_np;
3765 	u32 out_val[3];
3766 	int ret = 0;
3767 
3768 	gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0);
3769 	if (!gpr_np)
3770 		return 0;
3771 
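	/* "fsl,stop-mode" is <&gpr reg bit>: cell 0 is the GPR phandle
	 * (resolved above), cell 1 the register offset and cell 2 the bit
	 * that controls stop mode.
	 */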
3772 	ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
3773 					 ARRAY_SIZE(out_val));
3774 	if (ret) {
3775 		dev_dbg(&fep->pdev->dev, "no stop mode property\n");
3776 		goto out;
3777 	}
3778 
3779 	fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
3780 	if (IS_ERR(fep->stop_gpr.gpr)) {
3781 		dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
3782 		ret = PTR_ERR(fep->stop_gpr.gpr);
3783 		fep->stop_gpr.gpr = NULL;
3784 		goto out;
3785 	}
3786 
3787 	fep->stop_gpr.reg = out_val[1];
3788 	fep->stop_gpr.bit = out_val[2];
3789 
3790 out:
3791 	of_node_put(gpr_np);
3792 
3793 	return ret;
3794 }
3795 
3796 static int
3797 fec_probe(struct platform_device *pdev)
3798 {
3799 	struct fec_enet_private *fep;
3800 	struct fec_platform_data *pdata;
3801 	phy_interface_t interface;
3802 	struct net_device *ndev;
3803 	int i, irq, ret = 0;
3804 	const struct of_device_id *of_id;
3805 	static int dev_id;
3806 	struct device_node *np = pdev->dev.of_node, *phy_node;
3807 	int num_tx_qs;
3808 	int num_rx_qs;
3809 	char irq_name[8];
3810 	int irq_cnt;
3811 	struct fec_devinfo *dev_info;
3812 
3813 	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
3814 
3815 	/* Init network device */
3816 	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
3817 				  FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
3818 	if (!ndev)
3819 		return -ENOMEM;
3820 
3821 	SET_NETDEV_DEV(ndev, &pdev->dev);
3822 
3823 	/* setup board info structure */
3824 	fep = netdev_priv(ndev);
3825 
3826 	of_id = of_match_device(fec_dt_ids, &pdev->dev);
3827 	if (of_id)
3828 		pdev->id_entry = of_id->data;
3829 	dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data;
3830 	if (dev_info)
3831 		fep->quirks = dev_info->quirks;
3832 
3833 	fep->netdev = ndev;
3834 	fep->num_rx_queues = num_rx_qs;
3835 	fep->num_tx_queues = num_tx_qs;
3836 
3837 #if !defined(CONFIG_M5272)
	/* enable pause frame autonegotiation by default */
3839 	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
3840 		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
3841 #endif
3842 
3843 	/* Select default pin state */
3844 	pinctrl_pm_select_default_state(&pdev->dev);
3845 
3846 	fep->hwp = devm_platform_ioremap_resource(pdev, 0);
3847 	if (IS_ERR(fep->hwp)) {
3848 		ret = PTR_ERR(fep->hwp);
3849 		goto failed_ioremap;
3850 	}
3851 
3852 	fep->pdev = pdev;
3853 	fep->dev_id = dev_id++;
3854 
3855 	platform_set_drvdata(pdev, ndev);
3856 
3857 	if ((of_machine_is_compatible("fsl,imx6q") ||
3858 	     of_machine_is_compatible("fsl,imx6dl")) &&
3859 	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
3860 		fep->quirks |= FEC_QUIRK_ERR006687;
3861 
3862 	if (of_get_property(np, "fsl,magic-packet", NULL))
3863 		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
3864 
3865 	ret = fec_enet_init_stop_mode(fep, np);
3866 	if (ret)
3867 		goto failed_stop_mode;
3868 
3869 	phy_node = of_parse_phandle(np, "phy-handle", 0);
3870 	if (!phy_node && of_phy_is_fixed_link(np)) {
3871 		ret = of_phy_register_fixed_link(np);
3872 		if (ret < 0) {
3873 			dev_err(&pdev->dev,
3874 				"broken fixed-link specification\n");
3875 			goto failed_phy;
3876 		}
3877 		phy_node = of_node_get(np);
3878 	}
3879 	fep->phy_node = phy_node;
3880 
3881 	ret = of_get_phy_mode(pdev->dev.of_node, &interface);
3882 	if (ret) {
3883 		pdata = dev_get_platdata(&pdev->dev);
3884 		if (pdata)
3885 			fep->phy_interface = pdata->phy;
3886 		else
3887 			fep->phy_interface = PHY_INTERFACE_MODE_MII;
3888 	} else {
3889 		fep->phy_interface = interface;
3890 	}
3891 
3892 	ret = fec_enet_parse_rgmii_delay(fep, np);
3893 	if (ret)
3894 		goto failed_rgmii_delay;
3895 
3896 	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
3897 	if (IS_ERR(fep->clk_ipg)) {
3898 		ret = PTR_ERR(fep->clk_ipg);
3899 		goto failed_clk;
3900 	}
3901 
3902 	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
3903 	if (IS_ERR(fep->clk_ahb)) {
3904 		ret = PTR_ERR(fep->clk_ahb);
3905 		goto failed_clk;
3906 	}
3907 
3908 	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
3909 
3910 	/* enet_out is optional, depends on board */
3911 	fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
3912 	if (IS_ERR(fep->clk_enet_out)) {
3913 		ret = PTR_ERR(fep->clk_enet_out);
3914 		goto failed_clk;
3915 	}
3916 
3917 	fep->ptp_clk_on = false;
3918 	spin_lock_init(&fep->tmreg_lock);
3919 
3920 	/* clk_ref is optional, depends on board */
3921 	fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
3922 	if (IS_ERR(fep->clk_ref)) {
3923 		ret = PTR_ERR(fep->clk_ref);
3924 		goto failed_clk;
3925 	}
3926 	fep->clk_ref_rate = clk_get_rate(fep->clk_ref);
3927 
3928 	/* clk_2x_txclk is optional, depends on board */
3929 	if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) {
3930 		fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk");
3931 		if (IS_ERR(fep->clk_2x_txclk))
3932 			fep->clk_2x_txclk = NULL;
3933 	}
3934 
3935 	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
3936 	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
3937 	if (IS_ERR(fep->clk_ptp)) {
3938 		fep->clk_ptp = NULL;
3939 		fep->bufdesc_ex = false;
3940 	}
3941 
3942 	ret = fec_enet_clk_enable(ndev, true);
3943 	if (ret)
3944 		goto failed_clk;
3945 
3946 	ret = clk_prepare_enable(fep->clk_ipg);
3947 	if (ret)
3948 		goto failed_clk_ipg;
3949 	ret = clk_prepare_enable(fep->clk_ahb);
3950 	if (ret)
3951 		goto failed_clk_ahb;
3952 
3953 	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
3954 	if (!IS_ERR(fep->reg_phy)) {
3955 		ret = regulator_enable(fep->reg_phy);
3956 		if (ret) {
3957 			dev_err(&pdev->dev,
3958 				"Failed to enable phy regulator: %d\n", ret);
3959 			goto failed_regulator;
3960 		}
3961 	} else {
3962 		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
3963 			ret = -EPROBE_DEFER;
3964 			goto failed_regulator;
3965 		}
3966 		fep->reg_phy = NULL;
3967 	}
3968 
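	/* Mark the device active and hold a runtime PM reference for the
	 * rest of probe; it is dropped with pm_runtime_put_autosuspend()
	 * once probe succeeds.
	 */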
3969 	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
3970 	pm_runtime_use_autosuspend(&pdev->dev);
3971 	pm_runtime_get_noresume(&pdev->dev);
3972 	pm_runtime_set_active(&pdev->dev);
3973 	pm_runtime_enable(&pdev->dev);
3974 
3975 	ret = fec_reset_phy(pdev);
3976 	if (ret)
3977 		goto failed_reset;
3978 
3979 	irq_cnt = fec_enet_get_irq_cnt(pdev);
3980 	if (fep->bufdesc_ex)
3981 		fec_ptp_init(pdev, irq_cnt);
3982 
3983 	ret = fec_enet_init(ndev);
3984 	if (ret)
3985 		goto failed_init;
3986 
3987 	for (i = 0; i < irq_cnt; i++) {
3988 		snprintf(irq_name, sizeof(irq_name), "int%d", i);
3989 		irq = platform_get_irq_byname_optional(pdev, irq_name);
3990 		if (irq < 0)
3991 			irq = platform_get_irq(pdev, i);
3992 		if (irq < 0) {
3993 			ret = irq;
3994 			goto failed_irq;
3995 		}
3996 		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
3997 				       0, pdev->name, ndev);
3998 		if (ret)
3999 			goto failed_irq;
4000 
4001 		fep->irq[i] = irq;
4002 	}
4003 
4004 	/* Decide which interrupt line is wakeup capable */
4005 	fec_enet_get_wakeup_irq(pdev);
4006 
4007 	ret = fec_enet_mii_init(pdev);
4008 	if (ret)
4009 		goto failed_mii_init;
4010 
4011 	/* Carrier starts down, phylib will bring it up */
4012 	netif_carrier_off(ndev);
4013 	fec_enet_clk_enable(ndev, false);
4014 	pinctrl_pm_select_sleep_state(&pdev->dev);
4015 
4016 	ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;
4017 
4018 	ret = register_netdev(ndev);
4019 	if (ret)
4020 		goto failed_register;
4021 
4022 	device_init_wakeup(&ndev->dev, fep->wol_flag &
4023 			   FEC_WOL_HAS_MAGIC_PACKET);
4024 
4025 	if (fep->bufdesc_ex && fep->ptp_clock)
4026 		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
4027 
4028 	fep->rx_copybreak = COPYBREAK_DEFAULT;
4029 	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
4030 
4031 	pm_runtime_mark_last_busy(&pdev->dev);
4032 	pm_runtime_put_autosuspend(&pdev->dev);
4033 
4034 	return 0;
4035 
4036 failed_register:
4037 	fec_enet_mii_remove(fep);
4038 failed_mii_init:
4039 failed_irq:
4040 failed_init:
4041 	fec_ptp_stop(pdev);
4042 failed_reset:
4043 	pm_runtime_put_noidle(&pdev->dev);
4044 	pm_runtime_disable(&pdev->dev);
4045 	if (fep->reg_phy)
4046 		regulator_disable(fep->reg_phy);
4047 failed_regulator:
4048 	clk_disable_unprepare(fep->clk_ahb);
4049 failed_clk_ahb:
4050 	clk_disable_unprepare(fep->clk_ipg);
4051 failed_clk_ipg:
4052 	fec_enet_clk_enable(ndev, false);
4053 failed_clk:
4054 failed_rgmii_delay:
4055 	if (of_phy_is_fixed_link(np))
4056 		of_phy_deregister_fixed_link(np);
4057 	of_node_put(phy_node);
4058 failed_stop_mode:
4059 failed_phy:
4060 	dev_id--;
4061 failed_ioremap:
4062 	free_netdev(ndev);
4063 
4064 	return ret;
4065 }
4066 
4067 static int
4068 fec_drv_remove(struct platform_device *pdev)
4069 {
4070 	struct net_device *ndev = platform_get_drvdata(pdev);
4071 	struct fec_enet_private *fep = netdev_priv(ndev);
4072 	struct device_node *np = pdev->dev.of_node;
4073 	int ret;
4074 
4075 	ret = pm_runtime_resume_and_get(&pdev->dev);
4076 	if (ret < 0)
4077 		return ret;
4078 
4079 	cancel_work_sync(&fep->tx_timeout_work);
4080 	fec_ptp_stop(pdev);
4081 	unregister_netdev(ndev);
4082 	fec_enet_mii_remove(fep);
4083 	if (fep->reg_phy)
4084 		regulator_disable(fep->reg_phy);
4085 
4086 	if (of_phy_is_fixed_link(np))
4087 		of_phy_deregister_fixed_link(np);
4088 	of_node_put(fep->phy_node);
4089 
4090 	clk_disable_unprepare(fep->clk_ahb);
4091 	clk_disable_unprepare(fep->clk_ipg);
4092 	pm_runtime_put_noidle(&pdev->dev);
4093 	pm_runtime_disable(&pdev->dev);
4094 
4095 	free_netdev(ndev);
4096 	return 0;
4097 }
4098 
4099 static int __maybe_unused fec_suspend(struct device *dev)
4100 {
4101 	struct net_device *ndev = dev_get_drvdata(dev);
4102 	struct fec_enet_private *fep = netdev_priv(ndev);
4103 
4104 	rtnl_lock();
4105 	if (netif_running(ndev)) {
4106 		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
4107 			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
4108 		phy_stop(ndev->phydev);
4109 		napi_disable(&fep->napi);
4110 		netif_tx_lock_bh(ndev);
4111 		netif_device_detach(ndev);
4112 		netif_tx_unlock_bh(ndev);
4113 		fec_stop(ndev);
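		/* Without Wake-on-LAN, mask all interrupts and put the pins
		 * to sleep; otherwise keep the wakeup interrupt armed and
		 * enter stop mode.
		 */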
4114 		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
4115 			fec_irqs_disable(ndev);
4116 			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
4117 		} else {
4118 			fec_irqs_disable_except_wakeup(ndev);
4119 			if (fep->wake_irq > 0) {
4120 				disable_irq(fep->wake_irq);
4121 				enable_irq_wake(fep->wake_irq);
4122 			}
4123 			fec_enet_stop_mode(fep, true);
4124 		}
4125 		/* It's safe to disable clocks since interrupts are masked */
4126 		fec_enet_clk_enable(ndev, false);
4127 	}
4128 	rtnl_unlock();
4129 
4130 	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
4131 		regulator_disable(fep->reg_phy);
4132 
	/* The SoC supplies the PHY clock and controls the PHY regulator;
	 * disabling either one takes the PHY link down.
	 */
4136 	if (fep->clk_enet_out || fep->reg_phy)
4137 		fep->link = 0;
4138 
4139 	return 0;
4140 }
4141 
4142 static int __maybe_unused fec_resume(struct device *dev)
4143 {
4144 	struct net_device *ndev = dev_get_drvdata(dev);
4145 	struct fec_enet_private *fep = netdev_priv(ndev);
4146 	int ret;
4147 	int val;
4148 
4149 	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
4150 		ret = regulator_enable(fep->reg_phy);
4151 		if (ret)
4152 			return ret;
4153 	}
4154 
4155 	rtnl_lock();
4156 	if (netif_running(ndev)) {
4157 		ret = fec_enet_clk_enable(ndev, true);
4158 		if (ret) {
4159 			rtnl_unlock();
4160 			goto failed_clk;
4161 		}
4162 		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
4163 			fec_enet_stop_mode(fep, false);
4164 			if (fep->wake_irq) {
4165 				disable_irq_wake(fep->wake_irq);
4166 				enable_irq(fep->wake_irq);
4167 			}
4168 
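			/* Clear the magic-packet and sleep bits used for
			 * Wake-on-LAN.
			 */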
4169 			val = readl(fep->hwp + FEC_ECNTRL);
4170 			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
4171 			writel(val, fep->hwp + FEC_ECNTRL);
4172 			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
4173 		} else {
4174 			pinctrl_pm_select_default_state(&fep->pdev->dev);
4175 		}
4176 		fec_restart(ndev);
4177 		netif_tx_lock_bh(ndev);
4178 		netif_device_attach(ndev);
4179 		netif_tx_unlock_bh(ndev);
4180 		napi_enable(&fep->napi);
4181 		phy_init_hw(ndev->phydev);
4182 		phy_start(ndev->phydev);
4183 	}
4184 	rtnl_unlock();
4185 
4186 	return 0;
4187 
4188 failed_clk:
4189 	if (fep->reg_phy)
4190 		regulator_disable(fep->reg_phy);
4191 	return ret;
4192 }
4193 
4194 static int __maybe_unused fec_runtime_suspend(struct device *dev)
4195 {
4196 	struct net_device *ndev = dev_get_drvdata(dev);
4197 	struct fec_enet_private *fep = netdev_priv(ndev);
4198 
4199 	clk_disable_unprepare(fep->clk_ahb);
4200 	clk_disable_unprepare(fep->clk_ipg);
4201 
4202 	return 0;
4203 }
4204 
4205 static int __maybe_unused fec_runtime_resume(struct device *dev)
4206 {
4207 	struct net_device *ndev = dev_get_drvdata(dev);
4208 	struct fec_enet_private *fep = netdev_priv(ndev);
4209 	int ret;
4210 
4211 	ret = clk_prepare_enable(fep->clk_ahb);
4212 	if (ret)
4213 		return ret;
4214 	ret = clk_prepare_enable(fep->clk_ipg);
4215 	if (ret)
4216 		goto failed_clk_ipg;
4217 
4218 	return 0;
4219 
4220 failed_clk_ipg:
4221 	clk_disable_unprepare(fep->clk_ahb);
4222 	return ret;
4223 }
4224 
4225 static const struct dev_pm_ops fec_pm_ops = {
4226 	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
4227 	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
4228 };
4229 
4230 static struct platform_driver fec_driver = {
4231 	.driver	= {
4232 		.name	= DRIVER_NAME,
4233 		.pm	= &fec_pm_ops,
4234 		.of_match_table = fec_dt_ids,
4235 		.suppress_bind_attrs = true,
4236 	},
4237 	.id_table = fec_devtype,
4238 	.probe	= fec_probe,
4239 	.remove	= fec_drv_remove,
4240 };
4241 
4242 module_platform_driver(fec_driver);
4243 
4244 MODULE_LICENSE("GPL");
4245