1 // SPDX-License-Identifier: GPL-2.0
/*
 * sni_ave.c - Socionext UniPhier AVE ethernet driver
 * Copyright 2014 Panasonic Corporation
 * Copyright 2015-2017 Socionext Inc.
 */
7 
8 #include <linux/bitops.h>
9 #include <linux/clk.h>
10 #include <linux/etherdevice.h>
11 #include <linux/interrupt.h>
12 #include <linux/io.h>
13 #include <linux/iopoll.h>
14 #include <linux/mii.h>
15 #include <linux/module.h>
16 #include <linux/netdevice.h>
17 #include <linux/of_net.h>
18 #include <linux/of_mdio.h>
19 #include <linux/of_platform.h>
20 #include <linux/phy.h>
21 #include <linux/reset.h>
22 #include <linux/types.h>
23 #include <linux/u64_stats_sync.h>
24 
25 /* General Register Group */
26 #define AVE_IDR			0x000	/* ID */
27 #define AVE_VR			0x004	/* Version */
28 #define AVE_GRR			0x008	/* Global Reset */
29 #define AVE_CFGR		0x00c	/* Configuration */
30 
31 /* Interrupt Register Group */
32 #define AVE_GIMR		0x100	/* Global Interrupt Mask */
33 #define AVE_GISR		0x104	/* Global Interrupt Status */
34 
35 /* MAC Register Group */
36 #define AVE_TXCR		0x200	/* TX Setup */
37 #define AVE_RXCR		0x204	/* RX Setup */
38 #define AVE_RXMAC1R		0x208	/* MAC address (lower) */
39 #define AVE_RXMAC2R		0x20c	/* MAC address (upper) */
40 #define AVE_MDIOCTR		0x214	/* MDIO Control */
41 #define AVE_MDIOAR		0x218	/* MDIO Address */
42 #define AVE_MDIOWDR		0x21c	/* MDIO Data */
43 #define AVE_MDIOSR		0x220	/* MDIO Status */
44 #define AVE_MDIORDR		0x224	/* MDIO Rd Data */
45 
46 /* Descriptor Control Register Group */
47 #define AVE_DESCC		0x300	/* Descriptor Control */
48 #define AVE_TXDC		0x304	/* TX Descriptor Configuration */
49 #define AVE_RXDC0		0x308	/* RX Descriptor Ring0 Configuration */
50 #define AVE_IIRQC		0x34c	/* Interval IRQ Control */
51 
52 /* Packet Filter Register Group */
53 #define AVE_PKTF_BASE		0x800	/* PF Base Address */
54 #define AVE_PFMBYTE_BASE	0xd00	/* PF Mask Byte Base Address */
55 #define AVE_PFMBIT_BASE		0xe00	/* PF Mask Bit Base Address */
56 #define AVE_PFSEL_BASE		0xf00	/* PF Selector Base Address */
57 #define AVE_PFEN		0xffc	/* Packet Filter Enable */
58 #define AVE_PKTF(ent)		(AVE_PKTF_BASE + (ent) * 0x40)
59 #define AVE_PFMBYTE(ent)	(AVE_PFMBYTE_BASE + (ent) * 8)
60 #define AVE_PFMBIT(ent)		(AVE_PFMBIT_BASE + (ent) * 4)
61 #define AVE_PFSEL(ent)		(AVE_PFSEL_BASE + (ent) * 4)
62 
63 /* 64bit descriptor memory */
64 #define AVE_DESC_SIZE_64	12	/* Descriptor Size */
65 
66 #define AVE_TXDM_64		0x1000	/* Tx Descriptor Memory */
67 #define AVE_RXDM_64		0x1c00	/* Rx Descriptor Memory */
68 
69 #define AVE_TXDM_SIZE_64	0x0ba0	/* Tx Descriptor Memory Size 3KB */
70 #define AVE_RXDM_SIZE_64	0x6000	/* Rx Descriptor Memory Size 24KB */
71 
72 /* 32bit descriptor memory */
73 #define AVE_DESC_SIZE_32	8	/* Descriptor Size */
74 
75 #define AVE_TXDM_32		0x1000	/* Tx Descriptor Memory */
76 #define AVE_RXDM_32		0x1800	/* Rx Descriptor Memory */
77 
78 #define AVE_TXDM_SIZE_32	0x07c0	/* Tx Descriptor Memory Size 2KB */
79 #define AVE_RXDM_SIZE_32	0x4000	/* Rx Descriptor Memory Size 16KB */
80 
81 /* RMII Bridge Register Group */
82 #define AVE_RSTCTRL		0x8028	/* Reset control */
83 #define AVE_RSTCTRL_RMIIRST	BIT(16)
84 #define AVE_LINKSEL		0x8034	/* Link speed setting */
85 #define AVE_LINKSEL_100M	BIT(0)
86 
87 /* AVE_GRR */
88 #define AVE_GRR_RXFFR		BIT(5)	/* Reset RxFIFO */
89 #define AVE_GRR_PHYRST		BIT(4)	/* Reset external PHY */
90 #define AVE_GRR_GRST		BIT(0)	/* Reset all MAC */
91 
92 /* AVE_CFGR */
93 #define AVE_CFGR_FLE		BIT(31)	/* Filter Function */
94 #define AVE_CFGR_CHE		BIT(30)	/* Checksum Function */
95 #define AVE_CFGR_MII		BIT(27)	/* Func mode (1:MII/RMII, 0:RGMII) */
96 #define AVE_CFGR_IPFCEN		BIT(24)	/* IP fragment sum Enable */
97 
98 /* AVE_GISR (common with GIMR) */
99 #define AVE_GI_PHY		BIT(24)	/* PHY interrupt */
100 #define AVE_GI_TX		BIT(16)	/* Tx complete */
101 #define AVE_GI_RXERR		BIT(8)	/* Receive frame more than max size */
102 #define AVE_GI_RXOVF		BIT(7)	/* Overflow at the RxFIFO */
103 #define AVE_GI_RXDROP		BIT(6)	/* Drop packet */
104 #define AVE_GI_RXIINT		BIT(5)	/* Interval interrupt */
105 
106 /* AVE_TXCR */
107 #define AVE_TXCR_FLOCTR		BIT(18)	/* Flow control */
108 #define AVE_TXCR_TXSPD_1G	BIT(17)
109 #define AVE_TXCR_TXSPD_100	BIT(16)
110 
111 /* AVE_RXCR */
112 #define AVE_RXCR_RXEN		BIT(30)	/* Rx enable */
113 #define AVE_RXCR_FDUPEN		BIT(22)	/* Interface mode */
114 #define AVE_RXCR_FLOCTR		BIT(21)	/* Flow control */
115 #define AVE_RXCR_AFEN		BIT(19)	/* MAC address filter */
116 #define AVE_RXCR_DRPEN		BIT(18)	/* Drop pause frame */
117 #define AVE_RXCR_MPSIZ_MASK	GENMASK(10, 0)
118 
119 /* AVE_MDIOCTR */
120 #define AVE_MDIOCTR_RREQ	BIT(3)	/* Read request */
121 #define AVE_MDIOCTR_WREQ	BIT(2)	/* Write request */
122 
123 /* AVE_MDIOSR */
124 #define AVE_MDIOSR_STS		BIT(0)	/* access status */
125 
126 /* AVE_DESCC */
127 #define AVE_DESCC_STATUS_MASK	GENMASK(31, 16)
128 #define AVE_DESCC_RD0		BIT(8)	/* Enable Rx descriptor Ring0 */
129 #define AVE_DESCC_RDSTP		BIT(4)	/* Pause Rx descriptor */
130 #define AVE_DESCC_TD		BIT(0)	/* Enable Tx descriptor */
131 
132 /* AVE_TXDC */
133 #define AVE_TXDC_SIZE		GENMASK(27, 16)	/* Size of Tx descriptor */
134 #define AVE_TXDC_ADDR		GENMASK(11, 0)	/* Start address */
135 #define AVE_TXDC_ADDR_START	0
136 
137 /* AVE_RXDC0 */
138 #define AVE_RXDC0_SIZE		GENMASK(30, 16)	/* Size of Rx descriptor */
139 #define AVE_RXDC0_ADDR		GENMASK(14, 0)	/* Start address */
140 #define AVE_RXDC0_ADDR_START	0
141 
142 /* AVE_IIRQC */
143 #define AVE_IIRQC_EN0		BIT(27)	/* Enable interval interrupt Ring0 */
144 #define AVE_IIRQC_BSCK		GENMASK(15, 0)	/* Interval count unit */
145 
146 /* Command status for descriptor */
147 #define AVE_STS_OWN		BIT(31)	/* Descriptor ownership */
148 #define AVE_STS_INTR		BIT(29)	/* Request for interrupt */
149 #define AVE_STS_OK		BIT(27)	/* Normal transmit */
150 /* TX */
151 #define AVE_STS_NOCSUM		BIT(28)	/* No use HW checksum */
152 #define AVE_STS_1ST		BIT(26)	/* Head of buffer chain */
153 #define AVE_STS_LAST		BIT(25)	/* Tail of buffer chain */
154 #define AVE_STS_OWC		BIT(21)	/* Out of window,Late Collision */
155 #define AVE_STS_EC		BIT(20)	/* Excess collision occurred */
156 #define AVE_STS_PKTLEN_TX_MASK	GENMASK(15, 0)
157 /* RX */
158 #define AVE_STS_CSSV		BIT(21)	/* Checksum check performed */
159 #define AVE_STS_CSER		BIT(20)	/* Checksum error detected */
160 #define AVE_STS_PKTLEN_RX_MASK	GENMASK(10, 0)
161 
162 /* Packet filter */
163 #define AVE_PFMBYTE_MASK0	(GENMASK(31, 8) | GENMASK(5, 0))
164 #define AVE_PFMBYTE_MASK1	GENMASK(25, 0)
165 #define AVE_PFMBIT_MASK		GENMASK(15, 0)
166 
167 #define AVE_PF_SIZE		17	/* Number of all packet filter */
168 #define AVE_PF_MULTICAST_SIZE	7	/* Number of multicast filter */
169 
170 #define AVE_PFNUM_FILTER	0	/* No.0 */
171 #define AVE_PFNUM_UNICAST	1	/* No.1 */
172 #define AVE_PFNUM_BROADCAST	2	/* No.2 */
173 #define AVE_PFNUM_MULTICAST	11	/* No.11-17 */
174 
175 /* NETIF Message control */
176 #define AVE_DEFAULT_MSG_ENABLE	(NETIF_MSG_DRV    |	\
177 				 NETIF_MSG_PROBE  |	\
178 				 NETIF_MSG_LINK   |	\
179 				 NETIF_MSG_TIMER  |	\
180 				 NETIF_MSG_IFDOWN |	\
181 				 NETIF_MSG_IFUP   |	\
182 				 NETIF_MSG_RX_ERR |	\
183 				 NETIF_MSG_TX_ERR)
184 
185 /* Parameter for descriptor */
186 #define AVE_NR_TXDESC		32	/* Tx descriptor */
187 #define AVE_NR_RXDESC		64	/* Rx descriptor */
188 
189 #define AVE_DESC_OFS_CMDSTS	0
190 #define AVE_DESC_OFS_ADDRL	4
191 #define AVE_DESC_OFS_ADDRU	8
192 
193 /* Parameter for ethernet frame */
194 #define AVE_MAX_ETHFRAME	1518
195 
196 /* Parameter for interrupt */
197 #define AVE_INTM_COUNT		20
198 #define AVE_FORCE_TXINTCNT	1
199 
200 #define IS_DESC_64BIT(p)	((p)->data->is_desc_64bit)
201 
/* Selects which descriptor ring (Rx or Tx) a descriptor accessor operates on */
enum desc_id {
	AVE_DESCID_RX,
	AVE_DESCID_TX,
};
206 
/* Requested state transitions for the descriptor controller (AVE_DESCC) */
enum desc_state {
	AVE_DESC_RX_PERMIT,	/* resume Rx descriptor processing */
	AVE_DESC_RX_SUSPEND,	/* pause Rx descriptor processing */
	AVE_DESC_START,		/* enable Tx and Rx ring0 */
	AVE_DESC_STOP,		/* disable the descriptor controller */
};
213 
/* Software bookkeeping for the buffer bound to one ring slot */
struct ave_desc {
	struct sk_buff	*skbs;		/* skb attached to this slot, NULL if none */
	dma_addr_t	skbs_dma;	/* DMA handle of the mapping, 0 if unmapped */
	size_t		skbs_dmalen;	/* length of the DMA mapping */
};
219 
/* State of one descriptor ring (there is one instance each for Tx and Rx) */
struct ave_desc_info {
	u32	ndesc;		/* number of descriptor */
	u32	daddr;		/* start address of descriptor */
	u32	proc_idx;	/* index of processing packet */
	u32	done_idx;	/* index of processed packet */
	struct ave_desc *desc;	/* skb info related descriptor */
};
227 
/* Per-SoC configuration data, attached via priv->data */
struct ave_soc_data {
	bool	is_desc_64bit;	/* descriptors carry an upper address word (12-byte format) */
};
231 
/* Software counters; syncp guards 64-bit reads against concurrent updates */
struct ave_stats {
	struct	u64_stats_sync	syncp;
	u64	packets;
	u64	bytes;
	u64	errors;
	u64	dropped;
	u64	collisions;
	u64	fifo_errors;
};
241 
/* Driver private data, one instance per AVE device (netdev_priv(ndev)) */
struct ave_private {
	void __iomem            *base;	/* mapped register base */
	int                     irq;
	int			phy_id;
	unsigned int		desc_size;	/* bytes per HW descriptor (8 or 12) */
	u32			msg_enable;
	struct clk		*clk;
	struct reset_control	*rst;
	phy_interface_t		phy_mode;
	struct phy_device	*phydev;
	struct mii_bus		*mdio;

	/* stats */
	struct ave_stats	stats_rx;
	struct ave_stats	stats_tx;

	/* NAPI support */
	struct net_device	*ndev;
	struct napi_struct	napi_rx;
	struct napi_struct	napi_tx;

	/* descriptor */
	struct ave_desc_info	rx;
	struct ave_desc_info	tx;

	/* flow control (cached ethtool pauseparam settings) */
	int pause_auto;
	int pause_rx;
	int pause_tx;

	const struct ave_soc_data *data;
};
274 
275 static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry,
276 			 int offset)
277 {
278 	struct ave_private *priv = netdev_priv(ndev);
279 	u32 addr;
280 
281 	addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
282 		+ entry * priv->desc_size + offset;
283 
284 	return readl(priv->base + addr);
285 }
286 
/* Read the command/status word of descriptor @entry in ring @id */
static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id,
				int entry)
{
	return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS);
}
292 
293 static void ave_desc_write(struct net_device *ndev, enum desc_id id,
294 			   int entry, int offset, u32 val)
295 {
296 	struct ave_private *priv = netdev_priv(ndev);
297 	u32 addr;
298 
299 	addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
300 		+ entry * priv->desc_size + offset;
301 
302 	writel(val, priv->base + addr);
303 }
304 
/* Write the command/status word of descriptor @entry in ring @id */
static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id,
				  int entry, u32 val)
{
	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val);
}
310 
311 static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id,
312 				int entry, dma_addr_t paddr)
313 {
314 	struct ave_private *priv = netdev_priv(ndev);
315 
316 	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL,
317 		       lower_32_bits(paddr));
318 	if (IS_DESC_64BIT(priv))
319 		ave_desc_write(ndev, id,
320 			       entry, AVE_DESC_OFS_ADDRU,
321 			       upper_32_bits(paddr));
322 }
323 
324 static u32 ave_irq_disable_all(struct net_device *ndev)
325 {
326 	struct ave_private *priv = netdev_priv(ndev);
327 	u32 ret;
328 
329 	ret = readl(priv->base + AVE_GIMR);
330 	writel(0, priv->base + AVE_GIMR);
331 
332 	return ret;
333 }
334 
/* Restore an interrupt mask previously saved by ave_irq_disable_all() */
static void ave_irq_restore(struct net_device *ndev, u32 val)
{
	struct ave_private *priv = netdev_priv(ndev);

	writel(val, priv->base + AVE_GIMR);
}
341 
342 static void ave_irq_enable(struct net_device *ndev, u32 bitflag)
343 {
344 	struct ave_private *priv = netdev_priv(ndev);
345 
346 	writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR);
347 	writel(bitflag, priv->base + AVE_GISR);
348 }
349 
350 static void ave_hw_write_macaddr(struct net_device *ndev,
351 				 const unsigned char *mac_addr,
352 				 int reg1, int reg2)
353 {
354 	struct ave_private *priv = netdev_priv(ndev);
355 
356 	writel(mac_addr[0] | mac_addr[1] << 8 |
357 	       mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1);
358 	writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2);
359 }
360 
361 static void ave_hw_read_version(struct net_device *ndev, char *buf, int len)
362 {
363 	struct ave_private *priv = netdev_priv(ndev);
364 	u32 major, minor, vr;
365 
366 	vr = readl(priv->base + AVE_VR);
367 	major = (vr & GENMASK(15, 8)) >> 8;
368 	minor = (vr & GENMASK(7, 0));
369 	snprintf(buf, len, "v%u.%u", major, minor);
370 }
371 
/* ethtool .get_drvinfo: report driver name, bus info and the hardware
 * version (reused in the fw_version field).
 */
static void ave_ethtool_get_drvinfo(struct net_device *ndev,
				    struct ethtool_drvinfo *info)
{
	struct device *dev = ndev->dev.parent;

	strlcpy(info->driver, dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
	ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
}
381 
/* ethtool .get_msglevel: return the current netif message-enable mask */
static u32 ave_ethtool_get_msglevel(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);

	return priv->msg_enable;
}
388 
/* ethtool .set_msglevel: set the netif message-enable mask */
static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val)
{
	struct ave_private *priv = netdev_priv(ndev);

	priv->msg_enable = val;
}
395 
396 static void ave_ethtool_get_wol(struct net_device *ndev,
397 				struct ethtool_wolinfo *wol)
398 {
399 	wol->supported = 0;
400 	wol->wolopts   = 0;
401 
402 	if (ndev->phydev)
403 		phy_ethtool_get_wol(ndev->phydev, wol);
404 }
405 
406 static int ave_ethtool_set_wol(struct net_device *ndev,
407 			       struct ethtool_wolinfo *wol)
408 {
409 	int ret;
410 
411 	if (!ndev->phydev ||
412 	    (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
413 		return -EOPNOTSUPP;
414 
415 	ret = phy_ethtool_set_wol(ndev->phydev, wol);
416 	if (!ret)
417 		device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
418 
419 	return ret;
420 }
421 
/* ethtool .get_pauseparam: report the cached flow-control settings */
static void ave_ethtool_get_pauseparam(struct net_device *ndev,
				       struct ethtool_pauseparam *pause)
{
	struct ave_private *priv = netdev_priv(ndev);

	pause->autoneg  = priv->pause_auto;
	pause->rx_pause = priv->pause_rx;
	pause->tx_pause = priv->pause_tx;
}
431 
/* ethtool .set_pauseparam: cache the requested flow-control settings and
 * translate them into 802.3 Pause/Asym_Pause advertisement bits.
 * Returns -EINVAL if no PHY is attached.
 */
static int ave_ethtool_set_pauseparam(struct net_device *ndev,
				      struct ethtool_pauseparam *pause)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -EINVAL;

	priv->pause_auto = pause->autoneg;
	priv->pause_rx   = pause->rx_pause;
	priv->pause_tx   = pause->tx_pause;

	/* Standard pause-advertisement mapping:
	 *   rx only  -> Pause | Asym_Pause
	 *   rx & tx  -> Pause            (XOR below clears Asym_Pause again)
	 *   tx only  -> Asym_Pause
	 * so the ^= on Asym_Pause is intentional, not a typo for |=.
	 */
	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
	if (pause->rx_pause)
		phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (pause->tx_pause)
		phydev->advertising ^= ADVERTISED_Asym_Pause;

	if (pause->autoneg) {
		if (netif_running(ndev))
			phy_start_aneg(phydev);
	}

	return 0;
}
458 
/* ethtool operations; link settings, nway reset and WoL are delegated to
 * phylib helpers, the rest are implemented above.
 */
static const struct ethtool_ops ave_ethtool_ops = {
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_drvinfo		= ave_ethtool_get_drvinfo,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= ave_ethtool_get_msglevel,
	.set_msglevel		= ave_ethtool_set_msglevel,
	.get_wol		= ave_ethtool_get_wol,
	.set_wol		= ave_ethtool_set_wol,
	.get_pauseparam         = ave_ethtool_get_pauseparam,
	.set_pauseparam         = ave_ethtool_set_pauseparam,
};
472 
473 static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum)
474 {
475 	struct net_device *ndev = bus->priv;
476 	struct ave_private *priv;
477 	u32 mdioctl, mdiosr;
478 	int ret;
479 
480 	priv = netdev_priv(ndev);
481 
482 	/* write address */
483 	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
484 
485 	/* read request */
486 	mdioctl = readl(priv->base + AVE_MDIOCTR);
487 	writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ,
488 	       priv->base + AVE_MDIOCTR);
489 
490 	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
491 				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
492 	if (ret) {
493 		netdev_err(ndev, "failed to read (phy:%d reg:%x)\n",
494 			   phyid, regnum);
495 		return ret;
496 	}
497 
498 	return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0);
499 }
500 
501 static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum,
502 			     u16 val)
503 {
504 	struct net_device *ndev = bus->priv;
505 	struct ave_private *priv;
506 	u32 mdioctl, mdiosr;
507 	int ret;
508 
509 	priv = netdev_priv(ndev);
510 
511 	/* write address */
512 	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
513 
514 	/* write data */
515 	writel(val, priv->base + AVE_MDIOWDR);
516 
517 	/* write request */
518 	mdioctl = readl(priv->base + AVE_MDIOCTR);
519 	writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ,
520 	       priv->base + AVE_MDIOCTR);
521 
522 	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
523 				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
524 	if (ret)
525 		netdev_err(ndev, "failed to write (phy:%d reg:%x)\n",
526 			   phyid, regnum);
527 
528 	return ret;
529 }
530 
531 static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc,
532 		       void *ptr, size_t len, enum dma_data_direction dir,
533 		       dma_addr_t *paddr)
534 {
535 	dma_addr_t map_addr;
536 
537 	map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir);
538 	if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr)))
539 		return -ENOMEM;
540 
541 	desc->skbs_dma = map_addr;
542 	desc->skbs_dmalen = len;
543 	*paddr = map_addr;
544 
545 	return 0;
546 }
547 
548 static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc,
549 			  enum dma_data_direction dir)
550 {
551 	if (!desc->skbs_dma)
552 		return;
553 
554 	dma_unmap_single(ndev->dev.parent,
555 			 desc->skbs_dma, desc->skbs_dmalen, dir);
556 	desc->skbs_dma = 0;
557 }
558 
559 /* Prepare Rx descriptor and memory */
560 static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
561 {
562 	struct ave_private *priv = netdev_priv(ndev);
563 	struct sk_buff *skb;
564 	dma_addr_t paddr;
565 	int ret;
566 
567 	skb = priv->rx.desc[entry].skbs;
568 	if (!skb) {
569 		skb = netdev_alloc_skb_ip_align(ndev,
570 						AVE_MAX_ETHFRAME);
571 		if (!skb) {
572 			netdev_err(ndev, "can't allocate skb for Rx\n");
573 			return -ENOMEM;
574 		}
575 	}
576 
577 	/* set disable to cmdsts */
578 	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
579 			      AVE_STS_INTR | AVE_STS_OWN);
580 
581 	/* map Rx buffer
582 	 * Rx buffer set to the Rx descriptor has two restrictions:
583 	 * - Rx buffer address is 4 byte aligned.
584 	 * - Rx buffer begins with 2 byte headroom, and data will be put from
585 	 *   (buffer + 2).
586 	 * To satisfy this, specify the address to put back the buffer
587 	 * pointer advanced by NET_IP_ALIGN by netdev_alloc_skb_ip_align(),
588 	 * and expand the map size by NET_IP_ALIGN.
589 	 */
590 	ret = ave_dma_map(ndev, &priv->rx.desc[entry],
591 			  skb->data - NET_IP_ALIGN,
592 			  AVE_MAX_ETHFRAME + NET_IP_ALIGN,
593 			  DMA_FROM_DEVICE, &paddr);
594 	if (ret) {
595 		netdev_err(ndev, "can't map skb for Rx\n");
596 		dev_kfree_skb_any(skb);
597 		return ret;
598 	}
599 	priv->rx.desc[entry].skbs = skb;
600 
601 	/* set buffer pointer */
602 	ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr);
603 
604 	/* set enable to cmdsts */
605 	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
606 			      AVE_STS_INTR | AVE_MAX_ETHFRAME);
607 
608 	return ret;
609 }
610 
611 /* Switch state of descriptor */
/* Switch state of descriptor
 *
 * Drives the descriptor controller (AVE_DESCC) into the requested @state:
 * start/stop the whole engine, or suspend/resume only the Rx ring.
 * Returns 0 on success, -EBUSY if hardware does not reach the requested
 * state in time, -EINVAL for an unknown state.
 */
static int ave_desc_switch(struct net_device *ndev, enum desc_state state)
{
	struct ave_private *priv = netdev_priv(ndev);
	int ret = 0;
	u32 val;

	switch (state) {
	case AVE_DESC_START:
		/* enable Tx and Rx ring0 processing */
		writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC);
		break;

	case AVE_DESC_STOP:
		/* clear all enables, then wait for the engine to go idle */
		writel(0, priv->base + AVE_DESCC);
		if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val,
				       150, 15000)) {
			netdev_err(ndev, "can't stop descriptor\n");
			ret = -EBUSY;
		}
		break;

	case AVE_DESC_RX_SUSPEND:
		/* request Rx pause; the status half of the register is
		 * masked out before write-back (presumably it must not be
		 * rewritten -- confirm against the HW spec)
		 */
		val = readl(priv->base + AVE_DESCC);
		val |= AVE_DESCC_RDSTP;
		val &= ~AVE_DESCC_STATUS_MASK;
		writel(val, priv->base + AVE_DESCC);
		/* the request bit is mirrored 16 bits up in the status
		 * half once hardware has acknowledged the pause
		 */
		if (readl_poll_timeout(priv->base + AVE_DESCC, val,
				       val & (AVE_DESCC_RDSTP << 16),
				       150, 150000)) {
			netdev_err(ndev, "can't suspend descriptor\n");
			ret = -EBUSY;
		}
		break;

	case AVE_DESC_RX_PERMIT:
		/* clear the pause request to resume Rx processing */
		val = readl(priv->base + AVE_DESCC);
		val &= ~AVE_DESCC_RDSTP;
		val &= ~AVE_DESCC_STATUS_MASK;
		writel(val, priv->base + AVE_DESCC);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
659 
/* Reclaim completed Tx descriptors between done_idx and proc_idx.
 *
 * Called from the Tx NAPI poll handler. Frees skbs the hardware has
 * finished with, accumulates packet/byte/error counters, and wakes the
 * Tx queue if any buffer was released while the queue was stopped.
 * Returns the number of freed buffers.
 */
static int ave_tx_complete(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 proc_idx, done_idx, ndesc, cmdsts;
	unsigned int nr_freebuf = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;

	proc_idx = priv->tx.proc_idx;
	done_idx = priv->tx.done_idx;
	ndesc    = priv->tx.ndesc;

	/* free pre-stored skb from done_idx to proc_idx */
	while (proc_idx != done_idx) {
		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx);

		/* do nothing if owner is HW (==1 for Tx) */
		if (cmdsts & AVE_STS_OWN)
			break;

		/* check Tx status and updates statistics */
		if (cmdsts & AVE_STS_OK) {
			tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK;
			/* success: count the packet only on its last buffer */
			if (cmdsts & AVE_STS_LAST)
				tx_packets++;
		} else {
			/* error: count it once, on the last buffer */
			if (cmdsts & AVE_STS_LAST) {
				priv->stats_tx.errors++;
				if (cmdsts & (AVE_STS_OWC | AVE_STS_EC))
					priv->stats_tx.collisions++;
			}
		}

		/* release skb */
		if (priv->tx.desc[done_idx].skbs) {
			ave_dma_unmap(ndev, &priv->tx.desc[done_idx],
				      DMA_TO_DEVICE);
			dev_consume_skb_any(priv->tx.desc[done_idx].skbs);
			priv->tx.desc[done_idx].skbs = NULL;
			nr_freebuf++;
		}
		done_idx = (done_idx + 1) % ndesc;
	}

	priv->tx.done_idx = done_idx;

	/* update stats under the u64 sync so readers get a consistent
	 * snapshot of the 64-bit counters
	 */
	u64_stats_update_begin(&priv->stats_tx.syncp);
	priv->stats_tx.packets += tx_packets;
	priv->stats_tx.bytes   += tx_bytes;
	u64_stats_update_end(&priv->stats_tx.syncp);

	/* wake queue for freeing buffer */
	if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf)
		netif_wake_queue(ndev);

	return nr_freebuf;
}
720 
/* Receive up to @num packets from the Rx ring.
 *
 * Hands completed skbs to the network stack, updates stats, then refills
 * the descriptors that were consumed. Returns the number of descriptors
 * processed (including errored ones).
 */
static int ave_rx_receive(struct net_device *ndev, int num)
{
	struct ave_private *priv = netdev_priv(ndev);
	unsigned int rx_packets = 0;
	unsigned int rx_bytes = 0;
	u32 proc_idx, done_idx;
	struct sk_buff *skb;
	unsigned int pktlen;
	int restpkt, npkts;
	u32 ndesc, cmdsts;

	proc_idx = priv->rx.proc_idx;
	done_idx = priv->rx.done_idx;
	ndesc    = priv->rx.ndesc;
	/* descriptors we may process before running onto not-yet-refilled
	 * slots (one slot is kept as a gap between proc and done)
	 */
	restpkt  = ((proc_idx + ndesc - 1) - done_idx) % ndesc;

	for (npkts = 0; npkts < num; npkts++) {
		/* we can't receive more packet, so fill desc quickly */
		if (--restpkt < 0)
			break;

		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx);

		/* do nothing if owner is HW (==0 for Rx) */
		if (!(cmdsts & AVE_STS_OWN))
			break;

		/* errored descriptors are counted and skipped */
		if (!(cmdsts & AVE_STS_OK)) {
			priv->stats_rx.errors++;
			proc_idx = (proc_idx + 1) % ndesc;
			continue;
		}

		pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK;

		/* get skbuff for rx */
		skb = priv->rx.desc[proc_idx].skbs;
		priv->rx.desc[proc_idx].skbs = NULL;

		ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE);

		skb->dev = ndev;
		skb_put(skb, pktlen);
		skb->protocol = eth_type_trans(skb, ndev);

		/* trust HW checksum only when it was performed and clean */
		if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		rx_packets++;
		rx_bytes += pktlen;

		netif_receive_skb(skb);

		proc_idx = (proc_idx + 1) % ndesc;
	}

	priv->rx.proc_idx = proc_idx;

	/* update stats */
	u64_stats_update_begin(&priv->stats_rx.syncp);
	priv->stats_rx.packets += rx_packets;
	priv->stats_rx.bytes   += rx_bytes;
	u64_stats_update_end(&priv->stats_rx.syncp);

	/* refill the Rx buffers consumed above; stop early if allocation
	 * or mapping fails and retry on the next pass
	 */
	while (proc_idx != done_idx) {
		if (ave_rxdesc_prepare(ndev, done_idx))
			break;
		done_idx = (done_idx + 1) % ndesc;
	}

	priv->rx.done_idx = done_idx;

	return npkts;
}
796 
797 static int ave_napi_poll_rx(struct napi_struct *napi, int budget)
798 {
799 	struct ave_private *priv;
800 	struct net_device *ndev;
801 	int num;
802 
803 	priv = container_of(napi, struct ave_private, napi_rx);
804 	ndev = priv->ndev;
805 
806 	num = ave_rx_receive(ndev, budget);
807 	if (num < budget) {
808 		napi_complete_done(napi, num);
809 
810 		/* enable Rx interrupt when NAPI finishes */
811 		ave_irq_enable(ndev, AVE_GI_RXIINT);
812 	}
813 
814 	return num;
815 }
816 
817 static int ave_napi_poll_tx(struct napi_struct *napi, int budget)
818 {
819 	struct ave_private *priv;
820 	struct net_device *ndev;
821 	int num;
822 
823 	priv = container_of(napi, struct ave_private, napi_tx);
824 	ndev = priv->ndev;
825 
826 	num = ave_tx_complete(ndev);
827 	napi_complete(napi);
828 
829 	/* enable Tx interrupt when NAPI finishes */
830 	ave_irq_enable(ndev, AVE_GI_TX);
831 
832 	return num;
833 }
834 
/* Reset the whole MAC and the external PHY.
 *
 * Programs the configuration register for the current PHY interface mode,
 * sequences the RMII bridge and global/PHY resets with the required
 * delays, and leaves all interrupts masked afterwards.
 */
static void ave_global_reset(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 val;

	/* set config register */
	val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE;
	if (!phy_interface_mode_is_rgmii(priv->phy_mode))
		val |= AVE_CFGR_MII;
	writel(val, priv->base + AVE_CFGR);

	/* reset RMII register */
	val = readl(priv->base + AVE_RSTCTRL);
	val &= ~AVE_RSTCTRL_RMIIRST;
	writel(val, priv->base + AVE_RSTCTRL);

	/* assert reset */
	writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR);
	msleep(20);

	/* 1st, negate PHY reset only */
	writel(AVE_GRR_GRST, priv->base + AVE_GRR);
	msleep(40);

	/* negate reset */
	writel(0, priv->base + AVE_GRR);
	msleep(40);

	/* negate RMII register */
	val = readl(priv->base + AVE_RSTCTRL);
	val |= AVE_RSTCTRL_RMIIRST;
	writel(val, priv->base + AVE_RSTCTRL);

	/* everything is masked until the caller enables what it needs */
	ave_irq_disable_all(ndev);
}
870 
/* Recover from an RxFIFO overflow by resetting the RxFIFO.
 *
 * Temporarily disables MAC reception and pauses the Rx descriptor ring,
 * drains any packets already described, pulses the RxFIFO reset, clears
 * the overflow status, and restores the previous Rx state.
 */
static void ave_rxfifo_reset(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 rxcr_org;

	/* save and disable MAC receive op */
	rxcr_org = readl(priv->base + AVE_RXCR);
	writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR);

	/* suspend Rx descriptor */
	ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND);

	/* receive all packets before descriptor starts */
	ave_rx_receive(ndev, priv->rx.ndesc);

	/* assert reset */
	writel(AVE_GRR_RXFFR, priv->base + AVE_GRR);
	usleep_range(40, 50);

	/* negate reset */
	writel(0, priv->base + AVE_GRR);
	usleep_range(10, 20);

	/* negate interrupt status */
	writel(AVE_GI_RXOVF, priv->base + AVE_GISR);

	/* permit descriptor */
	ave_desc_switch(ndev, AVE_DESC_RX_PERMIT);

	/* restore MAC receive op */
	writel(rxcr_org, priv->base + AVE_RXCR);
}
903 
/* Interrupt handler: acknowledge simple events directly and defer Rx/Tx
 * processing to the NAPI contexts.
 *
 * All sources are masked on entry; the saved mask is restored on exit,
 * minus any source handed to NAPI (NAPI re-enables it when it finishes).
 */
static irqreturn_t ave_irq_handler(int irq, void *netdev)
{
	struct net_device *ndev = (struct net_device *)netdev;
	struct ave_private *priv = netdev_priv(ndev);
	u32 gimr_val, gisr_val;

	gimr_val = ave_irq_disable_all(ndev);

	/* get interrupt status */
	gisr_val = readl(priv->base + AVE_GISR);

	/* PHY: just acknowledge */
	if (gisr_val & AVE_GI_PHY)
		writel(AVE_GI_PHY, priv->base + AVE_GISR);

	/* check exceeding packet */
	if (gisr_val & AVE_GI_RXERR) {
		writel(AVE_GI_RXERR, priv->base + AVE_GISR);
		netdev_err(ndev, "receive a packet exceeding frame buffer\n");
	}

	/* only act on sources that were actually enabled */
	gisr_val &= gimr_val;
	if (!gisr_val)
		goto exit_isr;

	/* RxFIFO overflow */
	if (gisr_val & AVE_GI_RXOVF) {
		priv->stats_rx.fifo_errors++;
		ave_rxfifo_reset(ndev);
		goto exit_isr;
	}

	/* Rx drop */
	if (gisr_val & AVE_GI_RXDROP) {
		priv->stats_rx.dropped++;
		writel(AVE_GI_RXDROP, priv->base + AVE_GISR);
	}

	/* Rx interval */
	if (gisr_val & AVE_GI_RXIINT) {
		napi_schedule(&priv->napi_rx);
		/* still force to disable Rx interrupt until NAPI finishes */
		gimr_val &= ~AVE_GI_RXIINT;
	}

	/* Tx completed */
	if (gisr_val & AVE_GI_TX) {
		napi_schedule(&priv->napi_tx);
		/* still force to disable Tx interrupt until NAPI finishes */
		gimr_val &= ~AVE_GI_TX;
	}

exit_isr:
	ave_irq_restore(ndev, gimr_val);

	return IRQ_HANDLED;
}
961 
962 static int ave_pfsel_start(struct net_device *ndev, unsigned int entry)
963 {
964 	struct ave_private *priv = netdev_priv(ndev);
965 	u32 val;
966 
967 	if (WARN_ON(entry > AVE_PF_SIZE))
968 		return -EINVAL;
969 
970 	val = readl(priv->base + AVE_PFEN);
971 	writel(val | BIT(entry), priv->base + AVE_PFEN);
972 
973 	return 0;
974 }
975 
976 static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry)
977 {
978 	struct ave_private *priv = netdev_priv(ndev);
979 	u32 val;
980 
981 	if (WARN_ON(entry > AVE_PF_SIZE))
982 		return -EINVAL;
983 
984 	val = readl(priv->base + AVE_PFEN);
985 	writel(val & ~BIT(entry), priv->base + AVE_PFEN);
986 
987 	return 0;
988 }
989 
990 static int ave_pfsel_set_macaddr(struct net_device *ndev,
991 				 unsigned int entry,
992 				 const unsigned char *mac_addr,
993 				 unsigned int set_size)
994 {
995 	struct ave_private *priv = netdev_priv(ndev);
996 
997 	if (WARN_ON(entry > AVE_PF_SIZE))
998 		return -EINVAL;
999 	if (WARN_ON(set_size > 6))
1000 		return -EINVAL;
1001 
1002 	ave_pfsel_stop(ndev, entry);
1003 
1004 	/* set MAC address for the filter */
1005 	ave_hw_write_macaddr(ndev, mac_addr,
1006 			     AVE_PKTF(entry), AVE_PKTF(entry) + 4);
1007 
1008 	/* set byte mask */
1009 	writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0,
1010 	       priv->base + AVE_PFMBYTE(entry));
1011 	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
1012 
1013 	/* set bit mask filter */
1014 	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
1015 
1016 	/* set selector to ring 0 */
1017 	writel(0, priv->base + AVE_PFSEL(entry));
1018 
1019 	/* restart filter */
1020 	ave_pfsel_start(ndev, entry);
1021 
1022 	return 0;
1023 }
1024 
1025 static void ave_pfsel_set_promisc(struct net_device *ndev,
1026 				  unsigned int entry, u32 rxring)
1027 {
1028 	struct ave_private *priv = netdev_priv(ndev);
1029 
1030 	if (WARN_ON(entry > AVE_PF_SIZE))
1031 		return;
1032 
1033 	ave_pfsel_stop(ndev, entry);
1034 
1035 	/* set byte mask */
1036 	writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry));
1037 	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
1038 
1039 	/* set bit mask filter */
1040 	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
1041 
1042 	/* set selector to rxring */
1043 	writel(rxring, priv->base + AVE_PFSEL(entry));
1044 
1045 	ave_pfsel_start(ndev, entry);
1046 }
1047 
1048 static void ave_pfsel_init(struct net_device *ndev)
1049 {
1050 	unsigned char bcast_mac[ETH_ALEN];
1051 	int i;
1052 
1053 	eth_broadcast_addr(bcast_mac);
1054 
1055 	for (i = 0; i < AVE_PF_SIZE; i++)
1056 		ave_pfsel_stop(ndev, i);
1057 
1058 	/* promiscious entry, select ring 0 */
1059 	ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0);
1060 
1061 	/* unicast entry */
1062 	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
1063 
1064 	/* broadcast entry */
1065 	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6);
1066 }
1067 
/* phylib adjust_link callback: mirror the negotiated link parameters
 * (speed, duplex, pause) into the MAC's TXCR/RXCR/LINKSEL registers.
 * Runs whenever the PHY state machine reports a link change.
 */
static void ave_phy_adjust_link(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	u32 val, txcr, rxcr, rxcr_org;
	u16 rmt_adv = 0, lcl_adv = 0;
	u8 cap;

	/* set RGMII speed */
	val = readl(priv->base + AVE_TXCR);
	val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G);

	/* 1G is only selectable on RGMII; 100M applies to any mode */
	if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000)
		val |= AVE_TXCR_TXSPD_1G;
	else if (phydev->speed == SPEED_100)
		val |= AVE_TXCR_TXSPD_100;

	writel(val, priv->base + AVE_TXCR);

	/* set RMII speed (100M/10M only) */
	if (!phy_interface_is_rgmii(phydev)) {
		val = readl(priv->base + AVE_LINKSEL);
		if (phydev->speed == SPEED_10)
			val &= ~AVE_LINKSEL_100M;
		else
			val |= AVE_LINKSEL_100M;
		writel(val, priv->base + AVE_LINKSEL);
	}

	/* check current RXCR/TXCR */
	rxcr = readl(priv->base + AVE_RXCR);
	txcr = readl(priv->base + AVE_TXCR);
	rxcr_org = rxcr;

	if (phydev->duplex) {
		rxcr |= AVE_RXCR_FDUPEN;

		/* resolve flow control from local and link-partner
		 * advertisement (full duplex only)
		 */
		if (phydev->pause)
			rmt_adv |= LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;
		if (phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (cap & FLOW_CTRL_TX)
			txcr |= AVE_TXCR_FLOCTR;
		else
			txcr &= ~AVE_TXCR_FLOCTR;
		if (cap & FLOW_CTRL_RX)
			rxcr |= AVE_RXCR_FLOCTR;
		else
			rxcr &= ~AVE_RXCR_FLOCTR;
	} else {
		/* half duplex: no pause frames in either direction */
		rxcr &= ~AVE_RXCR_FDUPEN;
		rxcr &= ~AVE_RXCR_FLOCTR;
		txcr &= ~AVE_TXCR_FLOCTR;
	}

	/* only touch the MAC when something actually changed; the Rx MAC
	 * is disabled before RXCR is rewritten, then re-enabled with the
	 * new value — presumably a hardware requirement, do not reorder
	 */
	if (rxcr_org != rxcr) {
		/* disable Rx mac */
		writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR);
		/* change and enable TX/Rx mac */
		writel(txcr, priv->base + AVE_TXCR);
		writel(rxcr, priv->base + AVE_RXCR);
	}

	phy_print_status(phydev);
}
1139 
1140 static void ave_macaddr_init(struct net_device *ndev)
1141 {
1142 	ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R);
1143 
1144 	/* pfsel unicast entry */
1145 	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
1146 }
1147 
1148 static int ave_init(struct net_device *ndev)
1149 {
1150 	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1151 	struct ave_private *priv = netdev_priv(ndev);
1152 	struct device *dev = ndev->dev.parent;
1153 	struct device_node *np = dev->of_node;
1154 	struct device_node *mdio_np;
1155 	struct phy_device *phydev;
1156 	int ret;
1157 
1158 	/* enable clk because of hw access until ndo_open */
1159 	ret = clk_prepare_enable(priv->clk);
1160 	if (ret) {
1161 		dev_err(dev, "can't enable clock\n");
1162 		return ret;
1163 	}
1164 	ret = reset_control_deassert(priv->rst);
1165 	if (ret) {
1166 		dev_err(dev, "can't deassert reset\n");
1167 		goto out_clk_disable;
1168 	}
1169 
1170 	ave_global_reset(ndev);
1171 
1172 	mdio_np = of_get_child_by_name(np, "mdio");
1173 	if (!mdio_np) {
1174 		dev_err(dev, "mdio node not found\n");
1175 		ret = -EINVAL;
1176 		goto out_reset_assert;
1177 	}
1178 	ret = of_mdiobus_register(priv->mdio, mdio_np);
1179 	of_node_put(mdio_np);
1180 	if (ret) {
1181 		dev_err(dev, "failed to register mdiobus\n");
1182 		goto out_reset_assert;
1183 	}
1184 
1185 	phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link);
1186 	if (!phydev) {
1187 		dev_err(dev, "could not attach to PHY\n");
1188 		ret = -ENODEV;
1189 		goto out_mdio_unregister;
1190 	}
1191 
1192 	priv->phydev = phydev;
1193 
1194 	phy_ethtool_get_wol(phydev, &wol);
1195 	device_set_wakeup_capable(&ndev->dev, !!wol.supported);
1196 
1197 	if (!phy_interface_is_rgmii(phydev)) {
1198 		phydev->supported &= ~PHY_GBIT_FEATURES;
1199 		phydev->supported |= PHY_BASIC_FEATURES;
1200 	}
1201 	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1202 
1203 	phy_attached_info(phydev);
1204 
1205 	return 0;
1206 
1207 out_mdio_unregister:
1208 	mdiobus_unregister(priv->mdio);
1209 out_reset_assert:
1210 	reset_control_assert(priv->rst);
1211 out_clk_disable:
1212 	clk_disable_unprepare(priv->clk);
1213 
1214 	return ret;
1215 }
1216 
/* ndo_uninit callback: undo ave_init() in reverse order — detach the
 * PHY, unregister the MDIO bus, then power the block down.
 */
static void ave_uninit(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);

	phy_disconnect(priv->phydev);
	mdiobus_unregister(priv->mdio);

	/* disable clk because of hw access after ndo_stop */
	reset_control_assert(priv->rst);
	clk_disable_unprepare(priv->clk);
}
1228 
/* ndo_open callback: allocate and program the Tx/Rx descriptor rings,
 * initialize packet filters and MAC configuration, then enable
 * interrupts, NAPI, the PHY and the Tx queue.  The ordering of the
 * register writes follows the hardware bring-up sequence — do not
 * reorder without checking the datasheet.
 */
static int ave_open(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	int entry;
	int ret;
	u32 val;

	ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name,
			  ndev);
	if (ret)
		return ret;

	/* software bookkeeping for the rings; the hardware descriptors
	 * themselves live in the device at priv->{tx,rx}.daddr
	 */
	priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc),
				GFP_KERNEL);
	if (!priv->tx.desc) {
		ret = -ENOMEM;
		goto out_free_irq;
	}

	priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc),
				GFP_KERNEL);
	if (!priv->rx.desc) {
		kfree(priv->tx.desc);
		ret = -ENOMEM;
		goto out_free_irq;
	}

	/* initialize Tx work and descriptor */
	priv->tx.proc_idx = 0;
	priv->tx.done_idx = 0;
	for (entry = 0; entry < priv->tx.ndesc; entry++) {
		ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0);
		ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0);
	}
	writel(AVE_TXDC_ADDR_START |
	       (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE),
	       priv->base + AVE_TXDC);

	/* initialize Rx work and descriptor
	 * NOTE(review): a failing ave_rxdesc_prepare() only breaks out of
	 * the loop, leaving the remaining entries unpopulated rather than
	 * failing ave_open() — confirm this degraded mode is intentional
	 */
	priv->rx.proc_idx = 0;
	priv->rx.done_idx = 0;
	for (entry = 0; entry < priv->rx.ndesc; entry++) {
		if (ave_rxdesc_prepare(ndev, entry))
			break;
	}
	writel(AVE_RXDC0_ADDR_START |
	       (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE),
	       priv->base + AVE_RXDC0);

	ave_desc_switch(ndev, AVE_DESC_START);

	ave_pfsel_init(ndev);
	ave_macaddr_init(ndev);

	/* set Rx configuration */
	/* full duplex, enable pause drop, enable flow control */
	val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN |
		AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK);
	writel(val, priv->base + AVE_RXCR);

	/* set Tx configuration */
	/* enable flow control, disable loopback */
	writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR);

	/* enable timer, clear EN,INTM, and mask interval unit(BSCK) */
	val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK;
	val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
	writel(val, priv->base + AVE_IIRQC);

	/* unmask Rx interval, Rx overflow and Tx interrupts */
	val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX;
	ave_irq_restore(ndev, val);

	napi_enable(&priv->napi_rx);
	napi_enable(&priv->napi_tx);

	phy_start(ndev->phydev);
	phy_start_aneg(ndev->phydev);
	netif_start_queue(ndev);

	return 0;

out_free_irq:
	disable_irq(priv->irq);
	free_irq(priv->irq, ndev);

	return ret;
}
1316 
1317 static int ave_stop(struct net_device *ndev)
1318 {
1319 	struct ave_private *priv = netdev_priv(ndev);
1320 	int entry;
1321 
1322 	ave_irq_disable_all(ndev);
1323 	disable_irq(priv->irq);
1324 	free_irq(priv->irq, ndev);
1325 
1326 	netif_tx_disable(ndev);
1327 	phy_stop(ndev->phydev);
1328 	napi_disable(&priv->napi_tx);
1329 	napi_disable(&priv->napi_rx);
1330 
1331 	ave_desc_switch(ndev, AVE_DESC_STOP);
1332 
1333 	/* free Tx buffer */
1334 	for (entry = 0; entry < priv->tx.ndesc; entry++) {
1335 		if (!priv->tx.desc[entry].skbs)
1336 			continue;
1337 
1338 		ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE);
1339 		dev_kfree_skb_any(priv->tx.desc[entry].skbs);
1340 		priv->tx.desc[entry].skbs = NULL;
1341 	}
1342 	priv->tx.proc_idx = 0;
1343 	priv->tx.done_idx = 0;
1344 
1345 	/* free Rx buffer */
1346 	for (entry = 0; entry < priv->rx.ndesc; entry++) {
1347 		if (!priv->rx.desc[entry].skbs)
1348 			continue;
1349 
1350 		ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE);
1351 		dev_kfree_skb_any(priv->rx.desc[entry].skbs);
1352 		priv->rx.desc[entry].skbs = NULL;
1353 	}
1354 	priv->rx.proc_idx = 0;
1355 	priv->rx.done_idx = 0;
1356 
1357 	kfree(priv->tx.desc);
1358 	kfree(priv->rx.desc);
1359 
1360 	return 0;
1361 }
1362 
/* ndo_start_xmit: map one skb into the next free Tx descriptor and
 * hand it to the hardware.  The descriptor address is written before
 * cmdsts with AVE_STS_OWN — presumably the OWN bit transfers ownership
 * to the device, so that ordering must not change.
 */
static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 proc_idx, done_idx, ndesc, cmdsts;
	int ret, freepkt;
	dma_addr_t paddr;

	proc_idx = priv->tx.proc_idx;
	done_idx = priv->tx.done_idx;
	ndesc = priv->tx.ndesc;
	/* free slots in the circular ring, keeping one slot as a gap so
	 * proc_idx == done_idx always means "empty"
	 */
	freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc;

	/* stop queue when not enough entry */
	if (unlikely(freepkt < 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* add padding for short packet */
	if (skb_put_padto(skb, ETH_ZLEN)) {
		/* skb is already freed by skb_put_padto() on failure */
		priv->stats_tx.dropped++;
		return NETDEV_TX_OK;
	}

	/* map Tx buffer
	 * Tx buffer set to the Tx descriptor doesn't have any restriction.
	 */
	ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx],
			  skb->data, skb->len, DMA_TO_DEVICE, &paddr);
	if (ret) {
		dev_kfree_skb_any(skb);
		priv->stats_tx.dropped++;
		return NETDEV_TX_OK;
	}

	/* remember the skb so the Tx completion path can free it */
	priv->tx.desc[proc_idx].skbs = skb;

	ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr);

	cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST |
		(skb->len & AVE_STS_PKTLEN_TX_MASK);

	/* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */
	if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev))
		cmdsts |= AVE_STS_INTR;

	/* disable checksum calculation when skb doesn't calculate checksum */
	if (skb->ip_summed == CHECKSUM_NONE ||
	    skb->ip_summed == CHECKSUM_UNNECESSARY)
		cmdsts |= AVE_STS_NOCSUM;

	ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts);

	priv->tx.proc_idx = (proc_idx + 1) % ndesc;

	return NETDEV_TX_OK;
}
1420 
1421 static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
1422 {
1423 	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
1424 }
1425 
/* leading octets of the IPv4 (01:xx:..) and IPv6 (33:xx:..) multicast
 * MAC prefixes; only the first byte is significant — they are installed
 * with set_size = 1 in ave_set_rx_mode() to match all multicast frames
 */
static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 };
1428 
1429 static void ave_set_rx_mode(struct net_device *ndev)
1430 {
1431 	struct ave_private *priv = netdev_priv(ndev);
1432 	struct netdev_hw_addr *hw_adr;
1433 	int count, mc_cnt;
1434 	u32 val;
1435 
1436 	/* MAC addr filter enable for promiscious mode */
1437 	mc_cnt = netdev_mc_count(ndev);
1438 	val = readl(priv->base + AVE_RXCR);
1439 	if (ndev->flags & IFF_PROMISC || !mc_cnt)
1440 		val &= ~AVE_RXCR_AFEN;
1441 	else
1442 		val |= AVE_RXCR_AFEN;
1443 	writel(val, priv->base + AVE_RXCR);
1444 
1445 	/* set all multicast address */
1446 	if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) {
1447 		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST,
1448 				      v4multi_macadr, 1);
1449 		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1,
1450 				      v6multi_macadr, 1);
1451 	} else {
1452 		/* stop all multicast filter */
1453 		for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++)
1454 			ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count);
1455 
1456 		/* set multicast addresses */
1457 		count = 0;
1458 		netdev_for_each_mc_addr(hw_adr, ndev) {
1459 			if (count == mc_cnt)
1460 				break;
1461 			ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count,
1462 					      hw_adr->addr, 6);
1463 			count++;
1464 		}
1465 	}
1466 }
1467 
1468 static void ave_get_stats64(struct net_device *ndev,
1469 			    struct rtnl_link_stats64 *stats)
1470 {
1471 	struct ave_private *priv = netdev_priv(ndev);
1472 	unsigned int start;
1473 
1474 	do {
1475 		start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp);
1476 		stats->rx_packets = priv->stats_rx.packets;
1477 		stats->rx_bytes	  = priv->stats_rx.bytes;
1478 	} while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start));
1479 
1480 	do {
1481 		start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp);
1482 		stats->tx_packets = priv->stats_tx.packets;
1483 		stats->tx_bytes	  = priv->stats_tx.bytes;
1484 	} while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start));
1485 
1486 	stats->rx_errors      = priv->stats_rx.errors;
1487 	stats->tx_errors      = priv->stats_tx.errors;
1488 	stats->rx_dropped     = priv->stats_rx.dropped;
1489 	stats->tx_dropped     = priv->stats_tx.dropped;
1490 	stats->rx_fifo_errors = priv->stats_rx.fifo_errors;
1491 	stats->collisions     = priv->stats_tx.collisions;
1492 }
1493 
/* ndo_set_mac_address: validate and store the new address via
 * eth_mac_addr(), then propagate it to the Rx MAC registers and the
 * unicast packet filter entry.
 */
static int ave_set_mac_address(struct net_device *ndev, void *p)
{
	int err;

	err = eth_mac_addr(ndev, p);
	if (err)
		return err;

	ave_macaddr_init(ndev);

	return 0;
}
1505 
/* netdev callbacks: ave_init/ave_uninit run at register/unregister
 * time and handle clock, reset, MDIO and PHY; ave_open/ave_stop run
 * at ifup/ifdown and manage the descriptor rings and interrupts
 */
static const struct net_device_ops ave_netdev_ops = {
	.ndo_init		= ave_init,
	.ndo_uninit		= ave_uninit,
	.ndo_open		= ave_open,
	.ndo_stop		= ave_stop,
	.ndo_start_xmit		= ave_start_xmit,
	.ndo_do_ioctl		= ave_ioctl,
	.ndo_set_rx_mode	= ave_set_rx_mode,
	.ndo_get_stats64	= ave_get_stats64,
	.ndo_set_mac_address	= ave_set_mac_address,
};
1517 
/* platform probe: parse DT resources (phy-mode, IRQ, registers, MAC
 * address), allocate and configure the net_device and its private
 * data, set up the MDIO bus structure and NAPI contexts, and finally
 * register the netdevice.  Hardware is not powered up here — that
 * happens in ave_init() via register_netdev().
 */
static int ave_probe(struct platform_device *pdev)
{
	const struct ave_soc_data *data;
	struct device *dev = &pdev->dev;
	char buf[ETHTOOL_FWVERS_LEN];
	phy_interface_t phy_mode;
	struct ave_private *priv;
	struct net_device *ndev;
	struct device_node *np;
	struct resource	*res;
	const void *mac_addr;
	void __iomem *base;
	u64 dma_mask;
	int irq, ret;
	u32 ave_id;

	data = of_device_get_match_data(dev);
	if (WARN_ON(!data))
		return -EINVAL;

	np = dev->of_node;
	/* NOTE(review): of_get_phy_mode() returns a negative errno on
	 * failure; the '< 0' test relies on phy_interface_t being an
	 * int-typed enum — confirm against the kernel version in use
	 */
	phy_mode = of_get_phy_mode(np);
	if (phy_mode < 0) {
		dev_err(dev, "phy-mode not found\n");
		return -EINVAL;
	}
	/* only RGMII (any variant), RMII and MII are supported */
	if ((!phy_interface_mode_is_rgmii(phy_mode)) &&
	    phy_mode != PHY_INTERFACE_MODE_RMII &&
	    phy_mode != PHY_INTERFACE_MODE_MII) {
		dev_err(dev, "phy-mode is invalid\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "IRQ not found\n");
		return irq;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	ndev = alloc_etherdev(sizeof(struct ave_private));
	if (!ndev) {
		dev_err(dev, "can't allocate ethernet device\n");
		return -ENOMEM;
	}

	ndev->netdev_ops = &ave_netdev_ops;
	ndev->ethtool_ops = &ave_ethtool_ops;
	SET_NETDEV_DEV(ndev, dev);

	/* hardware supports Tx IP checksum offload and Rx checksum */
	ndev->features    |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
	ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);

	/* MTU is bounded by the largest frame the MAC accepts */
	ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		ether_addr_copy(ndev->dev_addr, mac_addr);

	/* if the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		dev_warn(dev, "Using random MAC address: %pM\n",
			 ndev->dev_addr);
	}

	priv = netdev_priv(ndev);
	priv->base = base;
	priv->irq = irq;
	priv->ndev = ndev;
	priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE);
	priv->phy_mode = phy_mode;
	priv->data = data;

	/* descriptor word size and DMA mask depend on the SoC variant */
	if (IS_DESC_64BIT(priv)) {
		priv->desc_size = AVE_DESC_SIZE_64;
		priv->tx.daddr  = AVE_TXDM_64;
		priv->rx.daddr  = AVE_RXDM_64;
		dma_mask = DMA_BIT_MASK(64);
	} else {
		priv->desc_size = AVE_DESC_SIZE_32;
		priv->tx.daddr  = AVE_TXDM_32;
		priv->rx.daddr  = AVE_RXDM_32;
		dma_mask = DMA_BIT_MASK(32);
	}
	ret = dma_set_mask(dev, dma_mask);
	if (ret)
		goto out_free_netdev;

	priv->tx.ndesc = AVE_NR_TXDESC;
	priv->rx.ndesc = AVE_NR_RXDESC;

	u64_stats_init(&priv->stats_tx.syncp);
	u64_stats_init(&priv->stats_rx.syncp);

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		ret = PTR_ERR(priv->clk);
		goto out_free_netdev;
	}

	priv->rst = devm_reset_control_get_optional_shared(dev, NULL);
	if (IS_ERR(priv->rst)) {
		ret = PTR_ERR(priv->rst);
		goto out_free_netdev;
	}

	/* the MDIO bus is only registered later, in ave_init() */
	priv->mdio = devm_mdiobus_alloc(dev);
	if (!priv->mdio) {
		ret = -ENOMEM;
		goto out_free_netdev;
	}
	priv->mdio->priv = ndev;
	priv->mdio->parent = dev;
	priv->mdio->read = ave_mdiobus_read;
	priv->mdio->write = ave_mdiobus_write;
	priv->mdio->name = "uniphier-mdio";
	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* Register as a NAPI supported driver */
	netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc);
	netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx,
			  priv->tx.ndesc);

	platform_set_drvdata(pdev, ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "failed to register netdevice\n");
		goto out_del_napi;
	}

	/* get ID and version */
	ave_id = readl(priv->base + AVE_IDR);
	ave_hw_read_version(ndev, buf, sizeof(buf));

	dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n",
		 (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff,
		 (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff,
		 buf, priv->irq, phy_modes(phy_mode));

	return 0;

out_del_napi:
	netif_napi_del(&priv->napi_rx);
	netif_napi_del(&priv->napi_tx);
out_free_netdev:
	free_netdev(ndev);

	return ret;
}
1674 
/* platform remove: unregister the netdevice (which runs ave_uninit()
 * to power the block down), drop the NAPI contexts and free the
 * net_device.  devm-managed resources are released by the core.
 */
static int ave_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ave_private *priv = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&priv->napi_rx);
	netif_napi_del(&priv->napi_tx);
	free_netdev(ndev);

	return 0;
}
1687 
/* per-SoC configuration: among the visible fields only the descriptor
 * word size differs — LD20 uses 64-bit descriptors, the rest 32-bit
 */
static const struct ave_soc_data ave_pro4_data = {
	.is_desc_64bit = false,
};

static const struct ave_soc_data ave_pxs2_data = {
	.is_desc_64bit = false,
};

static const struct ave_soc_data ave_ld11_data = {
	.is_desc_64bit = false,
};

static const struct ave_soc_data ave_ld20_data = {
	.is_desc_64bit = true,
};
1703 
1704 static const struct of_device_id of_ave_match[] = {
1705 	{
1706 		.compatible = "socionext,uniphier-pro4-ave4",
1707 		.data = &ave_pro4_data,
1708 	},
1709 	{
1710 		.compatible = "socionext,uniphier-pxs2-ave4",
1711 		.data = &ave_pxs2_data,
1712 	},
1713 	{
1714 		.compatible = "socionext,uniphier-ld11-ave4",
1715 		.data = &ave_ld11_data,
1716 	},
1717 	{
1718 		.compatible = "socionext,uniphier-ld20-ave4",
1719 		.data = &ave_ld20_data,
1720 	},
1721 	{ /* Sentinel */ }
1722 };
1723 MODULE_DEVICE_TABLE(of, of_ave_match);
1724 
/* platform driver glue; module_platform_driver() generates the module
 * init/exit boilerplate that registers and unregisters the driver
 */
static struct platform_driver ave_driver = {
	.probe  = ave_probe,
	.remove = ave_remove,
	.driver	= {
		.name = "ave",
		.of_match_table	= of_ave_match,
	},
};
module_platform_driver(ave_driver);

MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
MODULE_LICENSE("GPL v2");
1737