1 // SPDX-License-Identifier: GPL-2.0
2 /**
3  * sni_ave.c - Socionext UniPhier AVE ethernet driver
4  * Copyright 2014 Panasonic Corporation
5  * Copyright 2015-2017 Socionext Inc.
6  */
7 
8 #include <linux/bitops.h>
9 #include <linux/clk.h>
10 #include <linux/etherdevice.h>
11 #include <linux/interrupt.h>
12 #include <linux/io.h>
13 #include <linux/iopoll.h>
14 #include <linux/mfd/syscon.h>
15 #include <linux/mii.h>
16 #include <linux/module.h>
17 #include <linux/netdevice.h>
18 #include <linux/of_net.h>
19 #include <linux/of_mdio.h>
20 #include <linux/of_platform.h>
21 #include <linux/phy.h>
22 #include <linux/regmap.h>
23 #include <linux/reset.h>
24 #include <linux/types.h>
25 #include <linux/u64_stats_sync.h>
26 
27 /* General Register Group */
28 #define AVE_IDR			0x000	/* ID */
29 #define AVE_VR			0x004	/* Version */
30 #define AVE_GRR			0x008	/* Global Reset */
31 #define AVE_CFGR		0x00c	/* Configuration */
32 
33 /* Interrupt Register Group */
34 #define AVE_GIMR		0x100	/* Global Interrupt Mask */
35 #define AVE_GISR		0x104	/* Global Interrupt Status */
36 
37 /* MAC Register Group */
38 #define AVE_TXCR		0x200	/* TX Setup */
39 #define AVE_RXCR		0x204	/* RX Setup */
40 #define AVE_RXMAC1R		0x208	/* MAC address (lower) */
41 #define AVE_RXMAC2R		0x20c	/* MAC address (upper) */
42 #define AVE_MDIOCTR		0x214	/* MDIO Control */
43 #define AVE_MDIOAR		0x218	/* MDIO Address */
44 #define AVE_MDIOWDR		0x21c	/* MDIO Data */
45 #define AVE_MDIOSR		0x220	/* MDIO Status */
46 #define AVE_MDIORDR		0x224	/* MDIO Rd Data */
47 
48 /* Descriptor Control Register Group */
49 #define AVE_DESCC		0x300	/* Descriptor Control */
50 #define AVE_TXDC		0x304	/* TX Descriptor Configuration */
51 #define AVE_RXDC0		0x308	/* RX Descriptor Ring0 Configuration */
52 #define AVE_IIRQC		0x34c	/* Interval IRQ Control */
53 
54 /* Packet Filter Register Group */
55 #define AVE_PKTF_BASE		0x800	/* PF Base Address */
56 #define AVE_PFMBYTE_BASE	0xd00	/* PF Mask Byte Base Address */
57 #define AVE_PFMBIT_BASE		0xe00	/* PF Mask Bit Base Address */
58 #define AVE_PFSEL_BASE		0xf00	/* PF Selector Base Address */
59 #define AVE_PFEN		0xffc	/* Packet Filter Enable */
60 #define AVE_PKTF(ent)		(AVE_PKTF_BASE + (ent) * 0x40)
61 #define AVE_PFMBYTE(ent)	(AVE_PFMBYTE_BASE + (ent) * 8)
62 #define AVE_PFMBIT(ent)		(AVE_PFMBIT_BASE + (ent) * 4)
63 #define AVE_PFSEL(ent)		(AVE_PFSEL_BASE + (ent) * 4)
64 
65 /* 64bit descriptor memory */
66 #define AVE_DESC_SIZE_64	12	/* Descriptor Size */
67 
68 #define AVE_TXDM_64		0x1000	/* Tx Descriptor Memory */
69 #define AVE_RXDM_64		0x1c00	/* Rx Descriptor Memory */
70 
71 #define AVE_TXDM_SIZE_64	0x0ba0	/* Tx Descriptor Memory Size 3KB */
72 #define AVE_RXDM_SIZE_64	0x6000	/* Rx Descriptor Memory Size 24KB */
73 
74 /* 32bit descriptor memory */
75 #define AVE_DESC_SIZE_32	8	/* Descriptor Size */
76 
77 #define AVE_TXDM_32		0x1000	/* Tx Descriptor Memory */
78 #define AVE_RXDM_32		0x1800	/* Rx Descriptor Memory */
79 
80 #define AVE_TXDM_SIZE_32	0x07c0	/* Tx Descriptor Memory Size 2KB */
81 #define AVE_RXDM_SIZE_32	0x4000	/* Rx Descriptor Memory Size 16KB */
82 
83 /* RMII Bridge Register Group */
84 #define AVE_RSTCTRL		0x8028	/* Reset control */
85 #define AVE_RSTCTRL_RMIIRST	BIT(16)
86 #define AVE_LINKSEL		0x8034	/* Link speed setting */
87 #define AVE_LINKSEL_100M	BIT(0)
88 
89 /* AVE_GRR */
90 #define AVE_GRR_RXFFR		BIT(5)	/* Reset RxFIFO */
91 #define AVE_GRR_PHYRST		BIT(4)	/* Reset external PHY */
92 #define AVE_GRR_GRST		BIT(0)	/* Reset all MAC */
93 
94 /* AVE_CFGR */
95 #define AVE_CFGR_FLE		BIT(31)	/* Filter Function */
96 #define AVE_CFGR_CHE		BIT(30)	/* Checksum Function */
97 #define AVE_CFGR_MII		BIT(27)	/* Func mode (1:MII/RMII, 0:RGMII) */
98 #define AVE_CFGR_IPFCEN		BIT(24)	/* IP fragment sum Enable */
99 
100 /* AVE_GISR (common with GIMR) */
101 #define AVE_GI_PHY		BIT(24)	/* PHY interrupt */
102 #define AVE_GI_TX		BIT(16)	/* Tx complete */
103 #define AVE_GI_RXERR		BIT(8)	/* Receive frame more than max size */
104 #define AVE_GI_RXOVF		BIT(7)	/* Overflow at the RxFIFO */
105 #define AVE_GI_RXDROP		BIT(6)	/* Drop packet */
106 #define AVE_GI_RXIINT		BIT(5)	/* Interval interrupt */
107 
108 /* AVE_TXCR */
109 #define AVE_TXCR_FLOCTR		BIT(18)	/* Flow control */
110 #define AVE_TXCR_TXSPD_1G	BIT(17)
111 #define AVE_TXCR_TXSPD_100	BIT(16)
112 
113 /* AVE_RXCR */
114 #define AVE_RXCR_RXEN		BIT(30)	/* Rx enable */
115 #define AVE_RXCR_FDUPEN		BIT(22)	/* Interface mode */
116 #define AVE_RXCR_FLOCTR		BIT(21)	/* Flow control */
117 #define AVE_RXCR_AFEN		BIT(19)	/* MAC address filter */
118 #define AVE_RXCR_DRPEN		BIT(18)	/* Drop pause frame */
119 #define AVE_RXCR_MPSIZ_MASK	GENMASK(10, 0)
120 
121 /* AVE_MDIOCTR */
122 #define AVE_MDIOCTR_RREQ	BIT(3)	/* Read request */
123 #define AVE_MDIOCTR_WREQ	BIT(2)	/* Write request */
124 
125 /* AVE_MDIOSR */
126 #define AVE_MDIOSR_STS		BIT(0)	/* access status */
127 
128 /* AVE_DESCC */
129 #define AVE_DESCC_STATUS_MASK	GENMASK(31, 16)
130 #define AVE_DESCC_RD0		BIT(8)	/* Enable Rx descriptor Ring0 */
131 #define AVE_DESCC_RDSTP		BIT(4)	/* Pause Rx descriptor */
132 #define AVE_DESCC_TD		BIT(0)	/* Enable Tx descriptor */
133 
134 /* AVE_TXDC */
135 #define AVE_TXDC_SIZE		GENMASK(27, 16)	/* Size of Tx descriptor */
136 #define AVE_TXDC_ADDR		GENMASK(11, 0)	/* Start address */
137 #define AVE_TXDC_ADDR_START	0
138 
139 /* AVE_RXDC0 */
140 #define AVE_RXDC0_SIZE		GENMASK(30, 16)	/* Size of Rx descriptor */
141 #define AVE_RXDC0_ADDR		GENMASK(14, 0)	/* Start address */
142 #define AVE_RXDC0_ADDR_START	0
143 
144 /* AVE_IIRQC */
145 #define AVE_IIRQC_EN0		BIT(27)	/* Enable interval interrupt Ring0 */
146 #define AVE_IIRQC_BSCK		GENMASK(15, 0)	/* Interval count unit */
147 
148 /* Command status for descriptor */
149 #define AVE_STS_OWN		BIT(31)	/* Descriptor ownership */
150 #define AVE_STS_INTR		BIT(29)	/* Request for interrupt */
151 #define AVE_STS_OK		BIT(27)	/* Normal transmit */
152 /* TX */
153 #define AVE_STS_NOCSUM		BIT(28)	/* No use HW checksum */
154 #define AVE_STS_1ST		BIT(26)	/* Head of buffer chain */
155 #define AVE_STS_LAST		BIT(25)	/* Tail of buffer chain */
156 #define AVE_STS_OWC		BIT(21)	/* Out of window,Late Collision */
157 #define AVE_STS_EC		BIT(20)	/* Excess collision occurred */
158 #define AVE_STS_PKTLEN_TX_MASK	GENMASK(15, 0)
159 /* RX */
160 #define AVE_STS_CSSV		BIT(21)	/* Checksum check performed */
161 #define AVE_STS_CSER		BIT(20)	/* Checksum error detected */
162 #define AVE_STS_PKTLEN_RX_MASK	GENMASK(10, 0)
163 
164 /* Packet filter */
165 #define AVE_PFMBYTE_MASK0	(GENMASK(31, 8) | GENMASK(5, 0))
166 #define AVE_PFMBYTE_MASK1	GENMASK(25, 0)
167 #define AVE_PFMBIT_MASK		GENMASK(15, 0)
168 
169 #define AVE_PF_SIZE		17	/* Number of all packet filter */
170 #define AVE_PF_MULTICAST_SIZE	7	/* Number of multicast filter */
171 
172 #define AVE_PFNUM_FILTER	0	/* No.0 */
173 #define AVE_PFNUM_UNICAST	1	/* No.1 */
174 #define AVE_PFNUM_BROADCAST	2	/* No.2 */
175 #define AVE_PFNUM_MULTICAST	11	/* No.11-17 */
176 
177 /* NETIF Message control */
178 #define AVE_DEFAULT_MSG_ENABLE	(NETIF_MSG_DRV    |	\
179 				 NETIF_MSG_PROBE  |	\
180 				 NETIF_MSG_LINK   |	\
181 				 NETIF_MSG_TIMER  |	\
182 				 NETIF_MSG_IFDOWN |	\
183 				 NETIF_MSG_IFUP   |	\
184 				 NETIF_MSG_RX_ERR |	\
185 				 NETIF_MSG_TX_ERR)
186 
187 /* Parameter for descriptor */
188 #define AVE_NR_TXDESC		32	/* Tx descriptor */
189 #define AVE_NR_RXDESC		64	/* Rx descriptor */
190 
191 #define AVE_DESC_OFS_CMDSTS	0
192 #define AVE_DESC_OFS_ADDRL	4
193 #define AVE_DESC_OFS_ADDRU	8
194 
195 /* Parameter for ethernet frame */
196 #define AVE_MAX_ETHFRAME	1518
197 
198 /* Parameter for interrupt */
199 #define AVE_INTM_COUNT		20
200 #define AVE_FORCE_TXINTCNT	1
201 
202 /* SG */
203 #define SG_ETPINMODE		0x540
204 #define SG_ETPINMODE_EXTPHY	BIT(1)	/* for LD11 */
205 #define SG_ETPINMODE_RMII(ins)	BIT(ins)
206 
207 #define IS_DESC_64BIT(p)	((p)->data->is_desc_64bit)
208 
209 #define AVE_MAX_CLKS		4
210 #define AVE_MAX_RSTS		2
211 
/* Selects which descriptor ring an accessor operates on */
enum desc_id {
	AVE_DESCID_RX,
	AVE_DESCID_TX,
};
216 
/* State transitions requested through ave_desc_switch() */
enum desc_state {
	AVE_DESC_RX_PERMIT,
	AVE_DESC_RX_SUSPEND,
	AVE_DESC_START,
	AVE_DESC_STOP,
};
223 
/* Per-entry software context: the skb attached to a descriptor entry
 * and its DMA mapping
 */
struct ave_desc {
	struct sk_buff	*skbs;
	dma_addr_t	skbs_dma;	/* 0 means "not mapped" (see ave_dma_unmap) */
	size_t		skbs_dmalen;
};
229 
/* Bookkeeping for one descriptor ring */
struct ave_desc_info {
	u32	ndesc;		/* number of descriptor */
	u32	daddr;		/* start address of descriptor */
	u32	proc_idx;	/* index of processing packet */
	u32	done_idx;	/* index of processed packet */
	struct ave_desc *desc;	/* skb info related descriptor */
};
237 
/* Per-direction statistics; @syncp guards the 64-bit counters */
struct ave_stats {
	struct	u64_stats_sync	syncp;
	u64	packets;
	u64	bytes;
	u64	errors;
	u64	dropped;
	u64	collisions;
	u64	fifo_errors;
};
247 
/* Driver private data, embedded in the net_device */
struct ave_private {
	void __iomem            *base;	/* mapped register space */
	int                     irq;
	int			phy_id;
	unsigned int		desc_size;	/* bytes per HW descriptor (8 or 12) */
	u32			msg_enable;
	int			nclks;
	struct clk		*clk[AVE_MAX_CLKS];
	int			nrsts;
	struct reset_control	*rst[AVE_MAX_RSTS];
	phy_interface_t		phy_mode;
	struct phy_device	*phydev;
	struct mii_bus		*mdio;
	struct regmap		*regmap;
	unsigned int		pinmode_mask;
	unsigned int		pinmode_val;

	/* stats */
	struct ave_stats	stats_rx;
	struct ave_stats	stats_tx;

	/* NAPI support */
	struct net_device	*ndev;
	struct napi_struct	napi_rx;
	struct napi_struct	napi_tx;

	/* descriptor */
	struct ave_desc_info	rx;
	struct ave_desc_info	tx;

	/* flow control */
	int pause_auto;
	int pause_rx;
	int pause_tx;

	const struct ave_soc_data *data;
};
285 
/* Per-SoC configuration selected at probe time */
struct ave_soc_data {
	bool	is_desc_64bit;
	const char	*clock_names[AVE_MAX_CLKS];
	const char	*reset_names[AVE_MAX_RSTS];
	int	(*get_pinmode)(struct ave_private *priv,
			       phy_interface_t phy_mode, u32 arg);
};
293 
294 static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry,
295 			 int offset)
296 {
297 	struct ave_private *priv = netdev_priv(ndev);
298 	u32 addr;
299 
300 	addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
301 		+ entry * priv->desc_size + offset;
302 
303 	return readl(priv->base + addr);
304 }
305 
/* Read the command/status word of descriptor @entry. */
static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id,
				int entry)
{
	return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS);
}
311 
312 static void ave_desc_write(struct net_device *ndev, enum desc_id id,
313 			   int entry, int offset, u32 val)
314 {
315 	struct ave_private *priv = netdev_priv(ndev);
316 	u32 addr;
317 
318 	addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
319 		+ entry * priv->desc_size + offset;
320 
321 	writel(val, priv->base + addr);
322 }
323 
/* Write the command/status word of descriptor @entry. */
static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id,
				  int entry, u32 val)
{
	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val);
}
329 
/* Program the DMA buffer address of descriptor @entry.  The upper
 * 32 bits exist only on SoCs using the 64-bit descriptor layout.
 */
static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id,
				int entry, dma_addr_t paddr)
{
	struct ave_private *priv = netdev_priv(ndev);

	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL,
		       lower_32_bits(paddr));
	if (IS_DESC_64BIT(priv))
		ave_desc_write(ndev, id,
			       entry, AVE_DESC_OFS_ADDRU,
			       upper_32_bits(paddr));
}
342 
343 static u32 ave_irq_disable_all(struct net_device *ndev)
344 {
345 	struct ave_private *priv = netdev_priv(ndev);
346 	u32 ret;
347 
348 	ret = readl(priv->base + AVE_GIMR);
349 	writel(0, priv->base + AVE_GIMR);
350 
351 	return ret;
352 }
353 
/* Restore an interrupt mask previously saved by ave_irq_disable_all(). */
static void ave_irq_restore(struct net_device *ndev, u32 val)
{
	struct ave_private *priv = netdev_priv(ndev);

	writel(val, priv->base + AVE_GIMR);
}
360 
361 static void ave_irq_enable(struct net_device *ndev, u32 bitflag)
362 {
363 	struct ave_private *priv = netdev_priv(ndev);
364 
365 	writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR);
366 	writel(bitflag, priv->base + AVE_GISR);
367 }
368 
369 static void ave_hw_write_macaddr(struct net_device *ndev,
370 				 const unsigned char *mac_addr,
371 				 int reg1, int reg2)
372 {
373 	struct ave_private *priv = netdev_priv(ndev);
374 
375 	writel(mac_addr[0] | mac_addr[1] << 8 |
376 	       mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1);
377 	writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2);
378 }
379 
380 static void ave_hw_read_version(struct net_device *ndev, char *buf, int len)
381 {
382 	struct ave_private *priv = netdev_priv(ndev);
383 	u32 major, minor, vr;
384 
385 	vr = readl(priv->base + AVE_VR);
386 	major = (vr & GENMASK(15, 8)) >> 8;
387 	minor = (vr & GENMASK(7, 0));
388 	snprintf(buf, len, "v%u.%u", major, minor);
389 }
390 
391 static void ave_ethtool_get_drvinfo(struct net_device *ndev,
392 				    struct ethtool_drvinfo *info)
393 {
394 	struct device *dev = ndev->dev.parent;
395 
396 	strlcpy(info->driver, dev->driver->name, sizeof(info->driver));
397 	strlcpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
398 	ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
399 }
400 
/* ethtool: report the current netif message level. */
static u32 ave_ethtool_get_msglevel(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);

	return priv->msg_enable;
}
407 
/* ethtool: set the netif message level. */
static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val)
{
	struct ave_private *priv = netdev_priv(ndev);

	priv->msg_enable = val;
}
414 
415 static void ave_ethtool_get_wol(struct net_device *ndev,
416 				struct ethtool_wolinfo *wol)
417 {
418 	wol->supported = 0;
419 	wol->wolopts   = 0;
420 
421 	if (ndev->phydev)
422 		phy_ethtool_get_wol(ndev->phydev, wol);
423 }
424 
425 static int ave_ethtool_set_wol(struct net_device *ndev,
426 			       struct ethtool_wolinfo *wol)
427 {
428 	int ret;
429 
430 	if (!ndev->phydev ||
431 	    (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
432 		return -EOPNOTSUPP;
433 
434 	ret = phy_ethtool_set_wol(ndev->phydev, wol);
435 	if (!ret)
436 		device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
437 
438 	return ret;
439 }
440 
/* ethtool: report the flow-control settings cached in driver state. */
static void ave_ethtool_get_pauseparam(struct net_device *ndev,
				       struct ethtool_pauseparam *pause)
{
	struct ave_private *priv = netdev_priv(ndev);

	pause->autoneg  = priv->pause_auto;
	pause->rx_pause = priv->pause_rx;
	pause->tx_pause = priv->pause_tx;
}
450 
451 static int ave_ethtool_set_pauseparam(struct net_device *ndev,
452 				      struct ethtool_pauseparam *pause)
453 {
454 	struct ave_private *priv = netdev_priv(ndev);
455 	struct phy_device *phydev = ndev->phydev;
456 
457 	if (!phydev)
458 		return -EINVAL;
459 
460 	priv->pause_auto = pause->autoneg;
461 	priv->pause_rx   = pause->rx_pause;
462 	priv->pause_tx   = pause->tx_pause;
463 
464 	phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
465 
466 	return 0;
467 }
468 
469 static const struct ethtool_ops ave_ethtool_ops = {
470 	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
471 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
472 	.get_drvinfo		= ave_ethtool_get_drvinfo,
473 	.nway_reset		= phy_ethtool_nway_reset,
474 	.get_link		= ethtool_op_get_link,
475 	.get_msglevel		= ave_ethtool_get_msglevel,
476 	.set_msglevel		= ave_ethtool_set_msglevel,
477 	.get_wol		= ave_ethtool_get_wol,
478 	.set_wol		= ave_ethtool_set_wol,
479 	.get_pauseparam         = ave_ethtool_get_pauseparam,
480 	.set_pauseparam         = ave_ethtool_set_pauseparam,
481 };
482 
483 static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum)
484 {
485 	struct net_device *ndev = bus->priv;
486 	struct ave_private *priv;
487 	u32 mdioctl, mdiosr;
488 	int ret;
489 
490 	priv = netdev_priv(ndev);
491 
492 	/* write address */
493 	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
494 
495 	/* read request */
496 	mdioctl = readl(priv->base + AVE_MDIOCTR);
497 	writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ,
498 	       priv->base + AVE_MDIOCTR);
499 
500 	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
501 				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
502 	if (ret) {
503 		netdev_err(ndev, "failed to read (phy:%d reg:%x)\n",
504 			   phyid, regnum);
505 		return ret;
506 	}
507 
508 	return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0);
509 }
510 
511 static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum,
512 			     u16 val)
513 {
514 	struct net_device *ndev = bus->priv;
515 	struct ave_private *priv;
516 	u32 mdioctl, mdiosr;
517 	int ret;
518 
519 	priv = netdev_priv(ndev);
520 
521 	/* write address */
522 	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
523 
524 	/* write data */
525 	writel(val, priv->base + AVE_MDIOWDR);
526 
527 	/* write request */
528 	mdioctl = readl(priv->base + AVE_MDIOCTR);
529 	writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ,
530 	       priv->base + AVE_MDIOCTR);
531 
532 	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
533 				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
534 	if (ret)
535 		netdev_err(ndev, "failed to write (phy:%d reg:%x)\n",
536 			   phyid, regnum);
537 
538 	return ret;
539 }
540 
541 static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc,
542 		       void *ptr, size_t len, enum dma_data_direction dir,
543 		       dma_addr_t *paddr)
544 {
545 	dma_addr_t map_addr;
546 
547 	map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir);
548 	if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr)))
549 		return -ENOMEM;
550 
551 	desc->skbs_dma = map_addr;
552 	desc->skbs_dmalen = len;
553 	*paddr = map_addr;
554 
555 	return 0;
556 }
557 
/* Undo the mapping recorded by ave_dma_map(), if any.  A zero
 * skbs_dma is used as the "not mapped" marker, so it is cleared here
 * to make repeated calls harmless.
 */
static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc,
			  enum dma_data_direction dir)
{
	if (!desc->skbs_dma)
		return;

	dma_unmap_single(ndev->dev.parent,
			 desc->skbs_dma, desc->skbs_dmalen, dir);
	desc->skbs_dma = 0;
}
568 
/* Prepare an Rx descriptor and its buffer memory.
 *
 * Reuses the skb already attached to @entry if present, otherwise
 * allocates a new one.  Returns 0 on success or a negative errno.
 */
static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = priv->rx.desc[entry].skbs;
	if (!skb) {
		skb = netdev_alloc_skb_ip_align(ndev,
						AVE_MAX_ETHFRAME);
		if (!skb) {
			netdev_err(ndev, "can't allocate skb for Rx\n");
			return -ENOMEM;
		}
	}

	/* set disable to cmdsts: AVE_STS_OWN == 1 means the CPU owns an
	 * Rx entry, so the HW will not touch it while it is rearmed
	 */
	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
			      AVE_STS_INTR | AVE_STS_OWN);

	/* map Rx buffer
	 * Rx buffer set to the Rx descriptor has two restrictions:
	 * - Rx buffer address is 4 byte aligned.
	 * - Rx buffer begins with 2 byte headroom, and data will be put from
	 *   (buffer + 2).
	 * To satisfy this, specify the address to put back the buffer
	 * pointer advanced by NET_IP_ALIGN by netdev_alloc_skb_ip_align(),
	 * and expand the map size by NET_IP_ALIGN.
	 */
	ret = ave_dma_map(ndev, &priv->rx.desc[entry],
			  skb->data - NET_IP_ALIGN,
			  AVE_MAX_ETHFRAME + NET_IP_ALIGN,
			  DMA_FROM_DEVICE, &paddr);
	if (ret) {
		netdev_err(ndev, "can't map skb for Rx\n");
		dev_kfree_skb_any(skb);
		return ret;
	}
	priv->rx.desc[entry].skbs = skb;

	/* set buffer pointer */
	ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr);

	/* set enable to cmdsts: clearing AVE_STS_OWN hands the entry back
	 * to the HW; the low bits carry the buffer size
	 */
	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
			      AVE_STS_INTR | AVE_MAX_ETHFRAME);

	return ret;
}
620 
/* Switch state of the descriptor engine.
 *
 * Returns 0 on success, -EBUSY if the HW does not acknowledge a stop
 * or suspend request in time, -EINVAL for an unknown state.
 */
static int ave_desc_switch(struct net_device *ndev, enum desc_state state)
{
	struct ave_private *priv = netdev_priv(ndev);
	int ret = 0;
	u32 val;

	switch (state) {
	case AVE_DESC_START:
		/* enable Tx and Rx ring0 descriptor operation */
		writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC);
		break;

	case AVE_DESC_STOP:
		/* disable both rings and wait for the whole register,
		 * including the status half, to read back as zero
		 */
		writel(0, priv->base + AVE_DESCC);
		if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val,
				       150, 15000)) {
			netdev_err(ndev, "can't stop descriptor\n");
			ret = -EBUSY;
		}
		break;

	case AVE_DESC_RX_SUSPEND:
		/* request an Rx pause and wait until the HW mirrors the
		 * request into the status half (bits 31:16)
		 */
		val = readl(priv->base + AVE_DESCC);
		val |= AVE_DESCC_RDSTP;
		val &= ~AVE_DESCC_STATUS_MASK;
		writel(val, priv->base + AVE_DESCC);
		if (readl_poll_timeout(priv->base + AVE_DESCC, val,
				       val & (AVE_DESCC_RDSTP << 16),
				       150, 150000)) {
			netdev_err(ndev, "can't suspend descriptor\n");
			ret = -EBUSY;
		}
		break;

	case AVE_DESC_RX_PERMIT:
		/* clear the pause request to resume Rx */
		val = readl(priv->base + AVE_DESCC);
		val &= ~AVE_DESCC_RDSTP;
		val &= ~AVE_DESCC_STATUS_MASK;
		writel(val, priv->base + AVE_DESCC);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
669 
/* Reclaim completed Tx descriptors.
 *
 * Walks the ring from done_idx towards proc_idx, frees skbs whose
 * descriptors the HW has handed back, and updates statistics.
 * Returns the number of freed buffers.
 */
static int ave_tx_complete(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 proc_idx, done_idx, ndesc, cmdsts;
	unsigned int nr_freebuf = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;

	proc_idx = priv->tx.proc_idx;
	done_idx = priv->tx.done_idx;
	ndesc    = priv->tx.ndesc;

	/* free pre-stored skb from done_idx to proc_idx */
	while (proc_idx != done_idx) {
		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx);

		/* do nothing if owner is HW (==1 for Tx) */
		if (cmdsts & AVE_STS_OWN)
			break;

		/* check Tx status and updates statistics */
		if (cmdsts & AVE_STS_OK) {
			tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK;
			/* success: count a packet only on the last
			 * fragment of a buffer chain
			 */
			if (cmdsts & AVE_STS_LAST)
				tx_packets++;
		} else {
			/* error */
			if (cmdsts & AVE_STS_LAST) {
				priv->stats_tx.errors++;
				if (cmdsts & (AVE_STS_OWC | AVE_STS_EC))
					priv->stats_tx.collisions++;
			}
		}

		/* release skb */
		if (priv->tx.desc[done_idx].skbs) {
			ave_dma_unmap(ndev, &priv->tx.desc[done_idx],
				      DMA_TO_DEVICE);
			dev_consume_skb_any(priv->tx.desc[done_idx].skbs);
			priv->tx.desc[done_idx].skbs = NULL;
			nr_freebuf++;
		}
		done_idx = (done_idx + 1) % ndesc;
	}

	priv->tx.done_idx = done_idx;

	/* update stats */
	u64_stats_update_begin(&priv->stats_tx.syncp);
	priv->stats_tx.packets += tx_packets;
	priv->stats_tx.bytes   += tx_bytes;
	u64_stats_update_end(&priv->stats_tx.syncp);

	/* wake queue for freeing buffer */
	if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf)
		netif_wake_queue(ndev);

	return nr_freebuf;
}
730 
/* Receive up to @num packets from the Rx ring and refill the entries
 * that were consumed.  Returns the number of descriptors processed.
 */
static int ave_rx_receive(struct net_device *ndev, int num)
{
	struct ave_private *priv = netdev_priv(ndev);
	unsigned int rx_packets = 0;
	unsigned int rx_bytes = 0;
	u32 proc_idx, done_idx;
	struct sk_buff *skb;
	unsigned int pktlen;
	int restpkt, npkts;
	u32 ndesc, cmdsts;

	proc_idx = priv->rx.proc_idx;
	done_idx = priv->rx.done_idx;
	ndesc    = priv->rx.ndesc;
	/* number of entries still armed between done_idx and proc_idx */
	restpkt  = ((proc_idx + ndesc - 1) - done_idx) % ndesc;

	for (npkts = 0; npkts < num; npkts++) {
		/* we can't receive more packet, so fill desc quickly */
		if (--restpkt < 0)
			break;

		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx);

		/* do nothing if owner is HW (==0 for Rx) */
		if (!(cmdsts & AVE_STS_OWN))
			break;

		if (!(cmdsts & AVE_STS_OK)) {
			priv->stats_rx.errors++;
			proc_idx = (proc_idx + 1) % ndesc;
			continue;
		}

		pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK;

		/* get skbuff for rx */
		skb = priv->rx.desc[proc_idx].skbs;
		priv->rx.desc[proc_idx].skbs = NULL;

		ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE);

		skb->dev = ndev;
		skb_put(skb, pktlen);
		skb->protocol = eth_type_trans(skb, ndev);

		/* checksum was verified by HW and found valid */
		if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		rx_packets++;
		rx_bytes += pktlen;

		netif_receive_skb(skb);

		proc_idx = (proc_idx + 1) % ndesc;
	}

	priv->rx.proc_idx = proc_idx;

	/* update stats */
	u64_stats_update_begin(&priv->stats_rx.syncp);
	priv->stats_rx.packets += rx_packets;
	priv->stats_rx.bytes   += rx_bytes;
	u64_stats_update_end(&priv->stats_rx.syncp);

	/* refill the Rx buffers */
	while (proc_idx != done_idx) {
		if (ave_rxdesc_prepare(ndev, done_idx))
			break;
		done_idx = (done_idx + 1) % ndesc;
	}

	priv->rx.done_idx = done_idx;

	return npkts;
}
806 
807 static int ave_napi_poll_rx(struct napi_struct *napi, int budget)
808 {
809 	struct ave_private *priv;
810 	struct net_device *ndev;
811 	int num;
812 
813 	priv = container_of(napi, struct ave_private, napi_rx);
814 	ndev = priv->ndev;
815 
816 	num = ave_rx_receive(ndev, budget);
817 	if (num < budget) {
818 		napi_complete_done(napi, num);
819 
820 		/* enable Rx interrupt when NAPI finishes */
821 		ave_irq_enable(ndev, AVE_GI_RXIINT);
822 	}
823 
824 	return num;
825 }
826 
827 static int ave_napi_poll_tx(struct napi_struct *napi, int budget)
828 {
829 	struct ave_private *priv;
830 	struct net_device *ndev;
831 	int num;
832 
833 	priv = container_of(napi, struct ave_private, napi_tx);
834 	ndev = priv->ndev;
835 
836 	num = ave_tx_complete(ndev);
837 	napi_complete(napi);
838 
839 	/* enable Tx interrupt when NAPI finishes */
840 	ave_irq_enable(ndev, AVE_GI_TX);
841 
842 	return num;
843 }
844 
/* Reset the whole MAC and the external PHY, leaving all interrupts
 * masked.  The reset is released in stages (PHY first, then MAC) with
 * delays between each step.
 */
static void ave_global_reset(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 val;

	/* set config register */
	val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE;
	if (!phy_interface_mode_is_rgmii(priv->phy_mode))
		val |= AVE_CFGR_MII;
	writel(val, priv->base + AVE_CFGR);

	/* reset RMII register (clearing RMIIRST asserts the reset) */
	val = readl(priv->base + AVE_RSTCTRL);
	val &= ~AVE_RSTCTRL_RMIIRST;
	writel(val, priv->base + AVE_RSTCTRL);

	/* assert reset */
	writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR);
	msleep(20);

	/* 1st, negate PHY reset only */
	writel(AVE_GRR_GRST, priv->base + AVE_GRR);
	msleep(40);

	/* negate reset */
	writel(0, priv->base + AVE_GRR);
	msleep(40);

	/* negate RMII register */
	val = readl(priv->base + AVE_RSTCTRL);
	val |= AVE_RSTCTRL_RMIIRST;
	writel(val, priv->base + AVE_RSTCTRL);

	ave_irq_disable_all(ndev);
}
880 
/* Recover from an RxFIFO overflow: drain pending packets and pulse the
 * RxFIFO reset while the MAC receiver and Rx descriptors are stopped.
 */
static void ave_rxfifo_reset(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 rxcr_org;

	/* save and disable MAC receive op */
	rxcr_org = readl(priv->base + AVE_RXCR);
	writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR);

	/* suspend Rx descriptor */
	ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND);

	/* receive all packets before descriptor starts */
	ave_rx_receive(ndev, priv->rx.ndesc);

	/* assert reset */
	writel(AVE_GRR_RXFFR, priv->base + AVE_GRR);
	udelay(50);

	/* negate reset */
	writel(0, priv->base + AVE_GRR);
	udelay(20);

	/* negate interrupt status */
	writel(AVE_GI_RXOVF, priv->base + AVE_GISR);

	/* permit descriptor */
	ave_desc_switch(ndev, AVE_DESC_RX_PERMIT);

	/* restore MAC receive op */
	writel(rxcr_org, priv->base + AVE_RXCR);
}
913 
/* Top-half interrupt handler: acknowledges events and defers Rx/Tx
 * processing to NAPI.  Rx/Tx interrupts stay masked (via the restored
 * gimr_val) until the corresponding NAPI poll re-enables them.
 */
static irqreturn_t ave_irq_handler(int irq, void *netdev)
{
	struct net_device *ndev = (struct net_device *)netdev;
	struct ave_private *priv = netdev_priv(ndev);
	u32 gimr_val, gisr_val;

	gimr_val = ave_irq_disable_all(ndev);

	/* get interrupt status */
	gisr_val = readl(priv->base + AVE_GISR);

	/* PHY interrupt: just acknowledge */
	if (gisr_val & AVE_GI_PHY)
		writel(AVE_GI_PHY, priv->base + AVE_GISR);

	/* check exceeding packet */
	if (gisr_val & AVE_GI_RXERR) {
		writel(AVE_GI_RXERR, priv->base + AVE_GISR);
		netdev_err(ndev, "receive a packet exceeding frame buffer\n");
	}

	/* only handle events that were actually unmasked */
	gisr_val &= gimr_val;
	if (!gisr_val)
		goto exit_isr;

	/* RxFIFO overflow */
	if (gisr_val & AVE_GI_RXOVF) {
		priv->stats_rx.fifo_errors++;
		ave_rxfifo_reset(ndev);
		goto exit_isr;
	}

	/* Rx drop */
	if (gisr_val & AVE_GI_RXDROP) {
		priv->stats_rx.dropped++;
		writel(AVE_GI_RXDROP, priv->base + AVE_GISR);
	}

	/* Rx interval */
	if (gisr_val & AVE_GI_RXIINT) {
		napi_schedule(&priv->napi_rx);
		/* still force to disable Rx interrupt until NAPI finishes */
		gimr_val &= ~AVE_GI_RXIINT;
	}

	/* Tx completed */
	if (gisr_val & AVE_GI_TX) {
		napi_schedule(&priv->napi_tx);
		/* still force to disable Tx interrupt until NAPI finishes */
		gimr_val &= ~AVE_GI_TX;
	}

exit_isr:
	ave_irq_restore(ndev, gimr_val);

	return IRQ_HANDLED;
}
971 
972 static int ave_pfsel_start(struct net_device *ndev, unsigned int entry)
973 {
974 	struct ave_private *priv = netdev_priv(ndev);
975 	u32 val;
976 
977 	if (WARN_ON(entry > AVE_PF_SIZE))
978 		return -EINVAL;
979 
980 	val = readl(priv->base + AVE_PFEN);
981 	writel(val | BIT(entry), priv->base + AVE_PFEN);
982 
983 	return 0;
984 }
985 
986 static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry)
987 {
988 	struct ave_private *priv = netdev_priv(ndev);
989 	u32 val;
990 
991 	if (WARN_ON(entry > AVE_PF_SIZE))
992 		return -EINVAL;
993 
994 	val = readl(priv->base + AVE_PFEN);
995 	writel(val & ~BIT(entry), priv->base + AVE_PFEN);
996 
997 	return 0;
998 }
999 
1000 static int ave_pfsel_set_macaddr(struct net_device *ndev,
1001 				 unsigned int entry,
1002 				 const unsigned char *mac_addr,
1003 				 unsigned int set_size)
1004 {
1005 	struct ave_private *priv = netdev_priv(ndev);
1006 
1007 	if (WARN_ON(entry > AVE_PF_SIZE))
1008 		return -EINVAL;
1009 	if (WARN_ON(set_size > 6))
1010 		return -EINVAL;
1011 
1012 	ave_pfsel_stop(ndev, entry);
1013 
1014 	/* set MAC address for the filter */
1015 	ave_hw_write_macaddr(ndev, mac_addr,
1016 			     AVE_PKTF(entry), AVE_PKTF(entry) + 4);
1017 
1018 	/* set byte mask */
1019 	writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0,
1020 	       priv->base + AVE_PFMBYTE(entry));
1021 	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
1022 
1023 	/* set bit mask filter */
1024 	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
1025 
1026 	/* set selector to ring 0 */
1027 	writel(0, priv->base + AVE_PFSEL(entry));
1028 
1029 	/* restart filter */
1030 	ave_pfsel_start(ndev, entry);
1031 
1032 	return 0;
1033 }
1034 
1035 static void ave_pfsel_set_promisc(struct net_device *ndev,
1036 				  unsigned int entry, u32 rxring)
1037 {
1038 	struct ave_private *priv = netdev_priv(ndev);
1039 
1040 	if (WARN_ON(entry > AVE_PF_SIZE))
1041 		return;
1042 
1043 	ave_pfsel_stop(ndev, entry);
1044 
1045 	/* set byte mask */
1046 	writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry));
1047 	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
1048 
1049 	/* set bit mask filter */
1050 	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
1051 
1052 	/* set selector to rxring */
1053 	writel(rxring, priv->base + AVE_PFSEL(entry));
1054 
1055 	ave_pfsel_start(ndev, entry);
1056 }
1057 
/* Initialize the packet filters: disable every entry, then program the
 * default promiscuous, unicast and broadcast entries.
 */
static void ave_pfsel_init(struct net_device *ndev)
{
	unsigned char bcast_mac[ETH_ALEN];
	int i;

	eth_broadcast_addr(bcast_mac);

	for (i = 0; i < AVE_PF_SIZE; i++)
		ave_pfsel_stop(ndev, i);

	/* promiscuous entry, select ring 0 */
	ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0);

	/* unicast entry */
	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);

	/* broadcast entry */
	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6);
}
1077 
/* phylib adjust_link callback: propagate the negotiated link parameters
 * (speed, duplex, pause/flow-control) into the MAC's TXCR, LINKSEL and
 * RXCR registers.  When RXCR needs to change, Rx is briefly disabled while
 * TXCR/RXCR are rewritten.
 */
static void ave_phy_adjust_link(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	u32 val, txcr, rxcr, rxcr_org;
	u16 rmt_adv = 0, lcl_adv = 0;
	u8 cap;

	/* set RGMII speed */
	val = readl(priv->base + AVE_TXCR);
	val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G);

	if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000)
		val |= AVE_TXCR_TXSPD_1G;
	else if (phydev->speed == SPEED_100)
		val |= AVE_TXCR_TXSPD_100;

	writel(val, priv->base + AVE_TXCR);

	/* set RMII speed (100M/10M only) */
	if (!phy_interface_is_rgmii(phydev)) {
		val = readl(priv->base + AVE_LINKSEL);
		if (phydev->speed == SPEED_10)
			val &= ~AVE_LINKSEL_100M;
		else
			val |= AVE_LINKSEL_100M;
		writel(val, priv->base + AVE_LINKSEL);
	}

	/* check current RXCR/TXCR */
	rxcr = readl(priv->base + AVE_RXCR);
	txcr = readl(priv->base + AVE_TXCR);
	rxcr_org = rxcr;

	if (phydev->duplex) {
		rxcr |= AVE_RXCR_FDUPEN;

		/* resolve pause capability from local and remote advertisement */
		if (phydev->pause)
			rmt_adv |= LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
		cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (cap & FLOW_CTRL_TX)
			txcr |= AVE_TXCR_FLOCTR;
		else
			txcr &= ~AVE_TXCR_FLOCTR;
		if (cap & FLOW_CTRL_RX)
			rxcr |= AVE_RXCR_FLOCTR;
		else
			rxcr &= ~AVE_RXCR_FLOCTR;
	} else {
		/* half duplex: no pause frames in either direction */
		rxcr &= ~AVE_RXCR_FDUPEN;
		rxcr &= ~AVE_RXCR_FLOCTR;
		txcr &= ~AVE_TXCR_FLOCTR;
	}

	if (rxcr_org != rxcr) {
		/* disable Rx mac */
		writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR);
		/* change and enable TX/Rx mac */
		writel(txcr, priv->base + AVE_TXCR);
		writel(rxcr, priv->base + AVE_RXCR);
	}

	phy_print_status(phydev);
}
1146 
1147 static void ave_macaddr_init(struct net_device *ndev)
1148 {
1149 	ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R);
1150 
1151 	/* pfsel unicast entry */
1152 	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
1153 }
1154 
1155 static int ave_init(struct net_device *ndev)
1156 {
1157 	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1158 	struct ave_private *priv = netdev_priv(ndev);
1159 	struct device *dev = ndev->dev.parent;
1160 	struct device_node *np = dev->of_node;
1161 	struct device_node *mdio_np;
1162 	struct phy_device *phydev;
1163 	int nc, nr, ret;
1164 
1165 	/* enable clk because of hw access until ndo_open */
1166 	for (nc = 0; nc < priv->nclks; nc++) {
1167 		ret = clk_prepare_enable(priv->clk[nc]);
1168 		if (ret) {
1169 			dev_err(dev, "can't enable clock\n");
1170 			goto out_clk_disable;
1171 		}
1172 	}
1173 
1174 	for (nr = 0; nr < priv->nrsts; nr++) {
1175 		ret = reset_control_deassert(priv->rst[nr]);
1176 		if (ret) {
1177 			dev_err(dev, "can't deassert reset\n");
1178 			goto out_reset_assert;
1179 		}
1180 	}
1181 
1182 	ret = regmap_update_bits(priv->regmap, SG_ETPINMODE,
1183 				 priv->pinmode_mask, priv->pinmode_val);
1184 	if (ret)
1185 		return ret;
1186 
1187 	ave_global_reset(ndev);
1188 
1189 	mdio_np = of_get_child_by_name(np, "mdio");
1190 	if (!mdio_np) {
1191 		dev_err(dev, "mdio node not found\n");
1192 		ret = -EINVAL;
1193 		goto out_reset_assert;
1194 	}
1195 	ret = of_mdiobus_register(priv->mdio, mdio_np);
1196 	of_node_put(mdio_np);
1197 	if (ret) {
1198 		dev_err(dev, "failed to register mdiobus\n");
1199 		goto out_reset_assert;
1200 	}
1201 
1202 	phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link);
1203 	if (!phydev) {
1204 		dev_err(dev, "could not attach to PHY\n");
1205 		ret = -ENODEV;
1206 		goto out_mdio_unregister;
1207 	}
1208 
1209 	priv->phydev = phydev;
1210 
1211 	phy_ethtool_get_wol(phydev, &wol);
1212 	device_set_wakeup_capable(&ndev->dev, !!wol.supported);
1213 
1214 	if (!phy_interface_is_rgmii(phydev))
1215 		phy_set_max_speed(phydev, SPEED_100);
1216 
1217 	phy_support_asym_pause(phydev);
1218 
1219 	phy_attached_info(phydev);
1220 
1221 	return 0;
1222 
1223 out_mdio_unregister:
1224 	mdiobus_unregister(priv->mdio);
1225 out_reset_assert:
1226 	while (--nr >= 0)
1227 		reset_control_assert(priv->rst[nr]);
1228 out_clk_disable:
1229 	while (--nc >= 0)
1230 		clk_disable_unprepare(priv->clk[nc]);
1231 
1232 	return ret;
1233 }
1234 
1235 static void ave_uninit(struct net_device *ndev)
1236 {
1237 	struct ave_private *priv = netdev_priv(ndev);
1238 	int i;
1239 
1240 	phy_disconnect(priv->phydev);
1241 	mdiobus_unregister(priv->mdio);
1242 
1243 	/* disable clk because of hw access after ndo_stop */
1244 	for (i = 0; i < priv->nrsts; i++)
1245 		reset_control_assert(priv->rst[i]);
1246 	for (i = 0; i < priv->nclks; i++)
1247 		clk_disable_unprepare(priv->clk[i]);
1248 }
1249 
/* ndo_open: request the IRQ, allocate and program the Tx/Rx descriptor
 * rings, install the default packet filters and MAC address, configure
 * Rx/Tx and interrupt coalescing, then start NAPI, the PHY and the queue.
 * Returns 0 on success or a negative errno.
 */
static int ave_open(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	int entry;
	int ret;
	u32 val;

	ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name,
			  ndev);
	if (ret)
		return ret;

	/* software bookkeeping for the descriptor rings */
	priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc),
				GFP_KERNEL);
	if (!priv->tx.desc) {
		ret = -ENOMEM;
		goto out_free_irq;
	}

	priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc),
				GFP_KERNEL);
	if (!priv->rx.desc) {
		kfree(priv->tx.desc);
		ret = -ENOMEM;
		goto out_free_irq;
	}

	/* initialize Tx work and descriptor */
	priv->tx.proc_idx = 0;
	priv->tx.done_idx = 0;
	for (entry = 0; entry < priv->tx.ndesc; entry++) {
		ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0);
		ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0);
	}
	/* ring size in bytes goes in the upper half of the register */
	writel(AVE_TXDC_ADDR_START |
	       (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE),
	       priv->base + AVE_TXDC);

	/* initialize Rx work and descriptor */
	priv->rx.proc_idx = 0;
	priv->rx.done_idx = 0;
	for (entry = 0; entry < priv->rx.ndesc; entry++) {
		if (ave_rxdesc_prepare(ndev, entry))
			break;
	}
	writel(AVE_RXDC0_ADDR_START |
	       (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE),
	       priv->base + AVE_RXDC0);

	ave_desc_switch(ndev, AVE_DESC_START);

	ave_pfsel_init(ndev);
	ave_macaddr_init(ndev);

	/* set Rx configuration */
	/* full duplex, enable pause drop, enable flow control */
	val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN |
		AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK);
	writel(val, priv->base + AVE_RXCR);

	/* set Tx configuration */
	/* enable flow control, disable loopback */
	writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR);

	/* enable timer, clear EN,INTM, and mask interval unit(BSCK) */
	val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK;
	val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
	writel(val, priv->base + AVE_IIRQC);

	/* unmask the interrupt sources we service */
	val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP;
	ave_irq_restore(ndev, val);

	napi_enable(&priv->napi_rx);
	napi_enable(&priv->napi_tx);

	phy_start(ndev->phydev);
	phy_start_aneg(ndev->phydev);
	netif_start_queue(ndev);

	return 0;

out_free_irq:
	disable_irq(priv->irq);
	free_irq(priv->irq, ndev);

	return ret;
}
1337 
/* ndo_stop: mask and release the IRQ, stop the queue, PHY and NAPI, halt
 * the descriptor engine, then unmap and free every Tx/Rx buffer and the
 * ring bookkeeping allocated in ave_open().  Always returns 0.
 */
static int ave_stop(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	int entry;

	ave_irq_disable_all(ndev);
	disable_irq(priv->irq);
	free_irq(priv->irq, ndev);

	netif_tx_disable(ndev);
	phy_stop(ndev->phydev);
	napi_disable(&priv->napi_tx);
	napi_disable(&priv->napi_rx);

	ave_desc_switch(ndev, AVE_DESC_STOP);

	/* free Tx buffer */
	for (entry = 0; entry < priv->tx.ndesc; entry++) {
		if (!priv->tx.desc[entry].skbs)
			continue;

		ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE);
		dev_kfree_skb_any(priv->tx.desc[entry].skbs);
		priv->tx.desc[entry].skbs = NULL;
	}
	priv->tx.proc_idx = 0;
	priv->tx.done_idx = 0;

	/* free Rx buffer */
	for (entry = 0; entry < priv->rx.ndesc; entry++) {
		if (!priv->rx.desc[entry].skbs)
			continue;

		ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx.desc[entry].skbs);
		priv->rx.desc[entry].skbs = NULL;
	}
	priv->rx.proc_idx = 0;
	priv->rx.done_idx = 0;

	kfree(priv->tx.desc);
	kfree(priv->rx.desc);

	return 0;
}
1383 
/* ndo_start_xmit: queue one skb on the Tx descriptor ring.
 *
 * Returns NETDEV_TX_BUSY when the ring has no free entry (and stops the
 * queue), otherwise NETDEV_TX_OK — in the latter case the skb is always
 * consumed, even when it is dropped on a padding or DMA-mapping failure.
 */
static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 proc_idx, done_idx, ndesc, cmdsts;
	int ret, freepkt;
	dma_addr_t paddr;

	proc_idx = priv->tx.proc_idx;
	done_idx = priv->tx.done_idx;
	ndesc = priv->tx.ndesc;
	/* free entries on the circular ring, keeping one slot as a sentinel */
	freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc;

	/* stop queue when not enough entry */
	if (unlikely(freepkt < 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* add padding for short packet */
	if (skb_put_padto(skb, ETH_ZLEN)) {
		priv->stats_tx.dropped++;
		return NETDEV_TX_OK;
	}

	/* map Tx buffer
	 * Tx buffer set to the Tx descriptor doesn't have any restriction.
	 */
	ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx],
			  skb->data, skb->len, DMA_TO_DEVICE, &paddr);
	if (ret) {
		dev_kfree_skb_any(skb);
		priv->stats_tx.dropped++;
		return NETDEV_TX_OK;
	}

	priv->tx.desc[proc_idx].skbs = skb;

	ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr);

	/* hand the descriptor to hardware as a single-fragment frame */
	cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST |
		(skb->len & AVE_STS_PKTLEN_TX_MASK);

	/* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */
	if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev))
		cmdsts |= AVE_STS_INTR;

	/* disable checksum calculation when skb doesn't calculate checksum */
	if (skb->ip_summed == CHECKSUM_NONE ||
	    skb->ip_summed == CHECKSUM_UNNECESSARY)
		cmdsts |= AVE_STS_NOCSUM;

	ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts);

	priv->tx.proc_idx = (proc_idx + 1) % ndesc;

	return NETDEV_TX_OK;
}
1441 
1442 static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
1443 {
1444 	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
1445 }
1446 
/* Leading byte of the IPv4 (01:...) and IPv6 (33:...) multicast MAC address
 * ranges.  ave_set_rx_mode() installs these with a one-byte match length to
 * accept all multicast traffic.
 */
static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 };
1449 
/* ndo_set_rx_mode: reconfigure the address filter to match the interface
 * flags and the current multicast list.
 *
 * Promiscuous mode (or an empty multicast list) disables address filtering
 * entirely.  With filtering enabled, either two catch-all multicast prefix
 * entries are installed (IFF_ALLMULTI or list overflow) or one filter entry
 * is programmed per multicast address.
 */
static void ave_set_rx_mode(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct netdev_hw_addr *hw_adr;
	int count, mc_cnt;
	u32 val;

	/* MAC addr filter enable for promiscuous mode */
	mc_cnt = netdev_mc_count(ndev);
	val = readl(priv->base + AVE_RXCR);
	if (ndev->flags & IFF_PROMISC || !mc_cnt)
		val &= ~AVE_RXCR_AFEN;
	else
		val |= AVE_RXCR_AFEN;
	writel(val, priv->base + AVE_RXCR);

	/* set all multicast address */
	if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) {
		/* one-byte prefix match accepts every v4/v6 multicast frame */
		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST,
				      v4multi_macadr, 1);
		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1,
				      v6multi_macadr, 1);
	} else {
		/* stop all multicast filter */
		for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++)
			ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count);

		/* set multicast addresses */
		count = 0;
		netdev_for_each_mc_addr(hw_adr, ndev) {
			if (count == mc_cnt)
				break;
			ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count,
					      hw_adr->addr, 6);
			count++;
		}
	}
}
1488 
/* ndo_get_stats64: report accumulated Rx/Tx statistics.
 *
 * Packet/byte counters are sampled inside u64_stats fetch loops so the
 * 64-bit values are read consistently on 32-bit machines; the remaining
 * counters are plain reads.
 */
static void ave_get_stats64(struct net_device *ndev,
			    struct rtnl_link_stats64 *stats)
{
	struct ave_private *priv = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp);
		stats->rx_packets = priv->stats_rx.packets;
		stats->rx_bytes	  = priv->stats_rx.bytes;
	} while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp);
		stats->tx_packets = priv->stats_tx.packets;
		stats->tx_bytes	  = priv->stats_tx.bytes;
	} while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start));

	stats->rx_errors      = priv->stats_rx.errors;
	stats->tx_errors      = priv->stats_tx.errors;
	stats->rx_dropped     = priv->stats_rx.dropped;
	stats->tx_dropped     = priv->stats_tx.dropped;
	stats->rx_fifo_errors = priv->stats_rx.fifo_errors;
	stats->collisions     = priv->stats_tx.collisions;
}
1514 
/* ndo_set_mac_address: validate and store the new address, then program it
 * into the hardware.  Returns 0 or a negative errno from eth_mac_addr().
 */
static int ave_set_mac_address(struct net_device *ndev, void *p)
{
	int err;

	err = eth_mac_addr(ndev, p);
	if (err)
		return err;

	/* propagate the new address to Rx MAC registers and unicast filter */
	ave_macaddr_init(ndev);

	return 0;
}
1526 
/* net_device_ops implemented by this driver */
static const struct net_device_ops ave_netdev_ops = {
	.ndo_init		= ave_init,
	.ndo_uninit		= ave_uninit,
	.ndo_open		= ave_open,
	.ndo_stop		= ave_stop,
	.ndo_start_xmit		= ave_start_xmit,
	.ndo_do_ioctl		= ave_ioctl,
	.ndo_set_rx_mode	= ave_set_rx_mode,
	.ndo_get_stats64	= ave_get_stats64,
	.ndo_set_mac_address	= ave_set_mac_address,
};
1538 
/* platform probe: map registers, allocate the net_device, parse the device
 * tree (PHY mode, MAC address, clocks, resets, syscon phy-mode selector),
 * set up the MDIO bus and NAPI contexts, then register the netdevice.
 * Returns 0 on success or a negative errno.
 */
static int ave_probe(struct platform_device *pdev)
{
	const struct ave_soc_data *data;
	struct device *dev = &pdev->dev;
	char buf[ETHTOOL_FWVERS_LEN];
	struct of_phandle_args args;
	phy_interface_t phy_mode;
	struct ave_private *priv;
	struct net_device *ndev;
	struct device_node *np;
	struct resource	*res;
	const void *mac_addr;
	void __iomem *base;
	const char *name;
	int i, irq, ret;
	u64 dma_mask;
	u32 ave_id;

	data = of_device_get_match_data(dev);
	if (WARN_ON(!data))
		return -EINVAL;

	np = dev->of_node;
	phy_mode = of_get_phy_mode(np);
	if (phy_mode < 0) {
		dev_err(dev, "phy-mode not found\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "IRQ not found\n");
		return irq;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	ndev = alloc_etherdev(sizeof(struct ave_private));
	if (!ndev) {
		dev_err(dev, "can't allocate ethernet device\n");
		return -ENOMEM;
	}

	ndev->netdev_ops = &ave_netdev_ops;
	ndev->ethtool_ops = &ave_ethtool_ops;
	SET_NETDEV_DEV(ndev, dev);

	/* hardware supports IP checksum offload on both paths */
	ndev->features    |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
	ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);

	ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		ether_addr_copy(ndev->dev_addr, mac_addr);

	/* if the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		dev_warn(dev, "Using random MAC address: %pM\n",
			 ndev->dev_addr);
	}

	priv = netdev_priv(ndev);
	priv->base = base;
	priv->irq = irq;
	priv->ndev = ndev;
	priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE);
	priv->phy_mode = phy_mode;
	priv->data = data;

	/* descriptor layout and DMA addressing width depend on the SoC */
	if (IS_DESC_64BIT(priv)) {
		priv->desc_size = AVE_DESC_SIZE_64;
		priv->tx.daddr  = AVE_TXDM_64;
		priv->rx.daddr  = AVE_RXDM_64;
		dma_mask = DMA_BIT_MASK(64);
	} else {
		priv->desc_size = AVE_DESC_SIZE_32;
		priv->tx.daddr  = AVE_TXDM_32;
		priv->rx.daddr  = AVE_RXDM_32;
		dma_mask = DMA_BIT_MASK(32);
	}
	ret = dma_set_mask(dev, dma_mask);
	if (ret)
		goto out_free_netdev;

	priv->tx.ndesc = AVE_NR_TXDESC;
	priv->rx.ndesc = AVE_NR_RXDESC;

	u64_stats_init(&priv->stats_tx.syncp);
	u64_stats_init(&priv->stats_rx.syncp);

	/* acquire the SoC-specific clocks; the name list is NULL-terminated */
	for (i = 0; i < AVE_MAX_CLKS; i++) {
		name = priv->data->clock_names[i];
		if (!name)
			break;
		priv->clk[i] = devm_clk_get(dev, name);
		if (IS_ERR(priv->clk[i])) {
			ret = PTR_ERR(priv->clk[i]);
			goto out_free_netdev;
		}
		priv->nclks++;
	}

	/* acquire the SoC-specific reset lines, same convention as clocks */
	for (i = 0; i < AVE_MAX_RSTS; i++) {
		name = priv->data->reset_names[i];
		if (!name)
			break;
		priv->rst[i] = devm_reset_control_get_shared(dev, name);
		if (IS_ERR(priv->rst[i])) {
			ret = PTR_ERR(priv->rst[i]);
			goto out_free_netdev;
		}
		priv->nrsts++;
	}

	/* resolve the syscon regmap and pin-mode bits for this PHY mode */
	ret = of_parse_phandle_with_fixed_args(np,
					       "socionext,syscon-phy-mode",
					       1, 0, &args);
	if (ret) {
		netdev_err(ndev, "can't get syscon-phy-mode property\n");
		goto out_free_netdev;
	}
	priv->regmap = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(priv->regmap)) {
		netdev_err(ndev, "can't map syscon-phy-mode\n");
		ret = PTR_ERR(priv->regmap);
		goto out_free_netdev;
	}
	ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]);
	if (ret) {
		netdev_err(ndev, "invalid phy-mode setting\n");
		goto out_free_netdev;
	}

	priv->mdio = devm_mdiobus_alloc(dev);
	if (!priv->mdio) {
		ret = -ENOMEM;
		goto out_free_netdev;
	}
	priv->mdio->priv = ndev;
	priv->mdio->parent = dev;
	priv->mdio->read = ave_mdiobus_read;
	priv->mdio->write = ave_mdiobus_write;
	priv->mdio->name = "uniphier-mdio";
	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* Register as a NAPI supported driver */
	netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc);
	netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx,
			  priv->tx.ndesc);

	platform_set_drvdata(pdev, ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "failed to register netdevice\n");
		goto out_del_napi;
	}

	/* get ID and version */
	ave_id = readl(priv->base + AVE_IDR);
	ave_hw_read_version(ndev, buf, sizeof(buf));

	dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n",
		 (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff,
		 (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff,
		 buf, priv->irq, phy_modes(phy_mode));

	return 0;

out_del_napi:
	netif_napi_del(&priv->napi_rx);
	netif_napi_del(&priv->napi_tx);
out_free_netdev:
	free_netdev(ndev);

	return ret;
}
1723 
1724 static int ave_remove(struct platform_device *pdev)
1725 {
1726 	struct net_device *ndev = platform_get_drvdata(pdev);
1727 	struct ave_private *priv = netdev_priv(ndev);
1728 
1729 	unregister_netdev(ndev);
1730 	netif_napi_del(&priv->napi_rx);
1731 	netif_napi_del(&priv->napi_tx);
1732 	free_netdev(ndev);
1733 
1734 	return 0;
1735 }
1736 
1737 static int ave_pro4_get_pinmode(struct ave_private *priv,
1738 				phy_interface_t phy_mode, u32 arg)
1739 {
1740 	if (arg > 0)
1741 		return -EINVAL;
1742 
1743 	priv->pinmode_mask = SG_ETPINMODE_RMII(0);
1744 
1745 	switch (phy_mode) {
1746 	case PHY_INTERFACE_MODE_RMII:
1747 		priv->pinmode_val = SG_ETPINMODE_RMII(0);
1748 		break;
1749 	case PHY_INTERFACE_MODE_MII:
1750 	case PHY_INTERFACE_MODE_RGMII:
1751 		priv->pinmode_val = 0;
1752 		break;
1753 	default:
1754 		return -EINVAL;
1755 	}
1756 
1757 	return 0;
1758 }
1759 
1760 static int ave_ld11_get_pinmode(struct ave_private *priv,
1761 				phy_interface_t phy_mode, u32 arg)
1762 {
1763 	if (arg > 0)
1764 		return -EINVAL;
1765 
1766 	priv->pinmode_mask = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);
1767 
1768 	switch (phy_mode) {
1769 	case PHY_INTERFACE_MODE_INTERNAL:
1770 		priv->pinmode_val = 0;
1771 		break;
1772 	case PHY_INTERFACE_MODE_RMII:
1773 		priv->pinmode_val = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);
1774 		break;
1775 	default:
1776 		return -EINVAL;
1777 	}
1778 
1779 	return 0;
1780 }
1781 
1782 static int ave_ld20_get_pinmode(struct ave_private *priv,
1783 				phy_interface_t phy_mode, u32 arg)
1784 {
1785 	if (arg > 0)
1786 		return -EINVAL;
1787 
1788 	priv->pinmode_mask = SG_ETPINMODE_RMII(0);
1789 
1790 	switch (phy_mode) {
1791 	case PHY_INTERFACE_MODE_RMII:
1792 		priv->pinmode_val = SG_ETPINMODE_RMII(0);
1793 		break;
1794 	case PHY_INTERFACE_MODE_RGMII:
1795 		priv->pinmode_val = 0;
1796 		break;
1797 	default:
1798 		return -EINVAL;
1799 	}
1800 
1801 	return 0;
1802 }
1803 
1804 static int ave_pxs3_get_pinmode(struct ave_private *priv,
1805 				phy_interface_t phy_mode, u32 arg)
1806 {
1807 	if (arg > 1)
1808 		return -EINVAL;
1809 
1810 	priv->pinmode_mask = SG_ETPINMODE_RMII(arg);
1811 
1812 	switch (phy_mode) {
1813 	case PHY_INTERFACE_MODE_RMII:
1814 		priv->pinmode_val = SG_ETPINMODE_RMII(arg);
1815 		break;
1816 	case PHY_INTERFACE_MODE_RGMII:
1817 		priv->pinmode_val = 0;
1818 		break;
1819 	default:
1820 		return -EINVAL;
1821 	}
1822 
1823 	return 0;
1824 }
1825 
/* UniPhier Pro4: 32-bit descriptors, four clocks, two reset lines */
static const struct ave_soc_data ave_pro4_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"gio", "ether", "ether-gb", "ether-phy",
	},
	.reset_names = {
		"gio", "ether",
	},
	.get_pinmode = ave_pro4_get_pinmode,
};
1836 
/* UniPhier PXs2: 32-bit descriptors; shares pin-mode logic with Pro4 */
static const struct ave_soc_data ave_pxs2_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pro4_get_pinmode,
};
1847 
/* UniPhier LD11: 32-bit descriptors, single clock and reset */
static const struct ave_soc_data ave_ld11_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_ld11_get_pinmode,
};
1858 
/* UniPhier LD20: the only variant using 64-bit descriptors */
static const struct ave_soc_data ave_ld20_data = {
	.is_desc_64bit = true,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_ld20_get_pinmode,
};
1869 
/* UniPhier PXs3: 32-bit descriptors; pin mode selected per MAC instance */
static const struct ave_soc_data ave_pxs3_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pxs3_get_pinmode,
};
1880 
1881 static const struct of_device_id of_ave_match[] = {
1882 	{
1883 		.compatible = "socionext,uniphier-pro4-ave4",
1884 		.data = &ave_pro4_data,
1885 	},
1886 	{
1887 		.compatible = "socionext,uniphier-pxs2-ave4",
1888 		.data = &ave_pxs2_data,
1889 	},
1890 	{
1891 		.compatible = "socionext,uniphier-ld11-ave4",
1892 		.data = &ave_ld11_data,
1893 	},
1894 	{
1895 		.compatible = "socionext,uniphier-ld20-ave4",
1896 		.data = &ave_ld20_data,
1897 	},
1898 	{
1899 		.compatible = "socionext,uniphier-pxs3-ave4",
1900 		.data = &ave_pxs3_data,
1901 	},
1902 	{ /* Sentinel */ }
1903 };
1904 MODULE_DEVICE_TABLE(of, of_ave_match);
1905 
/* platform driver glue; module init/exit generated by module_platform_driver */
static struct platform_driver ave_driver = {
	.probe  = ave_probe,
	.remove = ave_remove,
	.driver	= {
		.name = "ave",
		.of_match_table	= of_ave_match,
	},
};
module_platform_driver(ave_driver);
1915 
1916 MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
1917 MODULE_LICENSE("GPL v2");
1918