1 // SPDX-License-Identifier: GPL-2.0
2 /*  Atheros AR71xx built-in ethernet mac driver
3  *
4  *  Copyright (C) 2019 Oleksij Rempel <o.rempel@pengutronix.de>
5  *
 *  List of authors who contributed to this driver before mainlining:
7  *  Alexander Couzens <lynxis@fe80.eu>
8  *  Christian Lamparter <chunkeey@gmail.com>
9  *  Chuanhong Guo <gch981213@gmail.com>
10  *  Daniel F. Dickinson <cshored@thecshore.com>
11  *  David Bauer <mail@david-bauer.net>
12  *  Felix Fietkau <nbd@nbd.name>
13  *  Gabor Juhos <juhosg@freemail.hu>
14  *  Hauke Mehrtens <hauke@hauke-m.de>
15  *  Johann Neuhauser <johann@it-neuhauser.de>
16  *  John Crispin <john@phrozen.org>
17  *  Jo-Philipp Wich <jo@mein.io>
18  *  Koen Vandeputte <koen.vandeputte@ncentric.com>
19  *  Lucian Cristian <lucian.cristian@gmail.com>
20  *  Matt Merhar <mattmerhar@protonmail.com>
21  *  Milan Krstic <milan.krstic@gmail.com>
22  *  Petr Štetiar <ynezz@true.cz>
23  *  Rosen Penev <rosenp@gmail.com>
24  *  Stephen Walker <stephendwalker+github@gmail.com>
25  *  Vittorio Gambaletta <openwrt@vittgam.net>
26  *  Weijie Gao <hackpascal@gmail.com>
27  *  Imre Kaloz <kaloz@openwrt.org>
28  */
29 
30 #include <linux/if_vlan.h>
31 #include <linux/mfd/syscon.h>
32 #include <linux/of_mdio.h>
33 #include <linux/of_net.h>
34 #include <linux/of_platform.h>
35 #include <linux/phylink.h>
36 #include <linux/regmap.h>
37 #include <linux/reset.h>
38 #include <linux/clk.h>
39 #include <linux/io.h>
40 #include <net/selftests.h>
41 
/* For our NAPI weight, bigger does *NOT* mean better - it means more
43  * D-cache misses and lots more wasted cycles than we'll ever
44  * possibly gain from saving instructions.
45  */
46 #define AG71XX_NAPI_WEIGHT	32
47 #define AG71XX_OOM_REFILL	(1 + HZ / 10)
48 
49 #define AG71XX_INT_ERR	(AG71XX_INT_RX_BE | AG71XX_INT_TX_BE)
50 #define AG71XX_INT_TX	(AG71XX_INT_TX_PS)
51 #define AG71XX_INT_RX	(AG71XX_INT_RX_PR | AG71XX_INT_RX_OF)
52 
53 #define AG71XX_INT_POLL	(AG71XX_INT_RX | AG71XX_INT_TX)
54 #define AG71XX_INT_INIT	(AG71XX_INT_ERR | AG71XX_INT_POLL)
55 
56 #define AG71XX_TX_MTU_LEN	1540
57 
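/* On AR7100 each TX frame is split across several descriptors of at most
 * AG71XX_TX_RING_SPLIT bytes, so a full AG71XX_TX_MTU_LEN frame needs up to
 * AG71XX_TX_RING_DS_PER_PKT descriptors and the TX ring is sized accordingly.
 */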
58 #define AG71XX_TX_RING_SPLIT		512
59 #define AG71XX_TX_RING_DS_PER_PKT	DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \
60 						     AG71XX_TX_RING_SPLIT)
61 #define AG71XX_TX_RING_SIZE_DEFAULT	128
62 #define AG71XX_RX_RING_SIZE_DEFAULT	256
63 
64 #define AG71XX_MDIO_RETRY	1000
65 #define AG71XX_MDIO_DELAY	5
66 #define AG71XX_MDIO_MAX_CLK	5000000
67 
68 /* Register offsets */
69 #define AG71XX_REG_MAC_CFG1	0x0000
70 #define MAC_CFG1_TXE		BIT(0)	/* Tx Enable */
71 #define MAC_CFG1_STX		BIT(1)	/* Synchronize Tx Enable */
72 #define MAC_CFG1_RXE		BIT(2)	/* Rx Enable */
73 #define MAC_CFG1_SRX		BIT(3)	/* Synchronize Rx Enable */
74 #define MAC_CFG1_TFC		BIT(4)	/* Tx Flow Control Enable */
75 #define MAC_CFG1_RFC		BIT(5)	/* Rx Flow Control Enable */
76 #define MAC_CFG1_SR		BIT(31)	/* Soft Reset */
77 #define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
78 			 MAC_CFG1_SRX | MAC_CFG1_STX)
79 
80 #define AG71XX_REG_MAC_CFG2	0x0004
81 #define MAC_CFG2_FDX		BIT(0)
82 #define MAC_CFG2_PAD_CRC_EN	BIT(2)
83 #define MAC_CFG2_LEN_CHECK	BIT(4)
84 #define MAC_CFG2_IF_1000	BIT(9)
85 #define MAC_CFG2_IF_10_100	BIT(8)
86 
87 #define AG71XX_REG_MAC_MFL	0x0010
88 
89 #define AG71XX_REG_MII_CFG	0x0020
90 #define MII_CFG_CLK_DIV_4	0
91 #define MII_CFG_CLK_DIV_6	2
92 #define MII_CFG_CLK_DIV_8	3
93 #define MII_CFG_CLK_DIV_10	4
94 #define MII_CFG_CLK_DIV_14	5
95 #define MII_CFG_CLK_DIV_20	6
96 #define MII_CFG_CLK_DIV_28	7
97 #define MII_CFG_CLK_DIV_34	8
98 #define MII_CFG_CLK_DIV_42	9
99 #define MII_CFG_CLK_DIV_50	10
100 #define MII_CFG_CLK_DIV_58	11
101 #define MII_CFG_CLK_DIV_66	12
102 #define MII_CFG_CLK_DIV_74	13
103 #define MII_CFG_CLK_DIV_82	14
104 #define MII_CFG_CLK_DIV_98	15
105 #define MII_CFG_RESET		BIT(31)
106 
107 #define AG71XX_REG_MII_CMD	0x0024
108 #define MII_CMD_READ		BIT(0)
109 
110 #define AG71XX_REG_MII_ADDR	0x0028
111 #define MII_ADDR_SHIFT		8
112 
113 #define AG71XX_REG_MII_CTRL	0x002c
114 #define AG71XX_REG_MII_STATUS	0x0030
115 #define AG71XX_REG_MII_IND	0x0034
116 #define MII_IND_BUSY		BIT(0)
117 #define MII_IND_INVALID		BIT(2)
118 
119 #define AG71XX_REG_MAC_IFCTL	0x0038
120 #define MAC_IFCTL_SPEED		BIT(16)
121 
122 #define AG71XX_REG_MAC_ADDR1	0x0040
123 #define AG71XX_REG_MAC_ADDR2	0x0044
124 #define AG71XX_REG_FIFO_CFG0	0x0048
125 #define FIFO_CFG0_WTM		BIT(0)	/* Watermark Module */
126 #define FIFO_CFG0_RXS		BIT(1)	/* Rx System Module */
127 #define FIFO_CFG0_RXF		BIT(2)	/* Rx Fabric Module */
128 #define FIFO_CFG0_TXS		BIT(3)	/* Tx System Module */
129 #define FIFO_CFG0_TXF		BIT(4)	/* Tx Fabric Module */
130 #define FIFO_CFG0_ALL	(FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \
131 			| FIFO_CFG0_TXS | FIFO_CFG0_TXF)
132 #define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)
133 
134 #define FIFO_CFG0_ENABLE_SHIFT	8
135 
136 #define AG71XX_REG_FIFO_CFG1	0x004c
137 #define AG71XX_REG_FIFO_CFG2	0x0050
138 #define AG71XX_REG_FIFO_CFG3	0x0054
139 #define AG71XX_REG_FIFO_CFG4	0x0058
140 #define FIFO_CFG4_DE		BIT(0)	/* Drop Event */
141 #define FIFO_CFG4_DV		BIT(1)	/* RX_DV Event */
142 #define FIFO_CFG4_FC		BIT(2)	/* False Carrier */
143 #define FIFO_CFG4_CE		BIT(3)	/* Code Error */
144 #define FIFO_CFG4_CR		BIT(4)	/* CRC error */
145 #define FIFO_CFG4_LM		BIT(5)	/* Length Mismatch */
146 #define FIFO_CFG4_LO		BIT(6)	/* Length out of range */
147 #define FIFO_CFG4_OK		BIT(7)	/* Packet is OK */
148 #define FIFO_CFG4_MC		BIT(8)	/* Multicast Packet */
149 #define FIFO_CFG4_BC		BIT(9)	/* Broadcast Packet */
150 #define FIFO_CFG4_DR		BIT(10)	/* Dribble */
151 #define FIFO_CFG4_LE		BIT(11)	/* Long Event */
152 #define FIFO_CFG4_CF		BIT(12)	/* Control Frame */
153 #define FIFO_CFG4_PF		BIT(13)	/* Pause Frame */
154 #define FIFO_CFG4_UO		BIT(14)	/* Unsupported Opcode */
155 #define FIFO_CFG4_VT		BIT(15)	/* VLAN tag detected */
156 #define FIFO_CFG4_FT		BIT(16)	/* Frame Truncated */
157 #define FIFO_CFG4_UC		BIT(17)	/* Unicast Packet */
158 #define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
159 			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
160 			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
161 			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
162 			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
163 			 FIFO_CFG4_VT)
164 
165 #define AG71XX_REG_FIFO_CFG5	0x005c
166 #define FIFO_CFG5_DE		BIT(0)	/* Drop Event */
167 #define FIFO_CFG5_DV		BIT(1)	/* RX_DV Event */
168 #define FIFO_CFG5_FC		BIT(2)	/* False Carrier */
169 #define FIFO_CFG5_CE		BIT(3)	/* Code Error */
170 #define FIFO_CFG5_LM		BIT(4)	/* Length Mismatch */
171 #define FIFO_CFG5_LO		BIT(5)	/* Length Out of Range */
172 #define FIFO_CFG5_OK		BIT(6)	/* Packet is OK */
173 #define FIFO_CFG5_MC		BIT(7)	/* Multicast Packet */
174 #define FIFO_CFG5_BC		BIT(8)	/* Broadcast Packet */
175 #define FIFO_CFG5_DR		BIT(9)	/* Dribble */
176 #define FIFO_CFG5_CF		BIT(10)	/* Control Frame */
177 #define FIFO_CFG5_PF		BIT(11)	/* Pause Frame */
178 #define FIFO_CFG5_UO		BIT(12)	/* Unsupported Opcode */
179 #define FIFO_CFG5_VT		BIT(13)	/* VLAN tag detected */
180 #define FIFO_CFG5_LE		BIT(14)	/* Long Event */
181 #define FIFO_CFG5_FT		BIT(15)	/* Frame Truncated */
182 #define FIFO_CFG5_16		BIT(16)	/* unknown */
183 #define FIFO_CFG5_17		BIT(17)	/* unknown */
184 #define FIFO_CFG5_SF		BIT(18)	/* Short Frame */
185 #define FIFO_CFG5_BM		BIT(19)	/* Byte Mode */
186 #define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
187 			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
188 			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
189 			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
190 			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
191 			 FIFO_CFG5_17 | FIFO_CFG5_SF)
192 
193 #define AG71XX_REG_TX_CTRL	0x0180
194 #define TX_CTRL_TXE		BIT(0)	/* Tx Enable */
195 
196 #define AG71XX_REG_TX_DESC	0x0184
197 #define AG71XX_REG_TX_STATUS	0x0188
198 #define TX_STATUS_PS		BIT(0)	/* Packet Sent */
199 #define TX_STATUS_UR		BIT(1)	/* Tx Underrun */
200 #define TX_STATUS_BE		BIT(3)	/* Bus Error */
201 
202 #define AG71XX_REG_RX_CTRL	0x018c
203 #define RX_CTRL_RXE		BIT(0)	/* Rx Enable */
204 
205 #define AG71XX_DMA_RETRY	10
206 #define AG71XX_DMA_DELAY	1
207 
208 #define AG71XX_REG_RX_DESC	0x0190
209 #define AG71XX_REG_RX_STATUS	0x0194
210 #define RX_STATUS_PR		BIT(0)	/* Packet Received */
211 #define RX_STATUS_OF		BIT(2)	/* Rx Overflow */
212 #define RX_STATUS_BE		BIT(3)	/* Bus Error */
213 
214 #define AG71XX_REG_INT_ENABLE	0x0198
215 #define AG71XX_REG_INT_STATUS	0x019c
216 #define AG71XX_INT_TX_PS	BIT(0)
217 #define AG71XX_INT_TX_UR	BIT(1)
218 #define AG71XX_INT_TX_BE	BIT(3)
219 #define AG71XX_INT_RX_PR	BIT(4)
220 #define AG71XX_INT_RX_OF	BIT(6)
221 #define AG71XX_INT_RX_BE	BIT(7)
222 
223 #define AG71XX_REG_FIFO_DEPTH	0x01a8
224 #define AG71XX_REG_RX_SM	0x01b0
225 #define AG71XX_REG_TX_SM	0x01b4
226 
227 #define AG71XX_DEFAULT_MSG_ENABLE	\
228 	(NETIF_MSG_DRV			\
229 	| NETIF_MSG_PROBE		\
230 	| NETIF_MSG_LINK		\
231 	| NETIF_MSG_TIMER		\
232 	| NETIF_MSG_IFDOWN		\
233 	| NETIF_MSG_IFUP		\
234 	| NETIF_MSG_RX_ERR		\
235 	| NETIF_MSG_TX_ERR)
236 
237 struct ag71xx_statistic {
238 	unsigned short offset;
239 	u32 mask;
240 	const char name[ETH_GSTRING_LEN];
241 };
242 
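/* Hardware MIB counters exposed through ethtool -S: register offset, mask of
 * implemented counter bits and the reported name.
 */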
243 static const struct ag71xx_statistic ag71xx_statistics[] = {
244 	{ 0x0080, GENMASK(17, 0), "Tx/Rx 64 Byte", },
245 	{ 0x0084, GENMASK(17, 0), "Tx/Rx 65-127 Byte", },
246 	{ 0x0088, GENMASK(17, 0), "Tx/Rx 128-255 Byte", },
247 	{ 0x008C, GENMASK(17, 0), "Tx/Rx 256-511 Byte", },
248 	{ 0x0090, GENMASK(17, 0), "Tx/Rx 512-1023 Byte", },
249 	{ 0x0094, GENMASK(17, 0), "Tx/Rx 1024-1518 Byte", },
250 	{ 0x0098, GENMASK(17, 0), "Tx/Rx 1519-1522 Byte VLAN", },
251 	{ 0x009C, GENMASK(23, 0), "Rx Byte", },
252 	{ 0x00A0, GENMASK(17, 0), "Rx Packet", },
253 	{ 0x00A4, GENMASK(11, 0), "Rx FCS Error", },
254 	{ 0x00A8, GENMASK(17, 0), "Rx Multicast Packet", },
255 	{ 0x00AC, GENMASK(21, 0), "Rx Broadcast Packet", },
256 	{ 0x00B0, GENMASK(17, 0), "Rx Control Frame Packet", },
257 	{ 0x00B4, GENMASK(11, 0), "Rx Pause Frame Packet", },
258 	{ 0x00B8, GENMASK(11, 0), "Rx Unknown OPCode Packet", },
259 	{ 0x00BC, GENMASK(11, 0), "Rx Alignment Error", },
260 	{ 0x00C0, GENMASK(15, 0), "Rx Frame Length Error", },
261 	{ 0x00C4, GENMASK(11, 0), "Rx Code Error", },
262 	{ 0x00C8, GENMASK(11, 0), "Rx Carrier Sense Error", },
263 	{ 0x00CC, GENMASK(11, 0), "Rx Undersize Packet", },
264 	{ 0x00D0, GENMASK(11, 0), "Rx Oversize Packet", },
265 	{ 0x00D4, GENMASK(11, 0), "Rx Fragments", },
266 	{ 0x00D8, GENMASK(11, 0), "Rx Jabber", },
267 	{ 0x00DC, GENMASK(11, 0), "Rx Dropped Packet", },
268 	{ 0x00E0, GENMASK(23, 0), "Tx Byte", },
269 	{ 0x00E4, GENMASK(17, 0), "Tx Packet", },
270 	{ 0x00E8, GENMASK(17, 0), "Tx Multicast Packet", },
271 	{ 0x00EC, GENMASK(17, 0), "Tx Broadcast Packet", },
272 	{ 0x00F0, GENMASK(11, 0), "Tx Pause Control Frame", },
273 	{ 0x00F4, GENMASK(11, 0), "Tx Deferral Packet", },
274 	{ 0x00F8, GENMASK(11, 0), "Tx Excessive Deferral Packet", },
275 	{ 0x00FC, GENMASK(11, 0), "Tx Single Collision Packet", },
276 	{ 0x0100, GENMASK(11, 0), "Tx Multiple Collision", },
277 	{ 0x0104, GENMASK(11, 0), "Tx Late Collision Packet", },
278 	{ 0x0108, GENMASK(11, 0), "Tx Excessive Collision Packet", },
279 	{ 0x010C, GENMASK(12, 0), "Tx Total Collision", },
280 	{ 0x0110, GENMASK(11, 0), "Tx Pause Frames Honored", },
281 	{ 0x0114, GENMASK(11, 0), "Tx Drop Frame", },
282 	{ 0x0118, GENMASK(11, 0), "Tx Jabber Frame", },
283 	{ 0x011C, GENMASK(11, 0), "Tx FCS Error", },
284 	{ 0x0120, GENMASK(11, 0), "Tx Control Frame", },
285 	{ 0x0124, GENMASK(11, 0), "Tx Oversize Frame", },
286 	{ 0x0128, GENMASK(11, 0), "Tx Undersize Frame", },
287 	{ 0x012C, GENMASK(11, 0), "Tx Fragment", },
288 };
289 
290 #define DESC_EMPTY		BIT(31)
291 #define DESC_MORE		BIT(24)
292 #define DESC_PKTLEN_M		0xfff
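/* DMA descriptor as seen by the hardware: data is the buffer bus address,
 * ctrl holds the packet length plus the flags above, and next points to the
 * following descriptor in the ring.
 */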
293 struct ag71xx_desc {
294 	u32 data;
295 	u32 ctrl;
296 	u32 next;
297 	u32 pad;
298 } __aligned(4);
299 
300 #define AG71XX_DESC_SIZE	roundup(sizeof(struct ag71xx_desc), \
301 					L1_CACHE_BYTES)
302 
303 struct ag71xx_buf {
304 	union {
305 		struct {
306 			struct sk_buff *skb;
307 			unsigned int len;
308 		} tx;
309 		struct {
310 			dma_addr_t dma_addr;
311 			void *rx_buf;
312 		} rx;
313 	};
314 };
315 
316 struct ag71xx_ring {
317 	/* "Hot" fields in the data path. */
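	/* Free-running ring indices, masked with (ring size - 1) on use:
	 * curr - dirty is the number of descriptors still to be completed
	 * (TX) or refilled (RX).
	 */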
318 	unsigned int curr;
319 	unsigned int dirty;
320 
321 	/* "Cold" fields - not used in the data path. */
322 	struct ag71xx_buf *buf;
323 	u16 order;
324 	u16 desc_split;
325 	dma_addr_t descs_dma;
326 	u8 *descs_cpu;
327 };
328 
329 enum ag71xx_type {
330 	AR7100,
331 	AR7240,
332 	AR9130,
333 	AR9330,
334 	AR9340,
335 	QCA9530,
336 	QCA9550,
337 };
338 
339 struct ag71xx_dcfg {
340 	u32 max_frame_len;
341 	const u32 *fifodata;
342 	u16 desc_pktlen_mask;
343 	bool tx_hang_workaround;
344 	enum ag71xx_type type;
345 };
346 
347 struct ag71xx {
348 	/* Critical data related to the per-packet data path are clustered
349 	 * early in this structure to help improve the D-cache footprint.
350 	 */
351 	struct ag71xx_ring rx_ring ____cacheline_aligned;
352 	struct ag71xx_ring tx_ring ____cacheline_aligned;
353 
354 	u16 rx_buf_size;
355 	u8 rx_buf_offset;
356 
357 	struct net_device *ndev;
358 	struct platform_device *pdev;
359 	struct napi_struct napi;
360 	u32 msg_enable;
361 	const struct ag71xx_dcfg *dcfg;
362 
363 	/* From this point onwards we're not looking at per-packet fields. */
364 	void __iomem *mac_base;
365 
366 	struct ag71xx_desc *stop_desc;
367 	dma_addr_t stop_desc_dma;
368 
369 	phy_interface_t phy_if_mode;
370 	struct phylink *phylink;
371 	struct phylink_config phylink_config;
372 
373 	struct delayed_work restart_work;
374 	struct timer_list oom_timer;
375 
376 	struct reset_control *mac_reset;
377 
378 	u32 fifodata[3];
379 	int mac_idx;
380 
381 	struct reset_control *mdio_reset;
382 	struct mii_bus *mii_bus;
383 	struct clk *clk_mdio;
384 	struct clk *clk_eth;
385 };
386 
387 static int ag71xx_desc_empty(struct ag71xx_desc *desc)
388 {
389 	return (desc->ctrl & DESC_EMPTY) != 0;
390 }
391 
392 static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
393 {
394 	return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
395 }
396 
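/* smallest power-of-two ring order that can hold the requested number of
 * entries
 */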
397 static int ag71xx_ring_size_order(int size)
398 {
399 	return fls(size - 1);
400 }
401 
402 static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
403 {
404 	return ag->dcfg->type == type;
405 }
406 
407 static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
408 {
409 	iowrite32(value, ag->mac_base + reg);
410 	/* flush write */
411 	(void)ioread32(ag->mac_base + reg);
412 }
413 
414 static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
415 {
416 	return ioread32(ag->mac_base + reg);
417 }
418 
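/* set bits in a MAC register (read-modify-write with posted-write flush) */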
419 static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
420 {
421 	void __iomem *r;
422 
423 	r = ag->mac_base + reg;
424 	iowrite32(ioread32(r) | mask, r);
425 	/* flush write */
426 	(void)ioread32(r);
427 }
428 
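/* clear bits in a MAC register (read-modify-write with posted-write flush) */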
429 static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
430 {
431 	void __iomem *r;
432 
433 	r = ag->mac_base + reg;
434 	iowrite32(ioread32(r) & ~mask, r);
435 	/* flush write */
436 	(void)ioread32(r);
437 }
438 
439 static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
440 {
441 	ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
442 }
443 
444 static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
445 {
446 	ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
447 }
448 
449 static void ag71xx_get_drvinfo(struct net_device *ndev,
450 			       struct ethtool_drvinfo *info)
451 {
452 	struct ag71xx *ag = netdev_priv(ndev);
453 
454 	strscpy(info->driver, "ag71xx", sizeof(info->driver));
455 	strscpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
456 		sizeof(info->bus_info));
457 }
458 
459 static int ag71xx_get_link_ksettings(struct net_device *ndev,
460 				   struct ethtool_link_ksettings *kset)
461 {
462 	struct ag71xx *ag = netdev_priv(ndev);
463 
464 	return phylink_ethtool_ksettings_get(ag->phylink, kset);
465 }
466 
467 static int ag71xx_set_link_ksettings(struct net_device *ndev,
468 				   const struct ethtool_link_ksettings *kset)
469 {
470 	struct ag71xx *ag = netdev_priv(ndev);
471 
472 	return phylink_ethtool_ksettings_set(ag->phylink, kset);
473 }
474 
475 static int ag71xx_ethtool_nway_reset(struct net_device *ndev)
476 {
477 	struct ag71xx *ag = netdev_priv(ndev);
478 
479 	return phylink_ethtool_nway_reset(ag->phylink);
480 }
481 
482 static void ag71xx_ethtool_get_pauseparam(struct net_device *ndev,
483 					  struct ethtool_pauseparam *pause)
484 {
485 	struct ag71xx *ag = netdev_priv(ndev);
486 
487 	phylink_ethtool_get_pauseparam(ag->phylink, pause);
488 }
489 
490 static int ag71xx_ethtool_set_pauseparam(struct net_device *ndev,
491 					 struct ethtool_pauseparam *pause)
492 {
493 	struct ag71xx *ag = netdev_priv(ndev);
494 
495 	return phylink_ethtool_set_pauseparam(ag->phylink, pause);
496 }
497 
498 static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
499 				       u8 *data)
500 {
501 	int i;
502 
503 	switch (sset) {
504 	case ETH_SS_STATS:
505 		for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
506 			memcpy(data + i * ETH_GSTRING_LEN,
507 			       ag71xx_statistics[i].name, ETH_GSTRING_LEN);
508 		break;
509 	case ETH_SS_TEST:
510 		net_selftest_get_strings(data);
511 		break;
512 	}
513 }
514 
515 static void ag71xx_ethtool_get_stats(struct net_device *ndev,
516 				     struct ethtool_stats *stats, u64 *data)
517 {
518 	struct ag71xx *ag = netdev_priv(ndev);
519 	int i;
520 
521 	for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
522 		*data++ = ag71xx_rr(ag, ag71xx_statistics[i].offset)
523 				& ag71xx_statistics[i].mask;
524 }
525 
526 static int ag71xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
527 {
528 	switch (sset) {
529 	case ETH_SS_STATS:
530 		return ARRAY_SIZE(ag71xx_statistics);
531 	case ETH_SS_TEST:
532 		return net_selftest_get_count();
533 	default:
534 		return -EOPNOTSUPP;
535 	}
536 }
537 
538 static const struct ethtool_ops ag71xx_ethtool_ops = {
539 	.get_drvinfo			= ag71xx_get_drvinfo,
540 	.get_link			= ethtool_op_get_link,
541 	.get_ts_info			= ethtool_op_get_ts_info,
542 	.get_link_ksettings		= ag71xx_get_link_ksettings,
543 	.set_link_ksettings		= ag71xx_set_link_ksettings,
544 	.nway_reset			= ag71xx_ethtool_nway_reset,
545 	.get_pauseparam			= ag71xx_ethtool_get_pauseparam,
546 	.set_pauseparam			= ag71xx_ethtool_set_pauseparam,
547 	.get_strings			= ag71xx_ethtool_get_strings,
548 	.get_ethtool_stats		= ag71xx_ethtool_get_stats,
549 	.get_sset_count			= ag71xx_ethtool_get_sset_count,
550 	.self_test			= net_selftest,
551 };
552 
553 static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
554 {
555 	struct net_device *ndev = ag->ndev;
556 	int i;
557 
558 	for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
559 		u32 busy;
560 
561 		udelay(AG71XX_MDIO_DELAY);
562 
563 		busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
564 		if (!busy)
565 			return 0;
566 
567 		udelay(AG71XX_MDIO_DELAY);
568 	}
569 
570 	netif_err(ag, link, ndev, "MDIO operation timed out\n");
571 
572 	return -ETIMEDOUT;
573 }
574 
575 static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
576 {
577 	struct ag71xx *ag = bus->priv;
578 	int err, val;
579 
580 	err = ag71xx_mdio_wait_busy(ag);
581 	if (err)
582 		return err;
583 
584 	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
585 		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
586 	/* enable read mode */
587 	ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);
588 
589 	err = ag71xx_mdio_wait_busy(ag);
590 	if (err)
591 		return err;
592 
593 	val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
594 	/* disable read mode */
595 	ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);
596 
597 	netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
598 		  addr, reg, val);
599 
600 	return val;
601 }
602 
603 static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg,
604 				 u16 val)
605 {
606 	struct ag71xx *ag = bus->priv;
607 
608 	netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
609 		  addr, reg, val);
610 
611 	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
612 		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
613 	ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);
614 
615 	return ag71xx_mdio_wait_busy(ag);
616 }
617 
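/* MDIO clock divider tables: the array index is the value programmed into the
 * clock-divider field of MII_CFG, the entry is the resulting divisor of the
 * MDIO reference clock. ag71xx_mdio_get_divider() picks the first divisor
 * that keeps the MDC frequency at or below AG71XX_MDIO_MAX_CLK.
 */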
618 static const u32 ar71xx_mdio_div_table[] = {
619 	4, 4, 6, 8, 10, 14, 20, 28,
620 };
621 
622 static const u32 ar7240_mdio_div_table[] = {
623 	2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
624 };
625 
626 static const u32 ar933x_mdio_div_table[] = {
627 	4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
628 };
629 
630 static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
631 {
632 	unsigned long ref_clock;
633 	const u32 *table;
634 	int ndivs, i;
635 
636 	ref_clock = clk_get_rate(ag->clk_mdio);
637 	if (!ref_clock)
638 		return -EINVAL;
639 
640 	if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
641 		table = ar933x_mdio_div_table;
642 		ndivs = ARRAY_SIZE(ar933x_mdio_div_table);
643 	} else if (ag71xx_is(ag, AR7240)) {
644 		table = ar7240_mdio_div_table;
645 		ndivs = ARRAY_SIZE(ar7240_mdio_div_table);
646 	} else {
647 		table = ar71xx_mdio_div_table;
648 		ndivs = ARRAY_SIZE(ar71xx_mdio_div_table);
649 	}
650 
651 	for (i = 0; i < ndivs; i++) {
652 		unsigned long t;
653 
654 		t = ref_clock / table[i];
655 		if (t <= AG71XX_MDIO_MAX_CLK) {
656 			*div = i;
657 			return 0;
658 		}
659 	}
660 
661 	return -ENOENT;
662 }
663 
664 static int ag71xx_mdio_reset(struct mii_bus *bus)
665 {
666 	struct ag71xx *ag = bus->priv;
667 	int err;
668 	u32 t;
669 
670 	err = ag71xx_mdio_get_divider(ag, &t);
671 	if (err)
672 		return err;
673 
674 	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
675 	usleep_range(100, 200);
676 
677 	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
678 	usleep_range(100, 200);
679 
680 	return 0;
681 }
682 
683 static int ag71xx_mdio_probe(struct ag71xx *ag)
684 {
685 	struct device *dev = &ag->pdev->dev;
686 	struct net_device *ndev = ag->ndev;
	struct mii_bus *mii_bus;
688 	struct device_node *np, *mnp;
689 	int err;
690 
691 	np = dev->of_node;
692 	ag->mii_bus = NULL;
693 
694 	ag->clk_mdio = devm_clk_get(dev, "mdio");
695 	if (IS_ERR(ag->clk_mdio)) {
696 		netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
697 		return PTR_ERR(ag->clk_mdio);
698 	}
699 
700 	err = clk_prepare_enable(ag->clk_mdio);
701 	if (err) {
702 		netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
703 		return err;
704 	}
705 
706 	mii_bus = devm_mdiobus_alloc(dev);
707 	if (!mii_bus) {
708 		err = -ENOMEM;
709 		goto mdio_err_put_clk;
710 	}
711 
712 	ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
713 	if (IS_ERR(ag->mdio_reset)) {
		netif_err(ag, probe, ndev, "Failed to get mdio reset.\n");
715 		err = PTR_ERR(ag->mdio_reset);
716 		goto mdio_err_put_clk;
717 	}
718 
719 	mii_bus->name = "ag71xx_mdio";
720 	mii_bus->read = ag71xx_mdio_mii_read;
721 	mii_bus->write = ag71xx_mdio_mii_write;
722 	mii_bus->reset = ag71xx_mdio_reset;
723 	mii_bus->priv = ag;
724 	mii_bus->parent = dev;
725 	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);
726 
	reset_control_assert(ag->mdio_reset);
	msleep(100);
	reset_control_deassert(ag->mdio_reset);
	msleep(200);
733 
734 	mnp = of_get_child_by_name(np, "mdio");
735 	err = of_mdiobus_register(mii_bus, mnp);
736 	of_node_put(mnp);
737 	if (err)
738 		goto mdio_err_put_clk;
739 
740 	ag->mii_bus = mii_bus;
741 
742 	return 0;
743 
744 mdio_err_put_clk:
745 	clk_disable_unprepare(ag->clk_mdio);
746 	return err;
747 }
748 
749 static void ag71xx_mdio_remove(struct ag71xx *ag)
750 {
751 	if (ag->mii_bus)
752 		mdiobus_unregister(ag->mii_bus);
753 	clk_disable_unprepare(ag->clk_mdio);
754 }
755 
756 static void ag71xx_hw_stop(struct ag71xx *ag)
757 {
	/* disable all interrupts and stop the rx/tx engines */
759 	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
760 	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
761 	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
762 }
763 
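/* Heuristic for the tx_hang_workaround: once at least HZ/10 has passed since
 * the last queued transmission with the link still up, inspect the RX/TX
 * state machine and FIFO depth registers for patterns that indicate a wedged
 * DMA engine.
 */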
764 static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
765 {
766 	unsigned long timestamp;
767 	u32 rx_sm, tx_sm, rx_fd;
768 
769 	timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start);
770 	if (likely(time_before(jiffies, timestamp + HZ / 10)))
771 		return false;
772 
773 	if (!netif_carrier_ok(ag->ndev))
774 		return false;
775 
776 	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
777 	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
778 		return true;
779 
780 	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
781 	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
782 	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
783 	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
784 		return true;
785 
786 	return false;
787 }
788 
789 static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget)
790 {
791 	struct ag71xx_ring *ring = &ag->tx_ring;
792 	int sent = 0, bytes_compl = 0, n = 0;
793 	struct net_device *ndev = ag->ndev;
794 	int ring_mask, ring_size;
795 	bool dma_stuck = false;
796 
797 	ring_mask = BIT(ring->order) - 1;
798 	ring_size = BIT(ring->order);
799 
800 	netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");
801 
802 	while (ring->dirty + n != ring->curr) {
803 		struct ag71xx_desc *desc;
804 		struct sk_buff *skb;
805 		unsigned int i;
806 
807 		i = (ring->dirty + n) & ring_mask;
808 		desc = ag71xx_ring_desc(ring, i);
809 		skb = ring->buf[i].tx.skb;
810 
811 		if (!flush && !ag71xx_desc_empty(desc)) {
812 			if (ag->dcfg->tx_hang_workaround &&
813 			    ag71xx_check_dma_stuck(ag)) {
814 				schedule_delayed_work(&ag->restart_work,
815 						      HZ / 2);
816 				dma_stuck = true;
817 			}
818 			break;
819 		}
820 
821 		if (flush)
822 			desc->ctrl |= DESC_EMPTY;
823 
824 		n++;
825 		if (!skb)
826 			continue;
827 
828 		napi_consume_skb(skb, budget);
829 		ring->buf[i].tx.skb = NULL;
830 
831 		bytes_compl += ring->buf[i].tx.len;
832 
833 		sent++;
834 		ring->dirty += n;
835 
836 		while (n > 0) {
837 			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
838 			n--;
839 		}
840 	}
841 
842 	netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);
843 
844 	if (!sent)
845 		return 0;
846 
847 	ag->ndev->stats.tx_bytes += bytes_compl;
848 	ag->ndev->stats.tx_packets += sent;
849 
850 	netdev_completed_queue(ag->ndev, sent, bytes_compl);
851 	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
852 		netif_wake_queue(ag->ndev);
853 
854 	if (!dma_stuck)
855 		cancel_delayed_work(&ag->restart_work);
856 
857 	return sent;
858 }
859 
860 static void ag71xx_dma_wait_stop(struct ag71xx *ag)
861 {
862 	struct net_device *ndev = ag->ndev;
863 	int i;
864 
865 	for (i = 0; i < AG71XX_DMA_RETRY; i++) {
866 		u32 rx, tx;
867 
868 		mdelay(AG71XX_DMA_DELAY);
869 
870 		rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
871 		tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
872 		if (!rx && !tx)
873 			return;
874 	}
875 
876 	netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
877 }
878 
879 static void ag71xx_dma_reset(struct ag71xx *ag)
880 {
881 	struct net_device *ndev = ag->ndev;
882 	u32 val;
883 	int i;
884 
885 	/* stop RX and TX */
886 	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
887 	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
888 
	/* Give the hardware some time to really stop all rx/tx activity;
	 * clearing the descriptors too early causes random memory corruption.
891 	 */
892 	ag71xx_dma_wait_stop(ag);
893 
894 	/* clear descriptor addresses */
895 	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
896 	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);
897 
898 	/* clear pending RX/TX interrupts */
899 	for (i = 0; i < 256; i++) {
900 		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
901 		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
902 	}
903 
904 	/* clear pending errors */
905 	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
906 	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);
907 
908 	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
909 	if (val)
910 		netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
911 			  val);
912 
913 	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
914 
915 	/* mask out reserved bits */
916 	val &= ~0xff000000;
917 
918 	if (val)
919 		netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
920 			  val);
921 }
922 
923 static void ag71xx_hw_setup(struct ag71xx *ag)
924 {
925 	u32 init = MAC_CFG1_INIT;
926 
927 	/* setup MAC configuration registers */
928 	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);
929 
930 	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
931 		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);
932 
	/* clear the max frame length; the real value is programmed once the
	 * MTU is known
	 */
934 	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);
935 
936 	/* setup FIFO configuration registers */
937 	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
938 	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
939 	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
940 	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
941 	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
942 }
943 
944 static unsigned int ag71xx_max_frame_len(unsigned int mtu)
945 {
946 	return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
947 }
948 
949 static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac)
950 {
951 	u32 t;
952 
953 	t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
954 	  | (((u32)mac[3]) << 8) | ((u32)mac[2]);
955 
956 	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);
957 
958 	t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
959 	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
960 }
961 
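/* Partial MAC reset that preserves the MII configuration and the current RX
 * descriptor pointer; called from ag71xx_mac_config() on everything except
 * AR7100/AR9130 when the link configuration changes.
 */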
962 static void ag71xx_fast_reset(struct ag71xx *ag)
963 {
964 	struct net_device *dev = ag->ndev;
965 	u32 rx_ds;
966 	u32 mii_reg;
967 
968 	ag71xx_hw_stop(ag);
969 
970 	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
971 	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
972 
973 	ag71xx_tx_packets(ag, true, 0);
974 
975 	reset_control_assert(ag->mac_reset);
976 	usleep_range(10, 20);
977 	reset_control_deassert(ag->mac_reset);
978 	usleep_range(10, 20);
979 
980 	ag71xx_dma_reset(ag);
981 	ag71xx_hw_setup(ag);
982 	ag->tx_ring.curr = 0;
983 	ag->tx_ring.dirty = 0;
984 	netdev_reset_queue(ag->ndev);
985 
986 	/* setup max frame length */
987 	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
988 		  ag71xx_max_frame_len(ag->ndev->mtu));
989 
990 	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
991 	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
992 	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);
993 
994 	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
995 }
996 
997 static void ag71xx_hw_start(struct ag71xx *ag)
998 {
999 	/* start RX engine */
1000 	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
1001 
1002 	/* enable interrupts */
1003 	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
1004 
1005 	netif_wake_queue(ag->ndev);
1006 }
1007 
1008 static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
1009 			      const struct phylink_link_state *state)
1010 {
1011 	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1012 
1013 	if (phylink_autoneg_inband(mode))
1014 		return;
1015 
1016 	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
1017 		ag71xx_fast_reset(ag);
1018 
1019 	if (ag->tx_ring.desc_split) {
1020 		ag->fifodata[2] &= 0xffff;
1021 		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
1022 	}
1023 
1024 	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
1025 }
1026 
1027 static void ag71xx_mac_link_down(struct phylink_config *config,
1028 				 unsigned int mode, phy_interface_t interface)
1029 {
1030 	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1031 
1032 	ag71xx_hw_stop(ag);
1033 }
1034 
1035 static void ag71xx_mac_link_up(struct phylink_config *config,
1036 			       struct phy_device *phy,
1037 			       unsigned int mode, phy_interface_t interface,
1038 			       int speed, int duplex,
1039 			       bool tx_pause, bool rx_pause)
1040 {
1041 	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1042 	u32 cfg1, cfg2;
1043 	u32 ifctl;
1044 	u32 fifo5;
1045 
1046 	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
1047 	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
1048 	cfg2 |= duplex ? MAC_CFG2_FDX : 0;
1049 
1050 	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
1051 	ifctl &= ~(MAC_IFCTL_SPEED);
1052 
1053 	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
1054 	fifo5 &= ~FIFO_CFG5_BM;
1055 
1056 	switch (speed) {
1057 	case SPEED_1000:
1058 		cfg2 |= MAC_CFG2_IF_1000;
1059 		fifo5 |= FIFO_CFG5_BM;
1060 		break;
1061 	case SPEED_100:
1062 		cfg2 |= MAC_CFG2_IF_10_100;
1063 		ifctl |= MAC_IFCTL_SPEED;
1064 		break;
1065 	case SPEED_10:
1066 		cfg2 |= MAC_CFG2_IF_10_100;
1067 		break;
1068 	default:
1069 		return;
1070 	}
1071 
1072 	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
1073 	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
1074 	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
1075 
1076 	cfg1 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG1);
1077 	cfg1 &= ~(MAC_CFG1_TFC | MAC_CFG1_RFC);
1078 	if (tx_pause)
1079 		cfg1 |= MAC_CFG1_TFC;
1080 
1081 	if (rx_pause)
1082 		cfg1 |= MAC_CFG1_RFC;
1083 	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, cfg1);
1084 
1085 	ag71xx_hw_start(ag);
1086 }
1087 
1088 static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
1089 	.mac_config = ag71xx_mac_config,
1090 	.mac_link_down = ag71xx_mac_link_down,
1091 	.mac_link_up = ag71xx_mac_link_up,
1092 };
1093 
1094 static int ag71xx_phylink_setup(struct ag71xx *ag)
1095 {
1096 	struct phylink *phylink;
1097 
1098 	ag->phylink_config.dev = &ag->ndev->dev;
1099 	ag->phylink_config.type = PHYLINK_NETDEV;
1100 	ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
1101 		MAC_10 | MAC_100 | MAC_1000FD;
1102 
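	/* The usable PHY interface modes depend on the SoC and on which of
	 * the two built-in MACs this instance is (ag->mac_idx).
	 */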
1103 	if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
1104 	    ag71xx_is(ag, AR9340) ||
1105 	    ag71xx_is(ag, QCA9530) ||
1106 	    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
1107 		__set_bit(PHY_INTERFACE_MODE_MII,
1108 			  ag->phylink_config.supported_interfaces);
1109 
1110 	if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
1111 	    (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
1112 	    (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
1113 		__set_bit(PHY_INTERFACE_MODE_GMII,
1114 			  ag->phylink_config.supported_interfaces);
1115 
1116 	if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
1117 		__set_bit(PHY_INTERFACE_MODE_SGMII,
1118 			  ag->phylink_config.supported_interfaces);
1119 
1120 	if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
1121 		__set_bit(PHY_INTERFACE_MODE_RMII,
1122 			  ag->phylink_config.supported_interfaces);
1123 
1124 	if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
1125 	    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
1126 		__set_bit(PHY_INTERFACE_MODE_RGMII,
1127 			  ag->phylink_config.supported_interfaces);
1128 
1129 	phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
1130 				 ag->phy_if_mode, &ag71xx_phylink_mac_ops);
1131 	if (IS_ERR(phylink))
1132 		return PTR_ERR(phylink);
1133 
1134 	ag->phylink = phylink;
1135 	return 0;
1136 }
1137 
1138 static void ag71xx_ring_tx_clean(struct ag71xx *ag)
1139 {
1140 	struct ag71xx_ring *ring = &ag->tx_ring;
1141 	int ring_mask = BIT(ring->order) - 1;
1142 	u32 bytes_compl = 0, pkts_compl = 0;
1143 	struct net_device *ndev = ag->ndev;
1144 
1145 	while (ring->curr != ring->dirty) {
1146 		struct ag71xx_desc *desc;
1147 		u32 i = ring->dirty & ring_mask;
1148 
1149 		desc = ag71xx_ring_desc(ring, i);
1150 		if (!ag71xx_desc_empty(desc)) {
1151 			desc->ctrl = 0;
1152 			ndev->stats.tx_errors++;
1153 		}
1154 
1155 		if (ring->buf[i].tx.skb) {
1156 			bytes_compl += ring->buf[i].tx.len;
1157 			pkts_compl++;
1158 			dev_kfree_skb_any(ring->buf[i].tx.skb);
1159 		}
1160 		ring->buf[i].tx.skb = NULL;
1161 		ring->dirty++;
1162 	}
1163 
1164 	/* flush descriptors */
1165 	wmb();
1166 
1167 	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
1168 }
1169 
1170 static void ag71xx_ring_tx_init(struct ag71xx *ag)
1171 {
1172 	struct ag71xx_ring *ring = &ag->tx_ring;
1173 	int ring_size = BIT(ring->order);
1174 	int ring_mask = ring_size - 1;
1175 	int i;
1176 
1177 	for (i = 0; i < ring_size; i++) {
1178 		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
1179 
1180 		desc->next = (u32)(ring->descs_dma +
1181 			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
1182 
1183 		desc->ctrl = DESC_EMPTY;
1184 		ring->buf[i].tx.skb = NULL;
1185 	}
1186 
1187 	/* flush descriptors */
1188 	wmb();
1189 
1190 	ring->curr = 0;
1191 	ring->dirty = 0;
1192 	netdev_reset_queue(ag->ndev);
1193 }
1194 
1195 static void ag71xx_ring_rx_clean(struct ag71xx *ag)
1196 {
1197 	struct ag71xx_ring *ring = &ag->rx_ring;
1198 	int ring_size = BIT(ring->order);
1199 	int i;
1200 
1201 	if (!ring->buf)
1202 		return;
1203 
1204 	for (i = 0; i < ring_size; i++)
1205 		if (ring->buf[i].rx.rx_buf) {
1206 			dma_unmap_single(&ag->pdev->dev,
1207 					 ring->buf[i].rx.dma_addr,
1208 					 ag->rx_buf_size, DMA_FROM_DEVICE);
1209 			skb_free_frag(ring->buf[i].rx.rx_buf);
1210 		}
1211 }
1212 
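/* Size of one RX frag allocation: the DMA-visible data area plus room for the
 * struct skb_shared_info appended by napi_build_skb().
 */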
1213 static int ag71xx_buffer_size(struct ag71xx *ag)
1214 {
1215 	return ag->rx_buf_size +
1216 	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1217 }
1218 
1219 static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
1220 			       int offset,
1221 			       void *(*alloc)(unsigned int size))
1222 {
1223 	struct ag71xx_ring *ring = &ag->rx_ring;
1224 	struct ag71xx_desc *desc;
1225 	void *data;
1226 
1227 	desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
1228 
1229 	data = alloc(ag71xx_buffer_size(ag));
1230 	if (!data)
1231 		return false;
1232 
1233 	buf->rx.rx_buf = data;
1234 	buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
1235 					  DMA_FROM_DEVICE);
1236 	desc->data = (u32)buf->rx.dma_addr + offset;
1237 	return true;
1238 }
1239 
1240 static int ag71xx_ring_rx_init(struct ag71xx *ag)
1241 {
1242 	struct ag71xx_ring *ring = &ag->rx_ring;
1243 	struct net_device *ndev = ag->ndev;
1244 	int ring_mask = BIT(ring->order) - 1;
1245 	int ring_size = BIT(ring->order);
1246 	unsigned int i;
1247 	int ret;
1248 
1249 	ret = 0;
1250 	for (i = 0; i < ring_size; i++) {
1251 		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
1252 
1253 		desc->next = (u32)(ring->descs_dma +
1254 			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
1255 
1256 		netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
1257 			  desc, desc->next);
1258 	}
1259 
1260 	for (i = 0; i < ring_size; i++) {
1261 		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
1262 
1263 		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
1264 					netdev_alloc_frag)) {
1265 			ret = -ENOMEM;
1266 			break;
1267 		}
1268 
1269 		desc->ctrl = DESC_EMPTY;
1270 	}
1271 
1272 	/* flush descriptors */
1273 	wmb();
1274 
1275 	ring->curr = 0;
1276 	ring->dirty = 0;
1277 
1278 	return ret;
1279 }
1280 
1281 static int ag71xx_ring_rx_refill(struct ag71xx *ag)
1282 {
1283 	struct ag71xx_ring *ring = &ag->rx_ring;
1284 	int ring_mask = BIT(ring->order) - 1;
1285 	int offset = ag->rx_buf_offset;
1286 	unsigned int count;
1287 
1288 	count = 0;
1289 	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
1290 		struct ag71xx_desc *desc;
1291 		unsigned int i;
1292 
1293 		i = ring->dirty & ring_mask;
1294 		desc = ag71xx_ring_desc(ring, i);
1295 
1296 		if (!ring->buf[i].rx.rx_buf &&
1297 		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
1298 					napi_alloc_frag))
1299 			break;
1300 
1301 		desc->ctrl = DESC_EMPTY;
1302 		count++;
1303 	}
1304 
1305 	/* flush descriptors */
1306 	wmb();
1307 
1308 	netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
1309 		  count);
1310 
1311 	return count;
1312 }
1313 
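/* TX and RX rings share a single buf[] array and one coherent descriptor
 * allocation; the RX entries start right after the TX entries.
 */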
1314 static int ag71xx_rings_init(struct ag71xx *ag)
1315 {
1316 	struct ag71xx_ring *tx = &ag->tx_ring;
1317 	struct ag71xx_ring *rx = &ag->rx_ring;
1318 	int ring_size, tx_size;
1319 
1320 	ring_size = BIT(tx->order) + BIT(rx->order);
1321 	tx_size = BIT(tx->order);
1322 
1323 	tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL);
1324 	if (!tx->buf)
1325 		return -ENOMEM;
1326 
1327 	tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
1328 					   ring_size * AG71XX_DESC_SIZE,
1329 					   &tx->descs_dma, GFP_KERNEL);
1330 	if (!tx->descs_cpu) {
1331 		kfree(tx->buf);
1332 		tx->buf = NULL;
1333 		return -ENOMEM;
1334 	}
1335 
1336 	rx->buf = &tx->buf[tx_size];
1337 	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
1338 	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;
1339 
1340 	ag71xx_ring_tx_init(ag);
1341 	return ag71xx_ring_rx_init(ag);
1342 }
1343 
1344 static void ag71xx_rings_free(struct ag71xx *ag)
1345 {
1346 	struct ag71xx_ring *tx = &ag->tx_ring;
1347 	struct ag71xx_ring *rx = &ag->rx_ring;
1348 	int ring_size;
1349 
1350 	ring_size = BIT(tx->order) + BIT(rx->order);
1351 
1352 	if (tx->descs_cpu)
1353 		dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
1354 				  tx->descs_cpu, tx->descs_dma);
1355 
1356 	kfree(tx->buf);
1357 
1358 	tx->descs_cpu = NULL;
1359 	rx->descs_cpu = NULL;
1360 	tx->buf = NULL;
1361 	rx->buf = NULL;
1362 }
1363 
1364 static void ag71xx_rings_cleanup(struct ag71xx *ag)
1365 {
1366 	ag71xx_ring_rx_clean(ag);
1367 	ag71xx_ring_tx_clean(ag);
1368 	ag71xx_rings_free(ag);
1369 
1370 	netdev_reset_queue(ag->ndev);
1371 }
1372 
1373 static void ag71xx_hw_init(struct ag71xx *ag)
1374 {
1375 	ag71xx_hw_stop(ag);
1376 
1377 	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
1378 	usleep_range(20, 30);
1379 
1380 	reset_control_assert(ag->mac_reset);
1381 	msleep(100);
1382 	reset_control_deassert(ag->mac_reset);
1383 	msleep(200);
1384 
1385 	ag71xx_hw_setup(ag);
1386 
1387 	ag71xx_dma_reset(ag);
1388 }
1389 
1390 static int ag71xx_hw_enable(struct ag71xx *ag)
1391 {
1392 	int ret;
1393 
1394 	ret = ag71xx_rings_init(ag);
1395 	if (ret)
1396 		return ret;
1397 
1398 	napi_enable(&ag->napi);
1399 	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
1400 	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
1401 	netif_start_queue(ag->ndev);
1402 
1403 	return 0;
1404 }
1405 
1406 static void ag71xx_hw_disable(struct ag71xx *ag)
1407 {
1408 	netif_stop_queue(ag->ndev);
1409 
1410 	ag71xx_hw_stop(ag);
1411 	ag71xx_dma_reset(ag);
1412 
1413 	napi_disable(&ag->napi);
1414 	del_timer_sync(&ag->oom_timer);
1415 
1416 	ag71xx_rings_cleanup(ag);
1417 }
1418 
1419 static int ag71xx_open(struct net_device *ndev)
1420 {
1421 	struct ag71xx *ag = netdev_priv(ndev);
1422 	unsigned int max_frame_len;
1423 	int ret;
1424 
1425 	ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0);
1426 	if (ret) {
		netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",
1428 			  ret);
1429 		return ret;
1430 	}
1431 
1432 	max_frame_len = ag71xx_max_frame_len(ndev->mtu);
1433 	ag->rx_buf_size =
1434 		SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);
1435 
1436 	/* setup max frame length */
1437 	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
1438 	ag71xx_hw_set_macaddr(ag, ndev->dev_addr);
1439 
1440 	ret = ag71xx_hw_enable(ag);
1441 	if (ret)
1442 		goto err;
1443 
1444 	phylink_start(ag->phylink);
1445 
1446 	return 0;
1447 
1448 err:
1449 	ag71xx_rings_cleanup(ag);
1450 	phylink_disconnect_phy(ag->phylink);
1451 	return ret;
1452 }
1453 
1454 static int ag71xx_stop(struct net_device *ndev)
1455 {
1456 	struct ag71xx *ag = netdev_priv(ndev);
1457 
1458 	phylink_stop(ag->phylink);
1459 	phylink_disconnect_phy(ag->phylink);
1460 	ag71xx_hw_disable(ag);
1461 
1462 	return 0;
1463 }
1464 
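/* Fill TX descriptors for one frame starting at ring->curr, splitting it into
 * chunks of at most ring->desc_split bytes when splitting is enabled.
 * Returns the number of descriptors used, or -1 if a required descriptor is
 * still owned by the hardware.
 */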
1465 static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
1466 {
1467 	int i, ring_mask, ndesc, split;
1468 	struct ag71xx_desc *desc;
1469 
1470 	ring_mask = BIT(ring->order) - 1;
1471 	ndesc = 0;
1472 	split = ring->desc_split;
1473 
1474 	if (!split)
1475 		split = len;
1476 
1477 	while (len > 0) {
1478 		unsigned int cur_len = len;
1479 
1480 		i = (ring->curr + ndesc) & ring_mask;
1481 		desc = ag71xx_ring_desc(ring, i);
1482 
1483 		if (!ag71xx_desc_empty(desc))
1484 			return -1;
1485 
1486 		if (cur_len > split) {
1487 			cur_len = split;
1488 
			/* TX will hang if DMA transfers <= 4 bytes;
			 * make sure the next segment is more than 4 bytes long.
1491 			 */
1492 			if (len <= split + 4)
1493 				cur_len -= 4;
1494 		}
1495 
1496 		desc->data = addr;
1497 		addr += cur_len;
1498 		len -= cur_len;
1499 
1500 		if (len > 0)
1501 			cur_len |= DESC_MORE;
1502 
1503 		/* prevent early tx attempt of this descriptor */
1504 		if (!ndesc)
1505 			cur_len |= DESC_EMPTY;
1506 
1507 		desc->ctrl = cur_len;
1508 		ndesc++;
1509 	}
1510 
1511 	return ndesc;
1512 }
1513 
1514 static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
1515 					  struct net_device *ndev)
1516 {
1517 	int i, n, ring_min, ring_mask, ring_size;
1518 	struct ag71xx *ag = netdev_priv(ndev);
1519 	struct ag71xx_ring *ring;
1520 	struct ag71xx_desc *desc;
1521 	dma_addr_t dma_addr;
1522 
1523 	ring = &ag->tx_ring;
1524 	ring_mask = BIT(ring->order) - 1;
1525 	ring_size = BIT(ring->order);
1526 
1527 	if (skb->len <= 4) {
1528 		netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
1529 		goto err_drop;
1530 	}
1531 
1532 	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
1533 				  DMA_TO_DEVICE);
1534 
1535 	i = ring->curr & ring_mask;
1536 	desc = ag71xx_ring_desc(ring, i);
1537 
1538 	/* setup descriptor fields */
1539 	n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
1540 				 skb->len & ag->dcfg->desc_pktlen_mask);
1541 	if (n < 0)
1542 		goto err_drop_unmap;
1543 
1544 	i = (ring->curr + n - 1) & ring_mask;
1545 	ring->buf[i].tx.len = skb->len;
1546 	ring->buf[i].tx.skb = skb;
1547 
1548 	netdev_sent_queue(ndev, skb->len);
1549 
1550 	skb_tx_timestamp(skb);
1551 
1552 	desc->ctrl &= ~DESC_EMPTY;
1553 	ring->curr += n;
1554 
1555 	/* flush descriptor */
1556 	wmb();
1557 
1558 	ring_min = 2;
1559 	if (ring->desc_split)
1560 		ring_min *= AG71XX_TX_RING_DS_PER_PKT;
1561 
1562 	if (ring->curr - ring->dirty >= ring_size - ring_min) {
1563 		netif_dbg(ag, tx_err, ndev, "tx queue full\n");
1564 		netif_stop_queue(ndev);
1565 	}
1566 
1567 	netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");
1568 
1569 	/* enable TX engine */
1570 	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);
1571 
1572 	return NETDEV_TX_OK;
1573 
1574 err_drop_unmap:
1575 	dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);
1576 
1577 err_drop:
1578 	ndev->stats.tx_dropped++;
1579 
1580 	dev_kfree_skb(skb);
1581 	return NETDEV_TX_OK;
1582 }
1583 
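/* Fires after an RX buffer allocation failure in ag71xx_poll(); reschedules
 * NAPI so the RX ring refill can be retried.
 */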
1584 static void ag71xx_oom_timer_handler(struct timer_list *t)
1585 {
1586 	struct ag71xx *ag = from_timer(ag, t, oom_timer);
1587 
1588 	napi_schedule(&ag->napi);
1589 }
1590 
1591 static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1592 {
1593 	struct ag71xx *ag = netdev_priv(ndev);
1594 
1595 	netif_err(ag, tx_err, ndev, "tx timeout\n");
1596 
1597 	schedule_delayed_work(&ag->restart_work, 1);
1598 }
1599 
1600 static void ag71xx_restart_work_func(struct work_struct *work)
1601 {
1602 	struct ag71xx *ag = container_of(work, struct ag71xx,
1603 					 restart_work.work);
1604 
1605 	rtnl_lock();
1606 	ag71xx_hw_disable(ag);
1607 	ag71xx_hw_enable(ag);
1608 
1609 	phylink_stop(ag->phylink);
1610 	phylink_start(ag->phylink);
1611 
1612 	rtnl_unlock();
1613 }
1614 
1615 static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
1616 {
1617 	struct net_device *ndev = ag->ndev;
1618 	int ring_mask, ring_size, done = 0;
1619 	unsigned int pktlen_mask, offset;
1620 	struct ag71xx_ring *ring;
1621 	struct list_head rx_list;
1622 	struct sk_buff *skb;
1623 
1624 	ring = &ag->rx_ring;
1625 	pktlen_mask = ag->dcfg->desc_pktlen_mask;
1626 	offset = ag->rx_buf_offset;
1627 	ring_mask = BIT(ring->order) - 1;
1628 	ring_size = BIT(ring->order);
1629 
1630 	netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
1631 		  limit, ring->curr, ring->dirty);
1632 
1633 	INIT_LIST_HEAD(&rx_list);
1634 
1635 	while (done < limit) {
1636 		unsigned int i = ring->curr & ring_mask;
1637 		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
1638 		int pktlen;
1639 		int err = 0;
1640 
1641 		if (ag71xx_desc_empty(desc))
1642 			break;
1643 
1644 		if ((ring->dirty + ring_size) == ring->curr) {
1645 			WARN_ONCE(1, "RX out of ring");
1646 			break;
1647 		}
1648 
1649 		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
1650 
1651 		pktlen = desc->ctrl & pktlen_mask;
1652 		pktlen -= ETH_FCS_LEN;
1653 
1654 		dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
1655 				 ag->rx_buf_size, DMA_FROM_DEVICE);
1656 
1657 		ndev->stats.rx_packets++;
1658 		ndev->stats.rx_bytes += pktlen;
1659 
1660 		skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
1661 		if (!skb) {
1662 			skb_free_frag(ring->buf[i].rx.rx_buf);
1663 			goto next;
1664 		}
1665 
1666 		skb_reserve(skb, offset);
1667 		skb_put(skb, pktlen);
1668 
1669 		if (err) {
1670 			ndev->stats.rx_dropped++;
1671 			kfree_skb(skb);
1672 		} else {
1673 			skb->dev = ndev;
1674 			skb->ip_summed = CHECKSUM_NONE;
1675 			list_add_tail(&skb->list, &rx_list);
1676 		}
1677 
1678 next:
1679 		ring->buf[i].rx.rx_buf = NULL;
1680 		done++;
1681 
1682 		ring->curr++;
1683 	}
1684 
1685 	ag71xx_ring_rx_refill(ag);
1686 
1687 	list_for_each_entry(skb, &rx_list, list)
1688 		skb->protocol = eth_type_trans(skb, ndev);
1689 	netif_receive_skb_list(&rx_list);
1690 
1691 	netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
1692 		  ring->curr, ring->dirty, done);
1693 
1694 	return done;
1695 }
1696 
1697 static int ag71xx_poll(struct napi_struct *napi, int limit)
1698 {
1699 	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
1700 	struct ag71xx_ring *rx_ring = &ag->rx_ring;
1701 	int rx_ring_size = BIT(rx_ring->order);
1702 	struct net_device *ndev = ag->ndev;
1703 	int tx_done, rx_done;
1704 	u32 status;
1705 
1706 	tx_done = ag71xx_tx_packets(ag, false, limit);
1707 
1708 	netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
1709 	rx_done = ag71xx_rx_packets(ag, limit);
1710 
1711 	if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
1712 		goto oom;
1713 
1714 	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
1715 	if (unlikely(status & RX_STATUS_OF)) {
1716 		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
1717 		ndev->stats.rx_fifo_errors++;
1718 
1719 		/* restart RX */
1720 		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
1721 	}
1722 
1723 	if (rx_done < limit) {
1724 		if (status & RX_STATUS_PR)
1725 			goto more;
1726 
1727 		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
1728 		if (status & TX_STATUS_PS)
1729 			goto more;
1730 
		netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d, limit=%d\n",
1732 			  rx_done, tx_done, limit);
1733 
1734 		napi_complete(napi);
1735 
1736 		/* enable interrupts */
1737 		ag71xx_int_enable(ag, AG71XX_INT_POLL);
1738 		return rx_done;
1739 	}
1740 
1741 more:
1742 	netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
1743 		  rx_done, tx_done, limit);
1744 	return limit;
1745 
1746 oom:
1747 	netif_err(ag, rx_err, ndev, "out of memory\n");
1748 
1749 	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
1750 	napi_complete(napi);
1751 	return 0;
1752 }
1753 
1754 static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
1755 {
1756 	struct net_device *ndev = dev_id;
1757 	struct ag71xx *ag;
1758 	u32 status;
1759 
1760 	ag = netdev_priv(ndev);
1761 	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
1762 
1763 	if (unlikely(!status))
1764 		return IRQ_NONE;
1765 
1766 	if (unlikely(status & AG71XX_INT_ERR)) {
1767 		if (status & AG71XX_INT_TX_BE) {
1768 			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
1769 			netif_err(ag, intr, ndev, "TX BUS error\n");
1770 		}
1771 		if (status & AG71XX_INT_RX_BE) {
1772 			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
1773 			netif_err(ag, intr, ndev, "RX BUS error\n");
1774 		}
1775 	}
1776 
1777 	if (likely(status & AG71XX_INT_POLL)) {
1778 		ag71xx_int_disable(ag, AG71XX_INT_POLL);
1779 		netif_dbg(ag, intr, ndev, "enable polling mode\n");
1780 		napi_schedule(&ag->napi);
1781 	}
1782 
1783 	return IRQ_HANDLED;
1784 }
1785 
1786 static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
1787 {
1788 	struct ag71xx *ag = netdev_priv(ndev);
1789 
1790 	ndev->mtu = new_mtu;
1791 	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
1792 		  ag71xx_max_frame_len(ndev->mtu));
1793 
1794 	return 0;
1795 }
1796 
1797 static const struct net_device_ops ag71xx_netdev_ops = {
1798 	.ndo_open		= ag71xx_open,
1799 	.ndo_stop		= ag71xx_stop,
1800 	.ndo_start_xmit		= ag71xx_hard_start_xmit,
1801 	.ndo_eth_ioctl		= phy_do_ioctl,
1802 	.ndo_tx_timeout		= ag71xx_tx_timeout,
1803 	.ndo_change_mtu		= ag71xx_change_mtu,
1804 	.ndo_set_mac_address	= eth_mac_addr,
1805 	.ndo_validate_addr	= eth_validate_addr,
1806 };
1807 
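/* MMIO base addresses of the two built-in MACs; probe uses them to derive
 * ag->mac_idx from the register resource.
 */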
1808 static const u32 ar71xx_addr_ar7100[] = {
1809 	0x19000000, 0x1a000000,
1810 };
1811 
1812 static int ag71xx_probe(struct platform_device *pdev)
1813 {
1814 	struct device_node *np = pdev->dev.of_node;
1815 	const struct ag71xx_dcfg *dcfg;
1816 	struct net_device *ndev;
1817 	struct resource *res;
1818 	int tx_size, err, i;
1819 	struct ag71xx *ag;
1820 
1821 	if (!np)
1822 		return -ENODEV;
1823 
1824 	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
1825 	if (!ndev)
1826 		return -ENOMEM;
1827 
1828 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1829 	if (!res)
1830 		return -EINVAL;
1831 
1832 	dcfg = of_device_get_match_data(&pdev->dev);
1833 	if (!dcfg)
1834 		return -EINVAL;
1835 
1836 	ag = netdev_priv(ndev);
1837 	ag->mac_idx = -1;
1838 	for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) {
1839 		if (ar71xx_addr_ar7100[i] == res->start)
1840 			ag->mac_idx = i;
1841 	}
1842 
1843 	if (ag->mac_idx < 0) {
1844 		netif_err(ag, probe, ndev, "unknown mac idx\n");
1845 		return -EINVAL;
1846 	}
1847 
1848 	ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
1849 	if (IS_ERR(ag->clk_eth)) {
1850 		netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
1851 		return PTR_ERR(ag->clk_eth);
1852 	}
1853 
1854 	SET_NETDEV_DEV(ndev, &pdev->dev);
1855 
1856 	ag->pdev = pdev;
1857 	ag->ndev = ndev;
1858 	ag->dcfg = dcfg;
1859 	ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
1860 	memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));
1861 
1862 	ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
1863 	if (IS_ERR(ag->mac_reset)) {
1864 		netif_err(ag, probe, ndev, "missing mac reset\n");
1865 		return PTR_ERR(ag->mac_reset);
1866 	}
1867 
1868 	ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1869 	if (!ag->mac_base)
1870 		return -ENOMEM;
1871 
1872 	ndev->irq = platform_get_irq(pdev, 0);
1873 	err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
1874 			       0x0, dev_name(&pdev->dev), ndev);
1875 	if (err) {
1876 		netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
1877 			  ndev->irq);
1878 		return err;
1879 	}
1880 
1881 	ndev->netdev_ops = &ag71xx_netdev_ops;
1882 	ndev->ethtool_ops = &ag71xx_ethtool_ops;
1883 
1884 	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
1885 	timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);
1886 
1887 	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
1888 	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);
1889 
1890 	ndev->min_mtu = 68;
1891 	ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0);
1892 
1893 	ag->rx_buf_offset = NET_SKB_PAD;
1894 	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
1895 		ag->rx_buf_offset += NET_IP_ALIGN;
1896 
1897 	if (ag71xx_is(ag, AR7100)) {
1898 		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
1899 		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
1900 	}
1901 	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);
1902 
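	/* Single self-linked descriptor used to park the RX/TX DMA engines
	 * while they are being reset (see ag71xx_dma_reset()).
	 */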
1903 	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
1904 					    sizeof(struct ag71xx_desc),
1905 					    &ag->stop_desc_dma, GFP_KERNEL);
1906 	if (!ag->stop_desc)
1907 		return -ENOMEM;
1908 
1909 	ag->stop_desc->data = 0;
1910 	ag->stop_desc->ctrl = 0;
1911 	ag->stop_desc->next = (u32)ag->stop_desc_dma;
1912 
1913 	err = of_get_ethdev_address(np, ndev);
1914 	if (err) {
1915 		netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
1916 		eth_hw_addr_random(ndev);
1917 	}
1918 
1919 	err = of_get_phy_mode(np, &ag->phy_if_mode);
1920 	if (err) {
1921 		netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
1922 		return err;
1923 	}
1924 
1925 	netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll,
1926 			      AG71XX_NAPI_WEIGHT);
1927 
1928 	err = clk_prepare_enable(ag->clk_eth);
1929 	if (err) {
1930 		netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
1931 		return err;
1932 	}
1933 
1934 	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
1935 
1936 	ag71xx_hw_init(ag);
1937 
1938 	err = ag71xx_mdio_probe(ag);
1939 	if (err)
1940 		goto err_put_clk;
1941 
1942 	platform_set_drvdata(pdev, ndev);
1943 
1944 	err = ag71xx_phylink_setup(ag);
1945 	if (err) {
1946 		netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
1947 		goto err_mdio_remove;
1948 	}
1949 
1950 	err = register_netdev(ndev);
1951 	if (err) {
1952 		netif_err(ag, probe, ndev, "unable to register net device\n");
1953 		platform_set_drvdata(pdev, NULL);
1954 		goto err_mdio_remove;
1955 	}
1956 
1957 	netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
1958 		   (unsigned long)ag->mac_base, ndev->irq,
1959 		   phy_modes(ag->phy_if_mode));
1960 
1961 	return 0;
1962 
1963 err_mdio_remove:
1964 	ag71xx_mdio_remove(ag);
1965 err_put_clk:
1966 	clk_disable_unprepare(ag->clk_eth);
1967 	return err;
1968 }
1969 
1970 static int ag71xx_remove(struct platform_device *pdev)
1971 {
1972 	struct net_device *ndev = platform_get_drvdata(pdev);
1973 	struct ag71xx *ag;
1974 
1975 	if (!ndev)
1976 		return 0;
1977 
1978 	ag = netdev_priv(ndev);
1979 	unregister_netdev(ndev);
1980 	ag71xx_mdio_remove(ag);
1981 	clk_disable_unprepare(ag->clk_eth);
1982 	platform_set_drvdata(pdev, NULL);
1983 
1984 	return 0;
1985 }
1986 
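/* Per-SoC values written to FIFO_CFG1, FIFO_CFG2 and FIFO_CFG3 (in that
 * order) via ag->fifodata[].
 */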
1987 static const u32 ar71xx_fifo_ar7100[] = {
1988 	0x0fff0000, 0x00001fff, 0x00780fff,
1989 };
1990 
1991 static const u32 ar71xx_fifo_ar9130[] = {
1992 	0x0fff0000, 0x00001fff, 0x008001ff,
1993 };
1994 
1995 static const u32 ar71xx_fifo_ar9330[] = {
1996 	0x0010ffff, 0x015500aa, 0x01f00140,
1997 };
1998 
1999 static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = {
2000 	.type = AR7100,
2001 	.fifodata = ar71xx_fifo_ar7100,
2002 	.max_frame_len = 1540,
2003 	.desc_pktlen_mask = SZ_4K - 1,
2004 	.tx_hang_workaround = false,
2005 };
2006 
2007 static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = {
2008 	.type = AR7240,
2009 	.fifodata = ar71xx_fifo_ar7100,
2010 	.max_frame_len = 1540,
2011 	.desc_pktlen_mask = SZ_4K - 1,
2012 	.tx_hang_workaround = true,
2013 };
2014 
2015 static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = {
2016 	.type = AR9130,
2017 	.fifodata = ar71xx_fifo_ar9130,
2018 	.max_frame_len = 1540,
2019 	.desc_pktlen_mask = SZ_4K - 1,
2020 	.tx_hang_workaround = false,
2021 };
2022 
2023 static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = {
2024 	.type = AR9330,
2025 	.fifodata = ar71xx_fifo_ar9330,
2026 	.max_frame_len = 1540,
2027 	.desc_pktlen_mask = SZ_4K - 1,
2028 	.tx_hang_workaround = true,
2029 };
2030 
2031 static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = {
2032 	.type = AR9340,
2033 	.fifodata = ar71xx_fifo_ar9330,
2034 	.max_frame_len = SZ_16K - 1,
2035 	.desc_pktlen_mask = SZ_16K - 1,
2036 	.tx_hang_workaround = true,
2037 };
2038 
2039 static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = {
2040 	.type = QCA9530,
2041 	.fifodata = ar71xx_fifo_ar9330,
2042 	.max_frame_len = SZ_16K - 1,
2043 	.desc_pktlen_mask = SZ_16K - 1,
2044 	.tx_hang_workaround = true,
2045 };
2046 
2047 static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = {
2048 	.type = QCA9550,
2049 	.fifodata = ar71xx_fifo_ar9330,
2050 	.max_frame_len = 1540,
2051 	.desc_pktlen_mask = SZ_16K - 1,
2052 	.tx_hang_workaround = true,
2053 };
2054 
2055 static const struct of_device_id ag71xx_match[] = {
2056 	{ .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
2057 	{ .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
2058 	{ .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
2059 	{ .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
2060 	{ .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
2061 	{ .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
2062 	{ .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
2063 	{ .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
2064 	{ .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
2065 	{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
2066 	{}
2067 };
2068 
2069 static struct platform_driver ag71xx_driver = {
2070 	.probe		= ag71xx_probe,
2071 	.remove		= ag71xx_remove,
2072 	.driver = {
2073 		.name	= "ag71xx",
2074 		.of_match_table = ag71xx_match,
2075 	}
2076 };
2077 
2078 module_platform_driver(ag71xx_driver);
2079 MODULE_LICENSE("GPL v2");
2080