// SPDX-License-Identifier: GPL-2.0
/*  Atheros AR71xx built-in ethernet mac driver
 *
 *  Copyright (C) 2019 Oleksij Rempel <o.rempel@pengutronix.de>
 *
 *  List of authors who contributed to this driver before mainlining:
 *  Alexander Couzens <lynxis@fe80.eu>
 *  Christian Lamparter <chunkeey@gmail.com>
 *  Chuanhong Guo <gch981213@gmail.com>
 *  Daniel F. Dickinson <cshored@thecshore.com>
 *  David Bauer <mail@david-bauer.net>
 *  Felix Fietkau <nbd@nbd.name>
 *  Gabor Juhos <juhosg@freemail.hu>
 *  Hauke Mehrtens <hauke@hauke-m.de>
 *  Johann Neuhauser <johann@it-neuhauser.de>
 *  John Crispin <john@phrozen.org>
 *  Jo-Philipp Wich <jo@mein.io>
 *  Koen Vandeputte <koen.vandeputte@ncentric.com>
 *  Lucian Cristian <lucian.cristian@gmail.com>
 *  Matt Merhar <mattmerhar@protonmail.com>
 *  Milan Krstic <milan.krstic@gmail.com>
 *  Petr Štetiar <ynezz@true.cz>
 *  Rosen Penev <rosenp@gmail.com>
 *  Stephen Walker <stephendwalker+github@gmail.com>
 *  Vittorio Gambaletta <openwrt@vittgam.net>
 *  Weijie Gao <hackpascal@gmail.com>
 *  Imre Kaloz <kaloz@openwrt.org>
 */

#include <linux/if_vlan.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <net/selftests.h>

/* For our NAPI weight, bigger does *NOT* mean better - it means more
 * D-cache misses and lots more wasted cycles than we'll ever
 * possibly gain from saving instructions.
 */
#define AG71XX_NAPI_WEIGHT	32
#define AG71XX_OOM_REFILL	(1 + HZ / 10)
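/* (1 + HZ / 10) jiffies is roughly 100 ms: when an RX buffer
 * allocation fails, the poll loop arms oom_timer with this delay so
 * NAPI is rescheduled and the refill retried shortly afterwards.
 */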

#define AG71XX_INT_ERR	(AG71XX_INT_RX_BE | AG71XX_INT_TX_BE)
#define AG71XX_INT_TX	(AG71XX_INT_TX_PS)
#define AG71XX_INT_RX	(AG71XX_INT_RX_PR | AG71XX_INT_RX_OF)

#define AG71XX_INT_POLL	(AG71XX_INT_RX | AG71XX_INT_TX)
#define AG71XX_INT_INIT	(AG71XX_INT_ERR | AG71XX_INT_POLL)

#define AG71XX_TX_MTU_LEN	1540

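/* Large TX frames may be split across several descriptors (see
 * ag71xx_fill_dma_desc()).  With a 512 byte split, a maximum-sized
 * 1540 byte frame takes DIV_ROUND_UP(1540, 512) = 4 descriptors,
 * which is what AG71XX_TX_RING_DS_PER_PKT evaluates to.
 */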
#define AG71XX_TX_RING_SPLIT		512
#define AG71XX_TX_RING_DS_PER_PKT	DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \
					     AG71XX_TX_RING_SPLIT)
#define AG71XX_TX_RING_SIZE_DEFAULT	128
#define AG71XX_RX_RING_SIZE_DEFAULT	256

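/* MDIO timing: ag71xx_mdio_wait_busy() polls the busy flag up to
 * AG71XX_MDIO_RETRY times with AG71XX_MDIO_DELAY microsecond pauses,
 * and ag71xx_mdio_get_divider() picks the smallest reference clock
 * divider that keeps MDC at or below AG71XX_MDIO_MAX_CLK (5 MHz).
 */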
#define AG71XX_MDIO_RETRY	1000
#define AG71XX_MDIO_DELAY	5
#define AG71XX_MDIO_MAX_CLK	5000000

/* Register offsets */
#define AG71XX_REG_MAC_CFG1	0x0000
#define MAC_CFG1_TXE		BIT(0)	/* Tx Enable */
#define MAC_CFG1_STX		BIT(1)	/* Synchronize Tx Enable */
#define MAC_CFG1_RXE		BIT(2)	/* Rx Enable */
#define MAC_CFG1_SRX		BIT(3)	/* Synchronize Rx Enable */
#define MAC_CFG1_TFC		BIT(4)	/* Tx Flow Control Enable */
#define MAC_CFG1_RFC		BIT(5)	/* Rx Flow Control Enable */
#define MAC_CFG1_SR		BIT(31)	/* Soft Reset */
#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define AG71XX_REG_MAC_CFG2	0x0004
#define MAC_CFG2_FDX		BIT(0)
#define MAC_CFG2_PAD_CRC_EN	BIT(2)
#define MAC_CFG2_LEN_CHECK	BIT(4)
#define MAC_CFG2_IF_1000	BIT(9)
#define MAC_CFG2_IF_10_100	BIT(8)

#define AG71XX_REG_MAC_MFL	0x0010

#define AG71XX_REG_MII_CFG	0x0020
#define MII_CFG_CLK_DIV_4	0
#define MII_CFG_CLK_DIV_6	2
#define MII_CFG_CLK_DIV_8	3
#define MII_CFG_CLK_DIV_10	4
#define MII_CFG_CLK_DIV_14	5
#define MII_CFG_CLK_DIV_20	6
#define MII_CFG_CLK_DIV_28	7
#define MII_CFG_CLK_DIV_34	8
#define MII_CFG_CLK_DIV_42	9
#define MII_CFG_CLK_DIV_50	10
#define MII_CFG_CLK_DIV_58	11
#define MII_CFG_CLK_DIV_66	12
#define MII_CFG_CLK_DIV_74	13
#define MII_CFG_CLK_DIV_82	14
#define MII_CFG_CLK_DIV_98	15
#define MII_CFG_RESET		BIT(31)

#define AG71XX_REG_MII_CMD	0x0024
#define MII_CMD_READ		BIT(0)

#define AG71XX_REG_MII_ADDR	0x0028
#define MII_ADDR_SHIFT		8

#define AG71XX_REG_MII_CTRL	0x002c
#define AG71XX_REG_MII_STATUS	0x0030
#define AG71XX_REG_MII_IND	0x0034
#define MII_IND_BUSY		BIT(0)
#define MII_IND_INVALID		BIT(2)

#define AG71XX_REG_MAC_IFCTL	0x0038
#define MAC_IFCTL_SPEED		BIT(16)

#define AG71XX_REG_MAC_ADDR1	0x0040
#define AG71XX_REG_MAC_ADDR2	0x0044
#define AG71XX_REG_FIFO_CFG0	0x0048
#define FIFO_CFG0_WTM		BIT(0)	/* Watermark Module */
#define FIFO_CFG0_RXS		BIT(1)	/* Rx System Module */
#define FIFO_CFG0_RXF		BIT(2)	/* Rx Fabric Module */
#define FIFO_CFG0_TXS		BIT(3)	/* Tx System Module */
#define FIFO_CFG0_TXF		BIT(4)	/* Tx Fabric Module */
#define FIFO_CFG0_ALL	(FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \
			| FIFO_CFG0_TXS | FIFO_CFG0_TXF)
#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG0_ENABLE_SHIFT	8

#define AG71XX_REG_FIFO_CFG1	0x004c
#define AG71XX_REG_FIFO_CFG2	0x0050
#define AG71XX_REG_FIFO_CFG3	0x0054
#define AG71XX_REG_FIFO_CFG4	0x0058
#define FIFO_CFG4_DE		BIT(0)	/* Drop Event */
#define FIFO_CFG4_DV		BIT(1)	/* RX_DV Event */
#define FIFO_CFG4_FC		BIT(2)	/* False Carrier */
#define FIFO_CFG4_CE		BIT(3)	/* Code Error */
#define FIFO_CFG4_CR		BIT(4)	/* CRC error */
#define FIFO_CFG4_LM		BIT(5)	/* Length Mismatch */
#define FIFO_CFG4_LO		BIT(6)	/* Length out of range */
#define FIFO_CFG4_OK		BIT(7)	/* Packet is OK */
#define FIFO_CFG4_MC		BIT(8)	/* Multicast Packet */
#define FIFO_CFG4_BC		BIT(9)	/* Broadcast Packet */
#define FIFO_CFG4_DR		BIT(10)	/* Dribble */
#define FIFO_CFG4_LE		BIT(11)	/* Long Event */
#define FIFO_CFG4_CF		BIT(12)	/* Control Frame */
#define FIFO_CFG4_PF		BIT(13)	/* Pause Frame */
#define FIFO_CFG4_UO		BIT(14)	/* Unsupported Opcode */
#define FIFO_CFG4_VT		BIT(15)	/* VLAN tag detected */
#define FIFO_CFG4_FT		BIT(16)	/* Frame Truncated */
#define FIFO_CFG4_UC		BIT(17)	/* Unicast Packet */
#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define AG71XX_REG_FIFO_CFG5	0x005c
#define FIFO_CFG5_DE		BIT(0)	/* Drop Event */
#define FIFO_CFG5_DV		BIT(1)	/* RX_DV Event */
#define FIFO_CFG5_FC		BIT(2)	/* False Carrier */
#define FIFO_CFG5_CE		BIT(3)	/* Code Error */
#define FIFO_CFG5_LM		BIT(4)	/* Length Mismatch */
#define FIFO_CFG5_LO		BIT(5)	/* Length Out of Range */
#define FIFO_CFG5_OK		BIT(6)	/* Packet is OK */
#define FIFO_CFG5_MC		BIT(7)	/* Multicast Packet */
#define FIFO_CFG5_BC		BIT(8)	/* Broadcast Packet */
#define FIFO_CFG5_DR		BIT(9)	/* Dribble */
#define FIFO_CFG5_CF		BIT(10)	/* Control Frame */
#define FIFO_CFG5_PF		BIT(11)	/* Pause Frame */
#define FIFO_CFG5_UO		BIT(12)	/* Unsupported Opcode */
#define FIFO_CFG5_VT		BIT(13)	/* VLAN tag detected */
#define FIFO_CFG5_LE		BIT(14)	/* Long Event */
#define FIFO_CFG5_FT		BIT(15)	/* Frame Truncated */
#define FIFO_CFG5_16		BIT(16)	/* unknown */
#define FIFO_CFG5_17		BIT(17)	/* unknown */
#define FIFO_CFG5_SF		BIT(18)	/* Short Frame */
#define FIFO_CFG5_BM		BIT(19)	/* Byte Mode */
#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)

#define AG71XX_REG_TX_CTRL	0x0180
#define TX_CTRL_TXE		BIT(0)	/* Tx Enable */

#define AG71XX_REG_TX_DESC	0x0184
#define AG71XX_REG_TX_STATUS	0x0188
#define TX_STATUS_PS		BIT(0)	/* Packet Sent */
#define TX_STATUS_UR		BIT(1)	/* Tx Underrun */
#define TX_STATUS_BE		BIT(3)	/* Bus Error */

#define AG71XX_REG_RX_CTRL	0x018c
#define RX_CTRL_RXE		BIT(0)	/* Rx Enable */

#define AG71XX_DMA_RETRY	10
#define AG71XX_DMA_DELAY	1

#define AG71XX_REG_RX_DESC	0x0190
#define AG71XX_REG_RX_STATUS	0x0194
#define RX_STATUS_PR		BIT(0)	/* Packet Received */
#define RX_STATUS_OF		BIT(2)	/* Rx Overflow */
#define RX_STATUS_BE		BIT(3)	/* Bus Error */

#define AG71XX_REG_INT_ENABLE	0x0198
#define AG71XX_REG_INT_STATUS	0x019c
#define AG71XX_INT_TX_PS	BIT(0)
#define AG71XX_INT_TX_UR	BIT(1)
#define AG71XX_INT_TX_BE	BIT(3)
#define AG71XX_INT_RX_PR	BIT(4)
#define AG71XX_INT_RX_OF	BIT(6)
#define AG71XX_INT_RX_BE	BIT(7)

#define AG71XX_REG_FIFO_DEPTH	0x01a8
#define AG71XX_REG_RX_SM	0x01b0
#define AG71XX_REG_TX_SM	0x01b4

#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)

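/* The MAC exposes RMON-style hardware counters at fixed register
 * offsets; each counter only implements the bits covered by ->mask,
 * which ag71xx_ethtool_get_stats() applies after reading the register.
 */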
struct ag71xx_statistic {
	unsigned short offset;
	u32 mask;
	const char name[ETH_GSTRING_LEN];
};

static const struct ag71xx_statistic ag71xx_statistics[] = {
	{ 0x0080, GENMASK(17, 0), "Tx/Rx 64 Byte", },
	{ 0x0084, GENMASK(17, 0), "Tx/Rx 65-127 Byte", },
	{ 0x0088, GENMASK(17, 0), "Tx/Rx 128-255 Byte", },
	{ 0x008C, GENMASK(17, 0), "Tx/Rx 256-511 Byte", },
	{ 0x0090, GENMASK(17, 0), "Tx/Rx 512-1023 Byte", },
	{ 0x0094, GENMASK(17, 0), "Tx/Rx 1024-1518 Byte", },
	{ 0x0098, GENMASK(17, 0), "Tx/Rx 1519-1522 Byte VLAN", },
	{ 0x009C, GENMASK(23, 0), "Rx Byte", },
	{ 0x00A0, GENMASK(17, 0), "Rx Packet", },
	{ 0x00A4, GENMASK(11, 0), "Rx FCS Error", },
	{ 0x00A8, GENMASK(17, 0), "Rx Multicast Packet", },
	{ 0x00AC, GENMASK(21, 0), "Rx Broadcast Packet", },
	{ 0x00B0, GENMASK(17, 0), "Rx Control Frame Packet", },
	{ 0x00B4, GENMASK(11, 0), "Rx Pause Frame Packet", },
	{ 0x00B8, GENMASK(11, 0), "Rx Unknown OPCode Packet", },
	{ 0x00BC, GENMASK(11, 0), "Rx Alignment Error", },
	{ 0x00C0, GENMASK(15, 0), "Rx Frame Length Error", },
	{ 0x00C4, GENMASK(11, 0), "Rx Code Error", },
	{ 0x00C8, GENMASK(11, 0), "Rx Carrier Sense Error", },
	{ 0x00CC, GENMASK(11, 0), "Rx Undersize Packet", },
	{ 0x00D0, GENMASK(11, 0), "Rx Oversize Packet", },
	{ 0x00D4, GENMASK(11, 0), "Rx Fragments", },
	{ 0x00D8, GENMASK(11, 0), "Rx Jabber", },
	{ 0x00DC, GENMASK(11, 0), "Rx Dropped Packet", },
	{ 0x00E0, GENMASK(23, 0), "Tx Byte", },
	{ 0x00E4, GENMASK(17, 0), "Tx Packet", },
	{ 0x00E8, GENMASK(17, 0), "Tx Multicast Packet", },
	{ 0x00EC, GENMASK(17, 0), "Tx Broadcast Packet", },
	{ 0x00F0, GENMASK(11, 0), "Tx Pause Control Frame", },
	{ 0x00F4, GENMASK(11, 0), "Tx Deferral Packet", },
	{ 0x00F8, GENMASK(11, 0), "Tx Excessive Deferral Packet", },
	{ 0x00FC, GENMASK(11, 0), "Tx Single Collision Packet", },
	{ 0x0100, GENMASK(11, 0), "Tx Multiple Collision", },
	{ 0x0104, GENMASK(11, 0), "Tx Late Collision Packet", },
	{ 0x0108, GENMASK(11, 0), "Tx Excessive Collision Packet", },
	{ 0x010C, GENMASK(12, 0), "Tx Total Collision", },
	{ 0x0110, GENMASK(11, 0), "Tx Pause Frames Honored", },
	{ 0x0114, GENMASK(11, 0), "Tx Drop Frame", },
	{ 0x0118, GENMASK(11, 0), "Tx Jabber Frame", },
	{ 0x011C, GENMASK(11, 0), "Tx FCS Error", },
	{ 0x0120, GENMASK(11, 0), "Tx Control Frame", },
	{ 0x0124, GENMASK(11, 0), "Tx Oversize Frame", },
	{ 0x0128, GENMASK(11, 0), "Tx Undersize Frame", },
	{ 0x012C, GENMASK(11, 0), "Tx Fragment", },
};

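/* Hardware DMA descriptor layout: ->data holds the DMA address of the
 * buffer, ->ctrl carries the buffer length in its low bits plus the
 * DESC_EMPTY/DESC_MORE flags, and ->next points at the DMA address of
 * the following descriptor, so each ring forms a circular linked list.
 */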
#define DESC_EMPTY		BIT(31)
#define DESC_MORE		BIT(24)
#define DESC_PKTLEN_M		0xfff
struct ag71xx_desc {
	u32 data;
	u32 ctrl;
	u32 next;
	u32 pad;
} __aligned(4);

#define AG71XX_DESC_SIZE	roundup(sizeof(struct ag71xx_desc), \
					L1_CACHE_BYTES)

struct ag71xx_buf {
	union {
		struct {
			struct sk_buff *skb;
			unsigned int len;
		} tx;
		struct {
			dma_addr_t dma_addr;
			void *rx_buf;
		} rx;
	};
};

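/* Ring indices grow monotonically and are reduced modulo the ring size
 * (BIT(order)) on use: ->curr is the producer index, ->dirty the oldest
 * entry not yet cleaned, so curr - dirty is the number of in-flight
 * descriptors.
 */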
struct ag71xx_ring {
	/* "Hot" fields in the data path. */
	unsigned int curr;
	unsigned int dirty;

	/* "Cold" fields - not used in the data path. */
	struct ag71xx_buf *buf;
	u16 order;
	u16 desc_split;
	dma_addr_t descs_dma;
	u8 *descs_cpu;
};

enum ag71xx_type {
	AR7100,
	AR7240,
	AR9130,
	AR9330,
	AR9340,
	QCA9530,
	QCA9550,
};

struct ag71xx_dcfg {
	u32 max_frame_len;
	const u32 *fifodata;
	u16 desc_pktlen_mask;
	bool tx_hang_workaround;
	enum ag71xx_type type;
};

struct ag71xx {
	/* Critical data related to the per-packet data path are clustered
	 * early in this structure to help improve the D-cache footprint.
	 */
	struct ag71xx_ring rx_ring ____cacheline_aligned;
	struct ag71xx_ring tx_ring ____cacheline_aligned;

	u16 rx_buf_size;
	u8 rx_buf_offset;

	struct net_device *ndev;
	struct platform_device *pdev;
	struct napi_struct napi;
	u32 msg_enable;
	const struct ag71xx_dcfg *dcfg;

	/* From this point onwards we're not looking at per-packet fields. */
	void __iomem *mac_base;

	struct ag71xx_desc *stop_desc;
	dma_addr_t stop_desc_dma;

	phy_interface_t phy_if_mode;
	struct phylink *phylink;
	struct phylink_config phylink_config;

	struct delayed_work restart_work;
	struct timer_list oom_timer;

	struct reset_control *mac_reset;

	u32 fifodata[3];
	int mac_idx;

	struct reset_control *mdio_reset;
	struct mii_bus *mii_bus;
	struct clk *clk_mdio;
	struct clk *clk_eth;
};

static int ag71xx_desc_empty(struct ag71xx_desc *desc)
{
	return (desc->ctrl & DESC_EMPTY) != 0;
}

static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
{
	return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
}

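/* Ring sizes are powers of two; fls(size - 1) computes
 * ceil(log2(size)), e.g. ag71xx_ring_size_order(256) == 8.
 */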
static int ag71xx_ring_size_order(int size)
{
	return fls(size - 1);
}

static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
{
	return ag->dcfg->type == type;
}

static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
{
	iowrite32(value, ag->mac_base + reg);
	/* flush write */
	(void)ioread32(ag->mac_base + reg);
}

static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
{
	return ioread32(ag->mac_base + reg);
}

static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) | mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) & ~mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
{
	ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
{
	ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static void ag71xx_get_drvinfo(struct net_device *ndev,
			       struct ethtool_drvinfo *info)
{
	struct ag71xx *ag = netdev_priv(ndev);

	strscpy(info->driver, "ag71xx", sizeof(info->driver));
	strscpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
		sizeof(info->bus_info));
}

static int ag71xx_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *kset)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(ag->phylink, kset);
}

static int ag71xx_set_link_ksettings(struct net_device *ndev,
				   const struct ethtool_link_ksettings *kset)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(ag->phylink, kset);
}

static int ag71xx_ethtool_nway_reset(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_nway_reset(ag->phylink);
}

static void ag71xx_ethtool_get_pauseparam(struct net_device *ndev,
					  struct ethtool_pauseparam *pause)
{
	struct ag71xx *ag = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(ag->phylink, pause);
}

static int ag71xx_ethtool_set_pauseparam(struct net_device *ndev,
					 struct ethtool_pauseparam *pause)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(ag->phylink, pause);
}

static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	int i;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       ag71xx_statistics[i].name, ETH_GSTRING_LEN);
		break;
	case ETH_SS_TEST:
		net_selftest_get_strings(data);
		break;
	}
}

static void ag71xx_ethtool_get_stats(struct net_device *ndev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct ag71xx *ag = netdev_priv(ndev);
	int i;

	for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
		*data++ = ag71xx_rr(ag, ag71xx_statistics[i].offset)
				& ag71xx_statistics[i].mask;
}

static int ag71xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ag71xx_statistics);
	case ETH_SS_TEST:
		return net_selftest_get_count();
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops ag71xx_ethtool_ops = {
	.get_drvinfo			= ag71xx_get_drvinfo,
	.get_link			= ethtool_op_get_link,
	.get_ts_info			= ethtool_op_get_ts_info,
	.get_link_ksettings		= ag71xx_get_link_ksettings,
	.set_link_ksettings		= ag71xx_set_link_ksettings,
	.nway_reset			= ag71xx_ethtool_nway_reset,
	.get_pauseparam			= ag71xx_ethtool_get_pauseparam,
	.set_pauseparam			= ag71xx_ethtool_set_pauseparam,
	.get_strings			= ag71xx_ethtool_get_strings,
	.get_ethtool_stats		= ag71xx_ethtool_get_stats,
	.get_sset_count			= ag71xx_ethtool_get_sset_count,
	.self_test			= net_selftest,
};

static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
		u32 busy;

		udelay(AG71XX_MDIO_DELAY);

		busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
		if (!busy)
			return 0;

		udelay(AG71XX_MDIO_DELAY);
	}

	netif_err(ag, link, ndev, "MDIO operation timed out\n");

	return -ETIMEDOUT;
}

static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
{
	struct ag71xx *ag = bus->priv;
	int err, val;

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	/* enable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
	/* disable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);

	netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	return val;
}

static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg,
				 u16 val)
{
	struct ag71xx *ag = bus->priv;

	netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);

	return ag71xx_mdio_wait_busy(ag);
}

static const u32 ar71xx_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28,
};

static const u32 ar7240_mdio_div_table[] = {
	2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
};

static const u32 ar933x_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
};

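/* Pick the smallest divider that keeps MDC at or below
 * AG71XX_MDIO_MAX_CLK.  As a hypothetical example: with a 200 MHz
 * reference clock on AR933x, the first table entry satisfying
 * 200 MHz / div <= 5 MHz is 42, so *div becomes index 9, matching
 * MII_CFG_CLK_DIV_42 above.
 */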
static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
{
	unsigned long ref_clock;
	const u32 *table;
	int ndivs, i;

	ref_clock = clk_get_rate(ag->clk_mdio);
	if (!ref_clock)
		return -EINVAL;

	if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
		table = ar933x_mdio_div_table;
		ndivs = ARRAY_SIZE(ar933x_mdio_div_table);
	} else if (ag71xx_is(ag, AR7240)) {
		table = ar7240_mdio_div_table;
		ndivs = ARRAY_SIZE(ar7240_mdio_div_table);
	} else {
		table = ar71xx_mdio_div_table;
		ndivs = ARRAY_SIZE(ar71xx_mdio_div_table);
	}

	for (i = 0; i < ndivs; i++) {
		unsigned long t;

		t = ref_clock / table[i];
		if (t <= AG71XX_MDIO_MAX_CLK) {
			*div = i;
			return 0;
		}
	}

	return -ENOENT;
}

static int ag71xx_mdio_reset(struct mii_bus *bus)
{
	struct ag71xx *ag = bus->priv;
	int err;
	u32 t;

	err = ag71xx_mdio_get_divider(ag, &t);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
	usleep_range(100, 200);

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
	usleep_range(100, 200);

	return 0;
}

static int ag71xx_mdio_probe(struct ag71xx *ag)
{
	struct device *dev = &ag->pdev->dev;
	struct net_device *ndev = ag->ndev;
	static struct mii_bus *mii_bus;
	struct device_node *np, *mnp;
	int err;

	np = dev->of_node;
	ag->mii_bus = NULL;

	ag->clk_mdio = devm_clk_get(dev, "mdio");
	if (IS_ERR(ag->clk_mdio)) {
		netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
		return PTR_ERR(ag->clk_mdio);
	}

	err = clk_prepare_enable(ag->clk_mdio);
	if (err) {
		netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
		return err;
	}

	mii_bus = devm_mdiobus_alloc(dev);
	if (!mii_bus) {
		err = -ENOMEM;
		goto mdio_err_put_clk;
	}

	ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
	if (IS_ERR(ag->mdio_reset)) {
		netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
		err = PTR_ERR(ag->mdio_reset);
		goto mdio_err_put_clk;
	}

	mii_bus->name = "ag71xx_mdio";
	mii_bus->read = ag71xx_mdio_mii_read;
	mii_bus->write = ag71xx_mdio_mii_write;
	mii_bus->reset = ag71xx_mdio_reset;
	mii_bus->priv = ag;
	mii_bus->parent = dev;
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);

	if (!IS_ERR(ag->mdio_reset)) {
		reset_control_assert(ag->mdio_reset);
		msleep(100);
		reset_control_deassert(ag->mdio_reset);
		msleep(200);
	}

	mnp = of_get_child_by_name(np, "mdio");
	err = of_mdiobus_register(mii_bus, mnp);
	of_node_put(mnp);
	if (err)
		goto mdio_err_put_clk;

	ag->mii_bus = mii_bus;

	return 0;

mdio_err_put_clk:
	clk_disable_unprepare(ag->clk_mdio);
	return err;
}

static void ag71xx_mdio_remove(struct ag71xx *ag)
{
	if (ag->mii_bus)
		mdiobus_unregister(ag->mii_bus);
	clk_disable_unprepare(ag->clk_mdio);
}

static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}

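/* Heuristic used by the tx_hang_workaround: once no packet has been
 * transmitted for more than HZ/10 while the carrier is up, the RX/TX
 * state machine and FIFO depth registers are compared against values
 * that have been observed when the DMA engine wedges, so the restart
 * worker can be scheduled to reset the MAC.
 */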
static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start);
	if (likely(time_before(jiffies, timestamp + HZ / 10)))
		return false;

	if (!netif_carrier_ok(ag->ndev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int sent = 0, bytes_compl = 0, n = 0;
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size;
	bool dma_stuck = false;

	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");

	while (ring->dirty + n != ring->curr) {
		struct ag71xx_desc *desc;
		struct sk_buff *skb;
		unsigned int i;

		i = (ring->dirty + n) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);
		skb = ring->buf[i].tx.skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (ag->dcfg->tx_hang_workaround &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work,
						      HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		napi_consume_skb(skb, budget);
		ring->buf[i].tx.skb = NULL;

		bytes_compl += ring->buf[i].tx.len;

		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);

	if (!sent)
		return 0;

	ag->ndev->stats.tx_bytes += bytes_compl;
	ag->ndev->stats.tx_packets += sent;

	netdev_completed_queue(ag->ndev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->ndev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}

static void ag71xx_dma_wait_stop(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_DMA_RETRY; i++) {
		u32 rx, tx;

		mdelay(AG71XX_DMA_DELAY);

		rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
		tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
		if (!rx && !tx)
			return;
	}

	netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
}

static void ag71xx_dma_reset(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	u32 val;
	int i;

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/* give the hardware some time to really stop all rx/tx activity;
	 * clearing the descriptors too early causes random memory corruption
	 */
	ag71xx_dma_wait_stop(ag);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
			  val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
			  val);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

static unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}

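/* MAC address bytes 2-5 are packed into ADDR1 and bytes 0-1 into the
 * upper half of ADDR2.  For example, 00:11:22:33:44:55 is written as
 * ADDR1 = 0x55443322 and ADDR2 = 0x11000000.
 */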
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac)
{
	u32 t;

	t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
	  | (((u32)mac[3]) << 8) | ((u32)mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}

static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct net_device *dev = ag->ndev;
	u32 rx_ds;
	u32 mii_reg;

	ag71xx_hw_stop(ag);

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true, 0);

	reset_control_assert(ag->mac_reset);
	usleep_range(10, 20);
	reset_control_deassert(ag->mac_reset);
	usleep_range(10, 20);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->ndev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->ndev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->ndev);
}

static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));

	if (phylink_autoneg_inband(mode))
		return;

	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag71xx_fast_reset(ag);

	if (ag->tx_ring.desc_split) {
		ag->fifodata[2] &= 0xffff;
		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
}

static void ag71xx_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));

	ag71xx_hw_stop(ag);
}

static void ag71xx_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
	u32 cfg1, cfg2;
	u32 ifctl;
	u32 fifo5;

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= duplex ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		return;
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	cfg1 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG1);
	cfg1 &= ~(MAC_CFG1_TFC | MAC_CFG1_RFC);
	if (tx_pause)
		cfg1 |= MAC_CFG1_TFC;

	if (rx_pause)
		cfg1 |= MAC_CFG1_RFC;
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, cfg1);

	ag71xx_hw_start(ag);
}

static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
	.mac_config = ag71xx_mac_config,
	.mac_link_down = ag71xx_mac_link_down,
	.mac_link_up = ag71xx_mac_link_up,
};

static int ag71xx_phylink_setup(struct ag71xx *ag)
{
	struct phylink *phylink;

	ag->phylink_config.dev = &ag->ndev->dev;
	ag->phylink_config.type = PHYLINK_NETDEV;
	ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
		MAC_10 | MAC_100 | MAC_1000FD;

	if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
	    ag71xx_is(ag, AR9340) ||
	    ag71xx_is(ag, QCA9530) ||
	    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
		__set_bit(PHY_INTERFACE_MODE_MII,
			  ag->phylink_config.supported_interfaces);

	if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
	    (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
	    (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
		__set_bit(PHY_INTERFACE_MODE_GMII,
			  ag->phylink_config.supported_interfaces);

	if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  ag->phylink_config.supported_interfaces);

	if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
		__set_bit(PHY_INTERFACE_MODE_RMII,
			  ag->phylink_config.supported_interfaces);

	if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
	    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
		__set_bit(PHY_INTERFACE_MODE_RGMII,
			  ag->phylink_config.supported_interfaces);

	phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
				 ag->phy_if_mode, &ag71xx_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	ag->phylink = phylink;
	return 0;
}

static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;
	struct net_device *ndev = ag->ndev;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			ndev->stats.tx_errors++;
		}

		if (ring->buf[i].tx.skb) {
			bytes_compl += ring->buf[i].tx.len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].tx.skb);
		}
		ring->buf[i].tx.skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].tx.skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->ndev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx.rx_buf) {
			dma_unmap_single(&ag->pdev->dev,
					 ring->buf[i].rx.dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx.rx_buf);
		}
}

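/* The RX path hands buffers to napi_build_skb(), so every fragment
 * must reserve tailroom for struct skb_shared_info beyond rx_buf_size.
 */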
static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc;
	void *data;

	desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx.rx_buf = data;
	buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
					  DMA_FROM_DEVICE);
	desc->data = (u32)buf->rx.dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct net_device *ndev = ag->ndev;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	unsigned int i;
	int ret;

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
			  desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}

static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int offset = ag->rx_buf_offset;
	unsigned int count;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx.rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
		  count);

	return count;
}

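/* TX and RX share one buf array and one coherent descriptor block:
 * the first BIT(tx->order) entries belong to the TX ring, and the RX
 * ring starts right behind them, tx_size * AG71XX_DESC_SIZE bytes into
 * the descriptor allocation.
 */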
static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size, tx_size;

	ring_size = BIT(tx->order) + BIT(rx->order);
	tx_size = BIT(tx->order);

	tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
					   ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_KERNEL);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[tx_size];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}

static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size;

	ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->ndev);
}

static void ag71xx_hw_init(struct ag71xx *ag)
{
	ag71xx_hw_stop(ag);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	usleep_range(20, 30);

	reset_control_assert(ag->mac_reset);
	msleep(100);
	reset_control_deassert(ag->mac_reset);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}

static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->ndev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	netif_stop_queue(ag->ndev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);
	unsigned int max_frame_len;
	int ret;

	ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0);
	if (ret) {
		netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",
			  ret);
		return ret;
	}

	max_frame_len = ag71xx_max_frame_len(ndev->mtu);
	ag->rx_buf_size =
		SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, ndev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	phylink_start(ag->phylink);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	phylink_disconnect_phy(ag->phylink);
	return ret;
}

static int ag71xx_stop(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	phylink_stop(ag->phylink);
	phylink_disconnect_phy(ag->phylink);
	ag71xx_hw_disable(ag);

	return 0;
}

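/* Split a TX buffer into descriptors of at most ->desc_split bytes.
 * The hardware hangs on DMA segments of 4 bytes or less, so when the
 * remainder after a full chunk would be that small, 4 bytes are shaved
 * off the current chunk instead: e.g. with split = 512 and len = 516
 * the segments become 508 + 8 rather than 512 + 4.
 */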
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i, ring_mask, ndesc, split;
	struct ag71xx_desc *desc;

	ring_mask = BIT(ring->order) - 1;
	ndesc = 0;
	split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/* TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}

static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *ndev)
{
	int i, n, ring_min, ring_mask, ring_size;
	struct ag71xx *ag = netdev_priv(ndev);
	struct ag71xx_ring *ring;
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;

	ring = &ag->tx_ring;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	if (skb->len <= 4) {
		netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
		goto err_drop;
	}

	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
				 skb->len & ag->dcfg->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].tx.len = skb->len;
	ring->buf[i].tx.skb = skb;

	netdev_sent_queue(ndev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		netif_dbg(ag, tx_err, ndev, "tx queue full\n");
		netif_stop_queue(ndev);
	}

	netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	ndev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ag71xx_oom_timer_handler(struct timer_list *t)
{
	struct ag71xx *ag = from_timer(ag, t, oom_timer);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct ag71xx *ag = netdev_priv(ndev);

	netif_err(ag, tx_err, ndev, "tx timeout\n");

	schedule_delayed_work(&ag->restart_work, 1);
}

static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx,
					 restart_work.work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);

	phylink_stop(ag->phylink);
	phylink_start(ag->phylink);

	rtnl_unlock();
}

static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size, done = 0;
	unsigned int pktlen_mask, offset;
	struct ag71xx_ring *ring;
	struct list_head rx_list;
	struct sk_buff *skb;

	ring = &ag->rx_ring;
	pktlen_mask = ag->dcfg->desc_pktlen_mask;
	offset = ag->rx_buf_offset;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
		  limit, ring->curr, ring->dirty);

	INIT_LIST_HEAD(&rx_list);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			WARN_ONCE(1, "RX out of ring");
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += pktlen;

		skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx.rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (err) {
			ndev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = ndev;
			skb->ip_summed = CHECKSUM_NONE;
			list_add_tail(&skb->list, &rx_list);
		}

next:
		ring->buf[i].rx.rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	list_for_each_entry(skb, &rx_list, list)
		skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb_list(&rx_list);

	netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
		  ring->curr, ring->dirty, done);

	return done;
}

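/* NAPI poll: reclaim completed TX descriptors first, then receive up
 * to the budget.  If the budget was not exhausted and no further work
 * is pending, polling mode is left and the RX/TX interrupts are
 * re-enabled; an RX buffer allocation failure instead arms oom_timer
 * so NAPI is rescheduled once the refill can be retried.
 */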
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	struct net_device *ndev = ag->ndev;
	int tx_done, rx_done;
	u32 status;

	tx_done = ag71xx_tx_packets(ag, false, limit);

	netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
	rx_done = ag71xx_rx_packets(ag, limit);

	if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		ndev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d, limit=%d\n",
			  rx_done, tx_done, limit);
1734  
1735  		napi_complete(napi);
1736  
1737  		/* enable interrupts */
1738  		ag71xx_int_enable(ag, AG71XX_INT_POLL);
1739  		return rx_done;
1740  	}
1741  
1742  more:
1743  	netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
1744  		  rx_done, tx_done, limit);
1745  	return limit;
1746  
1747  oom:
1748  	netif_err(ag, rx_err, ndev, "out of memory\n");
1749  
1750  	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
1751  	napi_complete(napi);
1752  	return 0;
1753  }
1754  
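/* Top-half interrupt handler: bus errors are acked and logged directly;
 * normal RX/TX events disable the poll sources and defer the work to NAPI.
 */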
1755  static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
1756  {
1757  	struct net_device *ndev = dev_id;
1758  	struct ag71xx *ag;
1759  	u32 status;
1760  
1761  	ag = netdev_priv(ndev);
1762  	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
1763  
1764  	if (unlikely(!status))
1765  		return IRQ_NONE;
1766  
1767  	if (unlikely(status & AG71XX_INT_ERR)) {
1768  		if (status & AG71XX_INT_TX_BE) {
1769  			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
1770  			netif_err(ag, intr, ndev, "TX BUS error\n");
1771  		}
1772  		if (status & AG71XX_INT_RX_BE) {
1773  			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
1774  			netif_err(ag, intr, ndev, "RX BUS error\n");
1775  		}
1776  	}
1777  
1778  	if (likely(status & AG71XX_INT_POLL)) {
1779  		ag71xx_int_disable(ag, AG71XX_INT_POLL);
1780  		netif_dbg(ag, intr, ndev, "enable polling mode\n");
1781  		napi_schedule(&ag->napi);
1782  	}
1783  
1784  	return IRQ_HANDLED;
1785  }
1786  
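/* The networking core has already range-checked new_mtu against
 * min_mtu/max_mtu, so only the MAC's maximum frame length register needs
 * reprogramming here.
 */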
1787  static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
1788  {
1789  	struct ag71xx *ag = netdev_priv(ndev);
1790  
1791  	ndev->mtu = new_mtu;
1792  	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
1793  		  ag71xx_max_frame_len(ndev->mtu));
1794  
1795  	return 0;
1796  }
1797  
1798  static const struct net_device_ops ag71xx_netdev_ops = {
1799  	.ndo_open		= ag71xx_open,
1800  	.ndo_stop		= ag71xx_stop,
1801  	.ndo_start_xmit		= ag71xx_hard_start_xmit,
1802  	.ndo_eth_ioctl		= phy_do_ioctl,
1803  	.ndo_tx_timeout		= ag71xx_tx_timeout,
1804  	.ndo_change_mtu		= ag71xx_change_mtu,
1805  	.ndo_set_mac_address	= eth_mac_addr,
1806  	.ndo_validate_addr	= eth_validate_addr,
1807  };
1808  
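/* MMIO base addresses of the two built-in MACs; probe derives the MAC index
 * (0 or 1) from which of these the device's register block starts at.
 */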
1809  static const u32 ar71xx_addr_ar7100[] = {
1810  	0x19000000, 0x1a000000,
1811  };
1812  
1813  static int ag71xx_probe(struct platform_device *pdev)
1814  {
1815  	struct device_node *np = pdev->dev.of_node;
1816  	const struct ag71xx_dcfg *dcfg;
1817  	struct net_device *ndev;
1818  	struct resource *res;
1819  	int tx_size, err, i;
1820  	struct ag71xx *ag;
1821  
1822  	if (!np)
1823  		return -ENODEV;
1824  
1825  	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
1826  	if (!ndev)
1827  		return -ENOMEM;
1828  
1829  	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1830  	if (!res)
1831  		return -EINVAL;
1832  
1833  	dcfg = of_device_get_match_data(&pdev->dev);
1834  	if (!dcfg)
1835  		return -EINVAL;
1836  
1837  	ag = netdev_priv(ndev);
1838  	ag->mac_idx = -1;
1839  	for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) {
1840  		if (ar71xx_addr_ar7100[i] == res->start)
1841  			ag->mac_idx = i;
1842  	}
1843  
1844  	if (ag->mac_idx < 0) {
1845  		netif_err(ag, probe, ndev, "unknown mac idx\n");
1846  		return -EINVAL;
1847  	}
1848  
1849  	ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
1850  	if (IS_ERR(ag->clk_eth)) {
1851  		netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
1852  		return PTR_ERR(ag->clk_eth);
1853  	}
1854  
1855  	SET_NETDEV_DEV(ndev, &pdev->dev);
1856  
1857  	ag->pdev = pdev;
1858  	ag->ndev = ndev;
1859  	ag->dcfg = dcfg;
1860  	ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
1861  	memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));
1862  
1863  	ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
1864  	if (IS_ERR(ag->mac_reset)) {
1865  		netif_err(ag, probe, ndev, "missing mac reset\n");
1866  		return PTR_ERR(ag->mac_reset);
1867  	}
1868  
1869  	ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1870  	if (!ag->mac_base)
1871  		return -ENOMEM;
1872  
1873  	ndev->irq = platform_get_irq(pdev, 0);
1874  	err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
1875  			       0x0, dev_name(&pdev->dev), ndev);
1876  	if (err) {
1877  		netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
1878  			  ndev->irq);
1879  		return err;
1880  	}
1881  
1882  	ndev->netdev_ops = &ag71xx_netdev_ops;
1883  	ndev->ethtool_ops = &ag71xx_ethtool_ops;
1884  
1885  	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
1886  	timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);
1887  
1888  	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
1889  	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);
1890  
1891  	ndev->min_mtu = 68;
1892  	ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0);
1893  
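	/* Reserve standard headroom; the extra two-byte IP alignment is
	 * skipped on AR7100/AR9130, whose RX DMA reportedly cannot cope
	 * with unaligned buffers.
	 */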
1894  	ag->rx_buf_offset = NET_SKB_PAD;
1895  	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
1896  		ag->rx_buf_offset += NET_IP_ALIGN;
1897  
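	/* Work around an AR7100 TX DMA limitation by splitting frames into
	 * descriptors of at most AG71XX_TX_RING_SPLIT bytes, and scale the
	 * ring so it still holds the default number of packets.
	 */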
1898  	if (ag71xx_is(ag, AR7100)) {
1899  		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
1900  		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
1901  	}
1902  	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);
1903  
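	/* Allocate a single "stop" descriptor: always empty (ctrl = 0) and
	 * linked to itself, it gives the driver a harmless target to point
	 * the DMA engine at when halting a ring.
	 */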
1904  	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
1905  					    sizeof(struct ag71xx_desc),
1906  					    &ag->stop_desc_dma, GFP_KERNEL);
1907  	if (!ag->stop_desc)
1908  		return -ENOMEM;
1909  
1910  	ag->stop_desc->data = 0;
1911  	ag->stop_desc->ctrl = 0;
1912  	ag->stop_desc->next = (u32)ag->stop_desc_dma;
1913  
1914  	err = of_get_ethdev_address(np, ndev);
1915  	if (err) {
1916  		netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
1917  		eth_hw_addr_random(ndev);
1918  	}
1919  
1920  	err = of_get_phy_mode(np, &ag->phy_if_mode);
1921  	if (err) {
1922  		netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
1923  		return err;
1924  	}
1925  
1926  	netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll,
1927  			      AG71XX_NAPI_WEIGHT);
1928  
1929  	err = clk_prepare_enable(ag->clk_eth);
1930  	if (err) {
1931  		netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
1932  		return err;
1933  	}
1934  
1935  	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
1936  
1937  	ag71xx_hw_init(ag);
1938  
1939  	err = ag71xx_mdio_probe(ag);
1940  	if (err)
1941  		goto err_put_clk;
1942  
1943  	platform_set_drvdata(pdev, ndev);
1944  
1945  	err = ag71xx_phylink_setup(ag);
1946  	if (err) {
1947  		netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
1948  		goto err_mdio_remove;
1949  	}
1950  
1951  	err = register_netdev(ndev);
1952  	if (err) {
1953  		netif_err(ag, probe, ndev, "unable to register net device\n");
1954  		platform_set_drvdata(pdev, NULL);
1955  		goto err_mdio_remove;
1956  	}
1957  
1958  	netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode: %s\n",
1959  		   (unsigned long)ag->mac_base, ndev->irq,
1960  		   phy_modes(ag->phy_if_mode));
1961  
1962  	return 0;
1963  
1964  err_mdio_remove:
1965  	ag71xx_mdio_remove(ag);
1966  err_put_clk:
1967  	clk_disable_unprepare(ag->clk_eth);
1968  	return err;
1969  }
1970  
1971  static int ag71xx_remove(struct platform_device *pdev)
1972  {
1973  	struct net_device *ndev = platform_get_drvdata(pdev);
1974  	struct ag71xx *ag;
1975  
1976  	if (!ndev)
1977  		return 0;
1978  
1979  	ag = netdev_priv(ndev);
1980  	unregister_netdev(ndev);
1981  	ag71xx_mdio_remove(ag);
1982  	clk_disable_unprepare(ag->clk_eth);
1983  	platform_set_drvdata(pdev, NULL);
1984  
1985  	return 0;
1986  }
1987  
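/* Per-SoC seed values for the three FIFO configuration registers; they are
 * copied into ag->fifodata at probe time and programmed during MAC init.
 */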
1988  static const u32 ar71xx_fifo_ar7100[] = {
1989  	0x0fff0000, 0x00001fff, 0x00780fff,
1990  };
1991  
1992  static const u32 ar71xx_fifo_ar9130[] = {
1993  	0x0fff0000, 0x00001fff, 0x008001ff,
1994  };
1995  
1996  static const u32 ar71xx_fifo_ar9330[] = {
1997  	0x0010ffff, 0x015500aa, 0x01f00140,
1998  };
1999  
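/* Per-SoC capabilities: FIFO defaults, the largest supported frame length,
 * the descriptor packet-length mask, and whether the TX hang workaround is
 * needed.
 */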
2000  static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = {
2001  	.type = AR7100,
2002  	.fifodata = ar71xx_fifo_ar7100,
2003  	.max_frame_len = 1540,
2004  	.desc_pktlen_mask = SZ_4K - 1,
2005  	.tx_hang_workaround = false,
2006  };
2007  
2008  static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = {
2009  	.type = AR7240,
2010  	.fifodata = ar71xx_fifo_ar7100,
2011  	.max_frame_len = 1540,
2012  	.desc_pktlen_mask = SZ_4K - 1,
2013  	.tx_hang_workaround = true,
2014  };
2015  
2016  static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = {
2017  	.type = AR9130,
2018  	.fifodata = ar71xx_fifo_ar9130,
2019  	.max_frame_len = 1540,
2020  	.desc_pktlen_mask = SZ_4K - 1,
2021  	.tx_hang_workaround = false,
2022  };
2023  
2024  static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = {
2025  	.type = AR9330,
2026  	.fifodata = ar71xx_fifo_ar9330,
2027  	.max_frame_len = 1540,
2028  	.desc_pktlen_mask = SZ_4K - 1,
2029  	.tx_hang_workaround = true,
2030  };
2031  
2032  static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = {
2033  	.type = AR9340,
2034  	.fifodata = ar71xx_fifo_ar9330,
2035  	.max_frame_len = SZ_16K - 1,
2036  	.desc_pktlen_mask = SZ_16K - 1,
2037  	.tx_hang_workaround = true,
2038  };
2039  
2040  static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = {
2041  	.type = QCA9530,
2042  	.fifodata = ar71xx_fifo_ar9330,
2043  	.max_frame_len = SZ_16K - 1,
2044  	.desc_pktlen_mask = SZ_16K - 1,
2045  	.tx_hang_workaround = true,
2046  };
2047  
2048  static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = {
2049  	.type = QCA9550,
2050  	.fifodata = ar71xx_fifo_ar9330,
2051  	.max_frame_len = 1540,
2052  	.desc_pktlen_mask = SZ_16K - 1,
2053  	.tx_hang_workaround = true,
2054  };
2055  
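/* Map each DT compatible string to the per-SoC configuration that probe
 * fetches via of_device_get_match_data().
 */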
2056  static const struct of_device_id ag71xx_match[] = {
2057  	{ .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
2058  	{ .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
2059  	{ .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
2060  	{ .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
2061  	{ .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
2062  	{ .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
2063  	{ .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
2064  	{ .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
2065  	{ .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
2066  	{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
2067  	{}
2068  };
2069  
2070  static struct platform_driver ag71xx_driver = {
2071  	.probe		= ag71xx_probe,
2072  	.remove		= ag71xx_remove,
2073  	.driver = {
2074  		.name	= "ag71xx",
2075  		.of_match_table = ag71xx_match,
2076  	}
2077  };
2078  
2079  module_platform_driver(ag71xx_driver);
2080  MODULE_LICENSE("GPL v2");
2081