1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2020 MediaTek Corporation
4  * Copyright (c) 2020 BayLibre SAS
5  *
6  * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
7  */
8 
9 #include <linux/bits.h>
10 #include <linux/clk.h>
11 #include <linux/compiler.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/etherdevice.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/mii.h>
17 #include <linux/module.h>
18 #include <linux/netdevice.h>
19 #include <linux/of.h>
20 #include <linux/of_mdio.h>
21 #include <linux/of_net.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm.h>
24 #include <linux/regmap.h>
25 #include <linux/skbuff.h>
26 #include <linux/spinlock.h>
27 
28 #define MTK_STAR_DRVNAME			"mtk_star_emac"
29 
30 #define MTK_STAR_WAIT_TIMEOUT			300
31 #define MTK_STAR_MAX_FRAME_SIZE			1514
32 #define MTK_STAR_SKB_ALIGNMENT			16
33 #define MTK_STAR_NAPI_WEIGHT			64
34 #define MTK_STAR_HASHTABLE_MC_LIMIT		256
35 #define MTK_STAR_HASHTABLE_SIZE_MAX		512
36 
37 /* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
38  * work for this controller.
39  */
40 #define MTK_STAR_IP_ALIGN			2
41 
/* Bulk clocks consumed by the MAC; the array sizes priv->clks. */
static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names)
44 
45 /* PHY Control Register 0 */
46 #define MTK_STAR_REG_PHY_CTRL0			0x0000
47 #define MTK_STAR_BIT_PHY_CTRL0_WTCMD		BIT(13)
48 #define MTK_STAR_BIT_PHY_CTRL0_RDCMD		BIT(14)
49 #define MTK_STAR_BIT_PHY_CTRL0_RWOK		BIT(15)
50 #define MTK_STAR_MSK_PHY_CTRL0_PREG		GENMASK(12, 8)
51 #define MTK_STAR_OFF_PHY_CTRL0_PREG		8
52 #define MTK_STAR_MSK_PHY_CTRL0_RWDATA		GENMASK(31, 16)
53 #define MTK_STAR_OFF_PHY_CTRL0_RWDATA		16
54 
55 /* PHY Control Register 1 */
56 #define MTK_STAR_REG_PHY_CTRL1			0x0004
57 #define MTK_STAR_BIT_PHY_CTRL1_LINK_ST		BIT(0)
58 #define MTK_STAR_BIT_PHY_CTRL1_AN_EN		BIT(8)
59 #define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD	9
60 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M	0x00
61 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M	0x01
62 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M	0x02
63 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX	BIT(11)
64 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX	BIT(12)
65 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX	BIT(13)
66 
67 /* MAC Configuration Register */
68 #define MTK_STAR_REG_MAC_CFG			0x0008
69 #define MTK_STAR_OFF_MAC_CFG_IPG		10
70 #define MTK_STAR_VAL_MAC_CFG_IPG_96BIT		GENMASK(4, 0)
71 #define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522	BIT(16)
72 #define MTK_STAR_BIT_MAC_CFG_AUTO_PAD		BIT(19)
73 #define MTK_STAR_BIT_MAC_CFG_CRC_STRIP		BIT(20)
74 #define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP		BIT(22)
75 #define MTK_STAR_BIT_MAC_CFG_NIC_PD		BIT(31)
76 
77 /* Flow-Control Configuration Register */
78 #define MTK_STAR_REG_FC_CFG			0x000c
79 #define MTK_STAR_BIT_FC_CFG_BP_EN		BIT(7)
80 #define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR	BIT(8)
81 #define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH	16
82 #define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH	GENMASK(27, 16)
83 #define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K	0x800
84 
85 /* ARL Configuration Register */
86 #define MTK_STAR_REG_ARL_CFG			0x0010
87 #define MTK_STAR_BIT_ARL_CFG_HASH_ALG		BIT(0)
88 #define MTK_STAR_BIT_ARL_CFG_MISC_MODE		BIT(4)
89 
90 /* MAC High and Low Bytes Registers */
91 #define MTK_STAR_REG_MY_MAC_H			0x0014
92 #define MTK_STAR_REG_MY_MAC_L			0x0018
93 
94 /* Hash Table Control Register */
95 #define MTK_STAR_REG_HASH_CTRL			0x001c
96 #define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR	GENMASK(8, 0)
97 #define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA	BIT(12)
98 #define MTK_STAR_BIT_HASH_CTRL_ACC_CMD		BIT(13)
99 #define MTK_STAR_BIT_HASH_CTRL_CMD_START	BIT(14)
100 #define MTK_STAR_BIT_HASH_CTRL_BIST_OK		BIT(16)
101 #define MTK_STAR_BIT_HASH_CTRL_BIST_DONE	BIT(17)
102 #define MTK_STAR_BIT_HASH_CTRL_BIST_EN		BIT(31)
103 
104 /* TX DMA Control Register */
105 #define MTK_STAR_REG_TX_DMA_CTRL		0x0034
106 #define MTK_STAR_BIT_TX_DMA_CTRL_START		BIT(0)
107 #define MTK_STAR_BIT_TX_DMA_CTRL_STOP		BIT(1)
108 #define MTK_STAR_BIT_TX_DMA_CTRL_RESUME		BIT(2)
109 
110 /* RX DMA Control Register */
111 #define MTK_STAR_REG_RX_DMA_CTRL		0x0038
112 #define MTK_STAR_BIT_RX_DMA_CTRL_START		BIT(0)
113 #define MTK_STAR_BIT_RX_DMA_CTRL_STOP		BIT(1)
114 #define MTK_STAR_BIT_RX_DMA_CTRL_RESUME		BIT(2)
115 
116 /* DMA Address Registers */
117 #define MTK_STAR_REG_TX_DPTR			0x003c
118 #define MTK_STAR_REG_RX_DPTR			0x0040
119 #define MTK_STAR_REG_TX_BASE_ADDR		0x0044
120 #define MTK_STAR_REG_RX_BASE_ADDR		0x0048
121 
122 /* Interrupt Status Register */
123 #define MTK_STAR_REG_INT_STS			0x0050
124 #define MTK_STAR_REG_INT_STS_PORT_STS_CHG	BIT(2)
125 #define MTK_STAR_REG_INT_STS_MIB_CNT_TH		BIT(3)
126 #define MTK_STAR_BIT_INT_STS_FNRC		BIT(6)
127 #define MTK_STAR_BIT_INT_STS_TNTC		BIT(8)
128 
129 /* Interrupt Mask Register */
130 #define MTK_STAR_REG_INT_MASK			0x0054
131 #define MTK_STAR_BIT_INT_MASK_FNRC		BIT(6)
132 
133 /* Misc. Config Register */
134 #define MTK_STAR_REG_TEST1			0x005c
135 #define MTK_STAR_BIT_TEST1_RST_HASH_MBIST	BIT(31)
136 
137 /* Extended Configuration Register */
138 #define MTK_STAR_REG_EXT_CFG			0x0060
139 #define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS	16
140 #define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS	GENMASK(26, 16)
141 #define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K	0x400
142 
143 /* EthSys Configuration Register */
144 #define MTK_STAR_REG_SYS_CONF			0x0094
145 #define MTK_STAR_BIT_MII_PAD_OUT_ENABLE		BIT(0)
146 #define MTK_STAR_BIT_EXT_MDC_MODE		BIT(1)
147 #define MTK_STAR_BIT_SWC_MII_MODE		BIT(2)
148 
149 /* MAC Clock Configuration Register */
150 #define MTK_STAR_REG_MAC_CLK_CONF		0x00ac
151 #define MTK_STAR_MSK_MAC_CLK_CONF		GENMASK(7, 0)
152 #define MTK_STAR_BIT_CLK_DIV_10			0x0a
153 
154 /* Counter registers. */
155 #define MTK_STAR_REG_C_RXOKPKT			0x0100
156 #define MTK_STAR_REG_C_RXOKBYTE			0x0104
157 #define MTK_STAR_REG_C_RXRUNT			0x0108
158 #define MTK_STAR_REG_C_RXLONG			0x010c
159 #define MTK_STAR_REG_C_RXDROP			0x0110
160 #define MTK_STAR_REG_C_RXCRC			0x0114
161 #define MTK_STAR_REG_C_RXARLDROP		0x0118
162 #define MTK_STAR_REG_C_RXVLANDROP		0x011c
163 #define MTK_STAR_REG_C_RXCSERR			0x0120
164 #define MTK_STAR_REG_C_RXPAUSE			0x0124
165 #define MTK_STAR_REG_C_TXOKPKT			0x0128
166 #define MTK_STAR_REG_C_TXOKBYTE			0x012c
167 #define MTK_STAR_REG_C_TXPAUSECOL		0x0130
168 #define MTK_STAR_REG_C_TXRTY			0x0134
169 #define MTK_STAR_REG_C_TXSKIP			0x0138
170 #define MTK_STAR_REG_C_TX_ARP			0x013c
171 #define MTK_STAR_REG_C_RX_RERR			0x01d8
172 #define MTK_STAR_REG_C_RX_UNI			0x01dc
173 #define MTK_STAR_REG_C_RX_MULTI			0x01e0
174 #define MTK_STAR_REG_C_RX_BROAD			0x01e4
175 #define MTK_STAR_REG_C_RX_ALIGNERR		0x01e8
176 #define MTK_STAR_REG_C_TX_UNI			0x01ec
177 #define MTK_STAR_REG_C_TX_MULTI			0x01f0
178 #define MTK_STAR_REG_C_TX_BROAD			0x01f4
179 #define MTK_STAR_REG_C_TX_TIMEOUT		0x01f8
180 #define MTK_STAR_REG_C_TX_LATECOL		0x01fc
181 #define MTK_STAR_REG_C_RX_LENGTHERR		0x0214
182 #define MTK_STAR_REG_C_RX_TWIST			0x0218
183 
184 /* Ethernet CFG Control */
185 #define MTK_PERICFG_REG_NIC_CFG_CON		0x03c4
186 #define MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII	GENMASK(3, 0)
187 #define MTK_PERICFG_BIT_NIC_CFG_CON_RMII	BIT(0)
188 
189 /* Represents the actual structure of descriptors used by the MAC. We can
190  * reuse the same structure for both TX and RX - the layout is the same, only
191  * the flags differ slightly.
192  */
struct mtk_star_ring_desc {
	/* Contains both the status flags as well as packet length. */
	u32 status;
	u32 data_ptr;	/* DMA address of the packet buffer */
	u32 vtag;	/* VLAN tag - not accessed in the visible code paths */
	u32 reserved;
};
200 
201 #define MTK_STAR_DESC_MSK_LEN			GENMASK(15, 0)
202 #define MTK_STAR_DESC_BIT_RX_CRCE		BIT(24)
203 #define MTK_STAR_DESC_BIT_RX_OSIZE		BIT(25)
204 #define MTK_STAR_DESC_BIT_INT			BIT(27)
205 #define MTK_STAR_DESC_BIT_LS			BIT(28)
206 #define MTK_STAR_DESC_BIT_FS			BIT(29)
207 #define MTK_STAR_DESC_BIT_EOR			BIT(30)
208 #define MTK_STAR_DESC_BIT_COWN			BIT(31)
209 
210 /* Helper structure for storing data read from/written to descriptors in order
211  * to limit reads from/writes to DMA memory.
212  */
struct mtk_star_ring_desc_data {
	unsigned int len;	/* packet length from/to the descriptor */
	unsigned int flags;	/* descriptor status bits minus the length */
	dma_addr_t dma_addr;	/* streaming DMA mapping of the buffer */
	struct sk_buff *skb;	/* buffer attached to the descriptor */
};
219 
220 #define MTK_STAR_RING_NUM_DESCS			128
221 #define MTK_STAR_NUM_TX_DESCS			MTK_STAR_RING_NUM_DESCS
222 #define MTK_STAR_NUM_RX_DESCS			MTK_STAR_RING_NUM_DESCS
223 #define MTK_STAR_NUM_DESCS_TOTAL		(MTK_STAR_RING_NUM_DESCS * 2)
224 #define MTK_STAR_DMA_SIZE \
225 		(MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc))
226 
/* Software state of one descriptor ring.  The skb and full DMA address of
 * each slot are tracked here because the hardware descriptor only holds a
 * 32-bit buffer address.
 */
struct mtk_star_ring {
	struct mtk_star_ring_desc *descs;
	struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS];
	dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS];
	unsigned int head;	/* next slot handed to the hardware (push) */
	unsigned int tail;	/* next slot reclaimed from the hardware (pop) */
};
234 
/* Driver state attached to the net_device via netdev_priv(). */
struct mtk_star_priv {
	struct net_device *ndev;

	struct regmap *regs;	/* MAC register space */
	struct regmap *pericfg;	/* peripheral-config syscon (RMII mode setup) */

	struct clk_bulk_data clks[MTK_STAR_NCLKS];

	void *ring_base;	/* CPU address of the descriptor DMA region */
	struct mtk_star_ring_desc *descs_base;
	dma_addr_t dma_addr;	/* device address of the same region */
	struct mtk_star_ring tx_ring;
	struct mtk_star_ring rx_ring;

	struct mii_bus *mii;
	struct napi_struct napi;

	struct device_node *phy_node;
	phy_interface_t phy_intf;
	struct phy_device *phydev;
	/* Link parameters cached by mtk_star_adjust_link(). */
	unsigned int link;
	int speed;
	int duplex;
	int pause;

	/* Protects against concurrent descriptor access. */
	spinlock_t lock;

	struct rtnl_link_stats64 stats;
};
265 
266 static struct device *mtk_star_get_dev(struct mtk_star_priv *priv)
267 {
268 	return priv->ndev->dev.parent;
269 }
270 
/* Regmap settings for the 32-bit MMIO register space.
 * NOTE(review): regmap's internal locking is disabled - serialization is
 * presumably provided by the driver itself; confirm for paths that touch
 * registers from both IRQ and process context.
 */
static const struct regmap_config mtk_star_regmap_config = {
	.reg_bits		= 32,
	.val_bits		= 32,
	.reg_stride		= 4,
	.disable_locking	= true,
};
277 
278 static void mtk_star_ring_init(struct mtk_star_ring *ring,
279 			       struct mtk_star_ring_desc *descs)
280 {
281 	memset(ring, 0, sizeof(*ring));
282 	ring->descs = descs;
283 	ring->head = 0;
284 	ring->tail = 0;
285 }
286 
/* Reclaim the descriptor at the ring's tail if the hardware has released it.
 *
 * Returns 0 and fills @desc_data with the packet length, status flags, DMA
 * address and skb on success; returns -1 while the descriptor is still owned
 * by the DMA engine (COWN bit clear).
 */
static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring,
				  struct mtk_star_ring_desc_data *desc_data)
{
	struct mtk_star_ring_desc *desc = &ring->descs[ring->tail];
	unsigned int status;

	status = READ_ONCE(desc->status);
	/* Order the status read before dependent accesses: once COWN is
	 * observed set, the device's writes must be visible to the CPU.
	 */
	dma_rmb();

	if (!(status & MTK_STAR_DESC_BIT_COWN))
		return -1;

	desc_data->len = status & MTK_STAR_DESC_MSK_LEN;
	desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN;
	desc_data->dma_addr = ring->dma_addrs[ring->tail];
	desc_data->skb = ring->skbs[ring->tail];

	ring->dma_addrs[ring->tail] = 0;
	ring->skbs[ring->tail] = NULL;

	/* Keep only ownership and the end-of-ring marker; clear the length
	 * and per-packet flags before the slot is reused.
	 */
	status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR;

	WRITE_ONCE(desc->data_ptr, 0);
	WRITE_ONCE(desc->status, status);

	ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS;

	return 0;
}
316 
/* Hand the descriptor at the ring's head to the DMA engine.
 *
 * The buffer address, length and flags are written first; ownership is
 * transferred (COWN cleared) only after a dma_wmb(), so the device never
 * sees a half-initialized descriptor.
 */
static void mtk_star_ring_push_head(struct mtk_star_ring *ring,
				    struct mtk_star_ring_desc_data *desc_data,
				    unsigned int flags)
{
	struct mtk_star_ring_desc *desc = &ring->descs[ring->head];
	unsigned int status;

	status = READ_ONCE(desc->status);

	ring->skbs[ring->head] = desc_data->skb;
	ring->dma_addrs[ring->head] = desc_data->dma_addr;

	status |= desc_data->len;
	if (flags)
		status |= flags;

	/* Fill in the descriptor while it is still CPU-owned... */
	WRITE_ONCE(desc->data_ptr, desc_data->dma_addr);
	WRITE_ONCE(desc->status, status);
	status &= ~MTK_STAR_DESC_BIT_COWN;
	/* Flush previous modifications before ownership change. */
	dma_wmb();
	/* ...then clear COWN to release it to the device. */
	WRITE_ONCE(desc->status, status);

	ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS;
}
342 
static void
mtk_star_ring_push_head_rx(struct mtk_star_ring *ring,
			   struct mtk_star_ring_desc_data *desc_data)
{
	/* RX descriptors carry no extra flags - length and ownership only. */
	mtk_star_ring_push_head(ring, desc_data, 0);
}
349 
350 static void
351 mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
352 			   struct mtk_star_ring_desc_data *desc_data)
353 {
354 	static const unsigned int flags = MTK_STAR_DESC_BIT_FS |
355 					  MTK_STAR_DESC_BIT_LS |
356 					  MTK_STAR_DESC_BIT_INT;
357 
358 	mtk_star_ring_push_head(ring, desc_data, flags);
359 }
360 
361 static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
362 {
363 	return abs(ring->head - ring->tail);
364 }
365 
/* True when every descriptor in the ring is in use.
 *
 * NOTE(review): head and tail wrap modulo MTK_STAR_RING_NUM_DESCS, so
 * mtk_star_ring_num_used_descs() appears unable to ever reach the full
 * ring size - confirm the TX queue is actually stopped before the ring
 * overflows.
 */
static bool mtk_star_ring_full(struct mtk_star_ring *ring)
{
	return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
}
370 
371 static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
372 {
373 	return mtk_star_ring_num_used_descs(ring) > 0;
374 }
375 
376 static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
377 				      struct sk_buff *skb)
378 {
379 	struct device *dev = mtk_star_get_dev(priv);
380 
381 	/* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. */
382 	return dma_map_single(dev, skb_tail_pointer(skb) - 2,
383 			      skb_tailroom(skb), DMA_FROM_DEVICE);
384 }
385 
386 static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv,
387 				  struct mtk_star_ring_desc_data *desc_data)
388 {
389 	struct device *dev = mtk_star_get_dev(priv);
390 
391 	dma_unmap_single(dev, desc_data->dma_addr,
392 			 skb_tailroom(desc_data->skb), DMA_FROM_DEVICE);
393 }
394 
395 static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv,
396 				      struct sk_buff *skb)
397 {
398 	struct device *dev = mtk_star_get_dev(priv);
399 
400 	return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
401 }
402 
403 static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv,
404 				  struct mtk_star_ring_desc_data *desc_data)
405 {
406 	struct device *dev = mtk_star_get_dev(priv);
407 
408 	return dma_unmap_single(dev, desc_data->dma_addr,
409 				skb_headlen(desc_data->skb), DMA_TO_DEVICE);
410 }
411 
412 static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
413 {
414 	regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
415 			  MTK_STAR_BIT_MAC_CFG_NIC_PD);
416 }
417 
418 /* Unmask the three interrupts we care about, mask all others. */
419 static void mtk_star_intr_enable(struct mtk_star_priv *priv)
420 {
421 	unsigned int val = MTK_STAR_BIT_INT_STS_TNTC |
422 			   MTK_STAR_BIT_INT_STS_FNRC |
423 			   MTK_STAR_REG_INT_STS_MIB_CNT_TH;
424 
425 	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val);
426 }
427 
428 static void mtk_star_intr_disable(struct mtk_star_priv *priv)
429 {
430 	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
431 }
432 
433 static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
434 {
435 	unsigned int val;
436 
437 	regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
438 
439 	return val;
440 }
441 
442 static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
443 {
444 	unsigned int val;
445 
446 	val = mtk_star_intr_read(priv);
447 	regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
448 
449 	return val;
450 }
451 
452 static void mtk_star_dma_init(struct mtk_star_priv *priv)
453 {
454 	struct mtk_star_ring_desc *desc;
455 	unsigned int val;
456 	int i;
457 
458 	priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base;
459 
460 	for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) {
461 		desc = &priv->descs_base[i];
462 
463 		memset(desc, 0, sizeof(*desc));
464 		desc->status = MTK_STAR_DESC_BIT_COWN;
465 		if ((i == MTK_STAR_NUM_TX_DESCS - 1) ||
466 		    (i == MTK_STAR_NUM_DESCS_TOTAL - 1))
467 			desc->status |= MTK_STAR_DESC_BIT_EOR;
468 	}
469 
470 	mtk_star_ring_init(&priv->tx_ring, priv->descs_base);
471 	mtk_star_ring_init(&priv->rx_ring,
472 			   priv->descs_base + MTK_STAR_NUM_TX_DESCS);
473 
474 	/* Set DMA pointers. */
475 	val = (unsigned int)priv->dma_addr;
476 	regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val);
477 	regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val);
478 
479 	val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS;
480 	regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val);
481 	regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val);
482 }
483 
484 static void mtk_star_dma_start(struct mtk_star_priv *priv)
485 {
486 	regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
487 			MTK_STAR_BIT_TX_DMA_CTRL_START);
488 	regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
489 			MTK_STAR_BIT_RX_DMA_CTRL_START);
490 }
491 
/* Stop both DMA engines.  Note these are full register writes rather than
 * read-modify-write, so every other control bit is cleared at the same
 * time.
 */
static void mtk_star_dma_stop(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
		     MTK_STAR_BIT_TX_DMA_CTRL_STOP);
	regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
		     MTK_STAR_BIT_RX_DMA_CTRL_STOP);
}
499 
500 static void mtk_star_dma_disable(struct mtk_star_priv *priv)
501 {
502 	int i;
503 
504 	mtk_star_dma_stop(priv);
505 
506 	/* Take back all descriptors. */
507 	for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++)
508 		priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN;
509 }
510 
511 static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv)
512 {
513 	regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
514 			MTK_STAR_BIT_RX_DMA_CTRL_RESUME);
515 }
516 
517 static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
518 {
519 	regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
520 			MTK_STAR_BIT_TX_DMA_CTRL_RESUME);
521 }
522 
523 static void mtk_star_set_mac_addr(struct net_device *ndev)
524 {
525 	struct mtk_star_priv *priv = netdev_priv(ndev);
526 	u8 *mac_addr = ndev->dev_addr;
527 	unsigned int high, low;
528 
529 	high = mac_addr[0] << 8 | mac_addr[1] << 0;
530 	low = mac_addr[2] << 24 | mac_addr[3] << 16 |
531 	      mac_addr[4] << 8 | mac_addr[5];
532 
533 	regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high);
534 	regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low);
535 }
536 
537 static void mtk_star_reset_counters(struct mtk_star_priv *priv)
538 {
539 	static const unsigned int counter_regs[] = {
540 		MTK_STAR_REG_C_RXOKPKT,
541 		MTK_STAR_REG_C_RXOKBYTE,
542 		MTK_STAR_REG_C_RXRUNT,
543 		MTK_STAR_REG_C_RXLONG,
544 		MTK_STAR_REG_C_RXDROP,
545 		MTK_STAR_REG_C_RXCRC,
546 		MTK_STAR_REG_C_RXARLDROP,
547 		MTK_STAR_REG_C_RXVLANDROP,
548 		MTK_STAR_REG_C_RXCSERR,
549 		MTK_STAR_REG_C_RXPAUSE,
550 		MTK_STAR_REG_C_TXOKPKT,
551 		MTK_STAR_REG_C_TXOKBYTE,
552 		MTK_STAR_REG_C_TXPAUSECOL,
553 		MTK_STAR_REG_C_TXRTY,
554 		MTK_STAR_REG_C_TXSKIP,
555 		MTK_STAR_REG_C_TX_ARP,
556 		MTK_STAR_REG_C_RX_RERR,
557 		MTK_STAR_REG_C_RX_UNI,
558 		MTK_STAR_REG_C_RX_MULTI,
559 		MTK_STAR_REG_C_RX_BROAD,
560 		MTK_STAR_REG_C_RX_ALIGNERR,
561 		MTK_STAR_REG_C_TX_UNI,
562 		MTK_STAR_REG_C_TX_MULTI,
563 		MTK_STAR_REG_C_TX_BROAD,
564 		MTK_STAR_REG_C_TX_TIMEOUT,
565 		MTK_STAR_REG_C_TX_LATECOL,
566 		MTK_STAR_REG_C_RX_LENGTHERR,
567 		MTK_STAR_REG_C_RX_TWIST,
568 	};
569 
570 	unsigned int i, val;
571 
572 	for (i = 0; i < ARRAY_SIZE(counter_regs); i++)
573 		regmap_read(priv->regs, counter_regs[i], &val);
574 }
575 
576 static void mtk_star_update_stat(struct mtk_star_priv *priv,
577 				 unsigned int reg, u64 *stat)
578 {
579 	unsigned int val;
580 
581 	regmap_read(priv->regs, reg, &val);
582 	*stat += val;
583 }
584 
585 /* Try to get as many stats as possible from the internal registers instead
586  * of tracking them ourselves.
587  */
static void mtk_star_update_stats(struct mtk_star_priv *priv)
{
	struct rtnl_link_stats64 *stats = &priv->stats;

	/* Each read accumulates into the cached stats; the hardware
	 * counters presumably clear on read (see mtk_star_reset_counters())
	 * - confirm against the datasheet.
	 */
	/* OK packets and bytes. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes);

	/* RX & TX multicast. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast);

	/* Collisions. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions);

	/* RX Errors. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR,
			     &stats->rx_length_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG,
			     &stats->rx_over_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR,
			     &stats->rx_frame_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP,
			     &stats->rx_fifo_errors);
	/* Sum of the general RX error counter + all of the above. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors);
	stats->rx_errors += stats->rx_length_errors;
	stats->rx_errors += stats->rx_over_errors;
	stats->rx_errors += stats->rx_crc_errors;
	stats->rx_errors += stats->rx_frame_errors;
	stats->rx_errors += stats->rx_fifo_errors;
}
627 
628 static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev)
629 {
630 	uintptr_t tail, offset;
631 	struct sk_buff *skb;
632 
633 	skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE);
634 	if (!skb)
635 		return NULL;
636 
637 	/* Align to 16 bytes. */
638 	tail = (uintptr_t)skb_tail_pointer(skb);
639 	if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) {
640 		offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1);
641 		skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset);
642 	}
643 
644 	/* Ensure 16-byte alignment of the skb pointer: eth_type_trans() will
645 	 * extract the Ethernet header (14 bytes) so we need two more bytes.
646 	 */
647 	skb_reserve(skb, MTK_STAR_IP_ALIGN);
648 
649 	return skb;
650 }
651 
652 static int mtk_star_prepare_rx_skbs(struct net_device *ndev)
653 {
654 	struct mtk_star_priv *priv = netdev_priv(ndev);
655 	struct mtk_star_ring *ring = &priv->rx_ring;
656 	struct device *dev = mtk_star_get_dev(priv);
657 	struct mtk_star_ring_desc *desc;
658 	struct sk_buff *skb;
659 	dma_addr_t dma_addr;
660 	int i;
661 
662 	for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) {
663 		skb = mtk_star_alloc_skb(ndev);
664 		if (!skb)
665 			return -ENOMEM;
666 
667 		dma_addr = mtk_star_dma_map_rx(priv, skb);
668 		if (dma_mapping_error(dev, dma_addr)) {
669 			dev_kfree_skb(skb);
670 			return -ENOMEM;
671 		}
672 
673 		desc = &ring->descs[i];
674 		desc->data_ptr = dma_addr;
675 		desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN;
676 		desc->status &= ~MTK_STAR_DESC_BIT_COWN;
677 		ring->skbs[i] = skb;
678 		ring->dma_addrs[i] = dma_addr;
679 	}
680 
681 	return 0;
682 }
683 
684 static void
685 mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring,
686 			void (*unmap_func)(struct mtk_star_priv *,
687 					   struct mtk_star_ring_desc_data *))
688 {
689 	struct mtk_star_ring_desc_data desc_data;
690 	int i;
691 
692 	for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) {
693 		if (!ring->dma_addrs[i])
694 			continue;
695 
696 		desc_data.dma_addr = ring->dma_addrs[i];
697 		desc_data.skb = ring->skbs[i];
698 
699 		unmap_func(priv, &desc_data);
700 		dev_kfree_skb(desc_data.skb);
701 	}
702 }
703 
704 static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv)
705 {
706 	struct mtk_star_ring *ring = &priv->rx_ring;
707 
708 	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx);
709 }
710 
711 static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
712 {
713 	struct mtk_star_ring *ring = &priv->tx_ring;
714 
715 	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
716 }
717 
/* All processing for TX and RX happens in the napi poll callback.
 *
 * FIXME: The interrupt handling should be more fine-grained with each
 * interrupt enabled/disabled independently when needed. Unfortunately this
 * turned out to impact the driver's stability and until we have something
 * working properly, we're disabling all interrupts during TX & RX processing
 * or when resetting the counter registers.
 */
726 static irqreturn_t mtk_star_handle_irq(int irq, void *data)
727 {
728 	struct mtk_star_priv *priv;
729 	struct net_device *ndev;
730 
731 	ndev = data;
732 	priv = netdev_priv(ndev);
733 
734 	if (netif_running(ndev)) {
735 		mtk_star_intr_disable(priv);
736 		napi_schedule(&priv->napi);
737 	}
738 
739 	return IRQ_HANDLED;
740 }
741 
742 /* Wait for the completion of any previous command - CMD_START bit must be
743  * cleared by hardware.
744  */
static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv)
{
	unsigned int val;

	/* Poll every 10us, giving up after MTK_STAR_WAIT_TIMEOUT us. */
	return regmap_read_poll_timeout_atomic(priv->regs,
				MTK_STAR_REG_HASH_CTRL, val,
				!(val & MTK_STAR_BIT_HASH_CTRL_CMD_START),
				10, MTK_STAR_WAIT_TIMEOUT);
}
754 
755 static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv)
756 {
757 	unsigned int val;
758 	int ret;
759 
760 	/* Wait for BIST_DONE bit. */
761 	ret = regmap_read_poll_timeout_atomic(priv->regs,
762 					MTK_STAR_REG_HASH_CTRL, val,
763 					val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE,
764 					10, MTK_STAR_WAIT_TIMEOUT);
765 	if (ret)
766 		return ret;
767 
768 	/* Check the BIST_OK bit. */
769 	if (!regmap_test_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
770 			      MTK_STAR_BIT_HASH_CTRL_BIST_OK))
771 		return -EIO;
772 
773 	return 0;
774 }
775 
776 static int mtk_star_set_hashbit(struct mtk_star_priv *priv,
777 				unsigned int hash_addr)
778 {
779 	unsigned int val;
780 	int ret;
781 
782 	ret = mtk_star_hash_wait_cmd_start(priv);
783 	if (ret)
784 		return ret;
785 
786 	val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR;
787 	val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD;
788 	val |= MTK_STAR_BIT_HASH_CTRL_CMD_START;
789 	val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN;
790 	val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA;
791 	regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val);
792 
793 	return mtk_star_hash_wait_ok(priv);
794 }
795 
796 static int mtk_star_reset_hash_table(struct mtk_star_priv *priv)
797 {
798 	int ret;
799 
800 	ret = mtk_star_hash_wait_cmd_start(priv);
801 	if (ret)
802 		return ret;
803 
804 	regmap_set_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
805 			MTK_STAR_BIT_HASH_CTRL_BIST_EN);
806 	regmap_set_bits(priv->regs, MTK_STAR_REG_TEST1,
807 			MTK_STAR_BIT_TEST1_RST_HASH_MBIST);
808 
809 	return mtk_star_hash_wait_ok(priv);
810 }
811 
812 static void mtk_star_phy_config(struct mtk_star_priv *priv)
813 {
814 	unsigned int val;
815 
816 	if (priv->speed == SPEED_1000)
817 		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M;
818 	else if (priv->speed == SPEED_100)
819 		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M;
820 	else
821 		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M;
822 	val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;
823 
824 	val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
825 	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
826 	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
827 	/* Only full-duplex supported for now. */
828 	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
829 
830 	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
831 
832 	if (priv->pause) {
833 		val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
834 		val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
835 		val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
836 	} else {
837 		val = 0;
838 	}
839 
840 	regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
841 			   MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
842 			   MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);
843 
844 	if (priv->pause) {
845 		val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
846 		val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
847 	} else {
848 		val = 0;
849 	}
850 
851 	regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
852 			   MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
853 }
854 
855 static void mtk_star_adjust_link(struct net_device *ndev)
856 {
857 	struct mtk_star_priv *priv = netdev_priv(ndev);
858 	struct phy_device *phydev = priv->phydev;
859 	bool new_state = false;
860 
861 	if (phydev->link) {
862 		if (!priv->link) {
863 			priv->link = phydev->link;
864 			new_state = true;
865 		}
866 
867 		if (priv->speed != phydev->speed) {
868 			priv->speed = phydev->speed;
869 			new_state = true;
870 		}
871 
872 		if (priv->pause != phydev->pause) {
873 			priv->pause = phydev->pause;
874 			new_state = true;
875 		}
876 	} else {
877 		if (priv->link) {
878 			priv->link = phydev->link;
879 			new_state = true;
880 		}
881 	}
882 
883 	if (new_state) {
884 		if (phydev->link)
885 			mtk_star_phy_config(priv);
886 
887 		phy_print_status(ndev->phydev);
888 	}
889 }
890 
891 static void mtk_star_init_config(struct mtk_star_priv *priv)
892 {
893 	unsigned int val;
894 
895 	val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE |
896 	       MTK_STAR_BIT_EXT_MDC_MODE |
897 	       MTK_STAR_BIT_SWC_MII_MODE);
898 
899 	regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
900 	regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
901 			   MTK_STAR_MSK_MAC_CLK_CONF,
902 			   MTK_STAR_BIT_CLK_DIV_10);
903 }
904 
905 static void mtk_star_set_mode_rmii(struct mtk_star_priv *priv)
906 {
907 	regmap_update_bits(priv->pericfg, MTK_PERICFG_REG_NIC_CFG_CON,
908 			   MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII,
909 			   MTK_PERICFG_BIT_NIC_CFG_CON_RMII);
910 }
911 
/* Bring the MAC up: program the MAC configuration, reset the hash table,
 * initialize the DMA rings, request the interrupt, enable NAPI and
 * connect/start the PHY.  Called from the ndo_open callback.  Returns 0
 * on success or a negative errno; acquired resources are released in
 * reverse order on failure.
 */
static int mtk_star_enable(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	unsigned int val;
	int ret;

	mtk_star_nic_disable_pd(priv);
	mtk_star_intr_disable(priv);
	mtk_star_dma_stop(priv);

	mtk_star_set_mac_addr(ndev);

	/* Configure the MAC */
	val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT;
	val <<= MTK_STAR_OFF_MAC_CFG_IPG;
	val |= MTK_STAR_BIT_MAC_CFG_MAXLEN_1522;
	val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD;
	val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP;
	regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val);

	/* Enable Hash Table BIST and reset it */
	ret = mtk_star_reset_hash_table(priv);
	if (ret)
		return ret;

	/* Setup the hashing algorithm */
	regmap_clear_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
			  MTK_STAR_BIT_ARL_CFG_HASH_ALG |
			  MTK_STAR_BIT_ARL_CFG_MISC_MODE);

	/* Don't strip VLAN tags */
	regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
			  MTK_STAR_BIT_MAC_CFG_VLAN_STRIP);

	/* Setup DMA */
	mtk_star_dma_init(priv);

	/* NOTE(review): err_out skips mtk_star_free_rx_skbs(), so
	 * mtk_star_prepare_rx_skbs() must release anything it allocated
	 * before failing - verify it does.
	 */
	ret = mtk_star_prepare_rx_skbs(ndev);
	if (ret)
		goto err_out;

	/* Request the interrupt */
	ret = request_irq(ndev->irq, mtk_star_handle_irq,
			  IRQF_TRIGGER_FALLING, ndev->name, ndev);
	if (ret)
		goto err_free_skbs;

	napi_enable(&priv->napi);

	mtk_star_intr_ack_all(priv);
	mtk_star_intr_enable(priv);

	/* Connect to and start PHY */
	priv->phydev = of_phy_connect(ndev, priv->phy_node,
				      mtk_star_adjust_link, 0, priv->phy_intf);
	if (!priv->phydev) {
		netdev_err(ndev, "failed to connect to PHY\n");
		ret = -ENODEV;
		goto err_free_irq;
	}

	mtk_star_dma_start(priv);
	phy_start(priv->phydev);
	netif_start_queue(ndev);

	return 0;

err_free_irq:
	free_irq(ndev->irq, ndev);
err_free_skbs:
	mtk_star_free_rx_skbs(priv);
err_out:
	return ret;
}
986 
/* Tear down everything mtk_star_enable() set up, roughly in reverse
 * order: stop the TX queue and NAPI, mask and acknowledge interrupts,
 * stop DMA and reclaim all descriptors, stop and detach the PHY, release
 * the IRQ and free all ring buffers.
 */
static void mtk_star_disable(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	mtk_star_intr_disable(priv);
	mtk_star_dma_disable(priv);
	mtk_star_intr_ack_all(priv);
	phy_stop(priv->phydev);
	phy_disconnect(priv->phydev);
	free_irq(ndev->irq, ndev);
	mtk_star_free_rx_skbs(priv);
	mtk_star_free_tx_skbs(priv);
}
1002 
/* ndo_open callback - bring the interface up. */
static int mtk_star_netdev_open(struct net_device *ndev)
{
	return mtk_star_enable(ndev);
}
1007 
/* ndo_stop callback - teardown cannot fail, so always return 0. */
static int mtk_star_netdev_stop(struct net_device *ndev)
{
	mtk_star_disable(ndev);
	return 0;
}
1014 
1015 static int mtk_star_netdev_ioctl(struct net_device *ndev,
1016 				 struct ifreq *req, int cmd)
1017 {
1018 	if (!netif_running(ndev))
1019 		return -EINVAL;
1020 
1021 	return phy_mii_ioctl(ndev->phydev, req, cmd);
1022 }
1023 
1024 static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
1025 				      struct net_device *ndev)
1026 {
1027 	struct mtk_star_priv *priv = netdev_priv(ndev);
1028 	struct mtk_star_ring *ring = &priv->tx_ring;
1029 	struct device *dev = mtk_star_get_dev(priv);
1030 	struct mtk_star_ring_desc_data desc_data;
1031 
1032 	desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
1033 	if (dma_mapping_error(dev, desc_data.dma_addr))
1034 		goto err_drop_packet;
1035 
1036 	desc_data.skb = skb;
1037 	desc_data.len = skb->len;
1038 
1039 	spin_lock_bh(&priv->lock);
1040 
1041 	mtk_star_ring_push_head_tx(ring, &desc_data);
1042 
1043 	netdev_sent_queue(ndev, skb->len);
1044 
1045 	if (mtk_star_ring_full(ring))
1046 		netif_stop_queue(ndev);
1047 
1048 	spin_unlock_bh(&priv->lock);
1049 
1050 	mtk_star_dma_resume_tx(priv);
1051 
1052 	return NETDEV_TX_OK;
1053 
1054 err_drop_packet:
1055 	dev_kfree_skb(skb);
1056 	ndev->stats.tx_dropped++;
1057 	return NETDEV_TX_OK;
1058 }
1059 
1060 /* Returns the number of bytes sent or a negative number on the first
1061  * descriptor owned by DMA.
1062  */
1063 static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
1064 {
1065 	struct mtk_star_ring *ring = &priv->tx_ring;
1066 	struct mtk_star_ring_desc_data desc_data;
1067 	int ret;
1068 
1069 	ret = mtk_star_ring_pop_tail(ring, &desc_data);
1070 	if (ret)
1071 		return ret;
1072 
1073 	mtk_star_dma_unmap_tx(priv, &desc_data);
1074 	ret = desc_data.skb->len;
1075 	dev_kfree_skb_irq(desc_data.skb);
1076 
1077 	return ret;
1078 }
1079 
1080 static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
1081 {
1082 	struct mtk_star_ring *ring = &priv->tx_ring;
1083 	struct net_device *ndev = priv->ndev;
1084 	int ret, pkts_compl, bytes_compl;
1085 	bool wake = false;
1086 
1087 	spin_lock(&priv->lock);
1088 
1089 	for (pkts_compl = 0, bytes_compl = 0;;
1090 	     pkts_compl++, bytes_compl += ret, wake = true) {
1091 		if (!mtk_star_ring_descs_available(ring))
1092 			break;
1093 
1094 		ret = mtk_star_tx_complete_one(priv);
1095 		if (ret < 0)
1096 			break;
1097 	}
1098 
1099 	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
1100 
1101 	if (wake && netif_queue_stopped(ndev))
1102 		netif_wake_queue(ndev);
1103 
1104 	spin_unlock(&priv->lock);
1105 }
1106 
/* ndo_get_stats64 callback: refresh the cached counters via
 * mtk_star_update_stats() and copy them into the caller's buffer.
 */
static void mtk_star_netdev_get_stats64(struct net_device *ndev,
					struct rtnl_link_stats64 *stats)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);

	mtk_star_update_stats(priv);

	memcpy(stats, &priv->stats, sizeof(*stats));
}
1116 
1117 static void mtk_star_set_rx_mode(struct net_device *ndev)
1118 {
1119 	struct mtk_star_priv *priv = netdev_priv(ndev);
1120 	struct netdev_hw_addr *hw_addr;
1121 	unsigned int hash_addr, i;
1122 	int ret;
1123 
1124 	if (ndev->flags & IFF_PROMISC) {
1125 		regmap_set_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
1126 				MTK_STAR_BIT_ARL_CFG_MISC_MODE);
1127 	} else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT ||
1128 		   ndev->flags & IFF_ALLMULTI) {
1129 		for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) {
1130 			ret = mtk_star_set_hashbit(priv, i);
1131 			if (ret)
1132 				goto hash_fail;
1133 		}
1134 	} else {
1135 		/* Clear previous settings. */
1136 		ret = mtk_star_reset_hash_table(priv);
1137 		if (ret)
1138 			goto hash_fail;
1139 
1140 		netdev_for_each_mc_addr(hw_addr, ndev) {
1141 			hash_addr = (hw_addr->addr[0] & 0x01) << 8;
1142 			hash_addr += hw_addr->addr[5];
1143 			ret = mtk_star_set_hashbit(priv, hash_addr);
1144 			if (ret)
1145 				goto hash_fail;
1146 		}
1147 	}
1148 
1149 	return;
1150 
1151 hash_fail:
1152 	if (ret == -ETIMEDOUT)
1153 		netdev_err(ndev, "setting hash bit timed out\n");
1154 	else
1155 		/* Should be -EIO */
1156 		netdev_err(ndev, "unable to set hash bit");
1157 }
1158 
/* net_device callbacks; MAC address handling uses the generic eth_*
 * helpers.
 */
static const struct net_device_ops mtk_star_netdev_ops = {
	.ndo_open		= mtk_star_netdev_open,
	.ndo_stop		= mtk_star_netdev_stop,
	.ndo_start_xmit		= mtk_star_netdev_start_xmit,
	.ndo_get_stats64	= mtk_star_netdev_get_stats64,
	.ndo_set_rx_mode	= mtk_star_set_rx_mode,
	.ndo_do_ioctl		= mtk_star_netdev_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
1169 
/* ethtool ->get_drvinfo: only the driver name is filled in; the other
 * fields are left at their defaults.
 */
static void mtk_star_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
}
1175 
/* TODO Add ethtool stats. */
static const struct ethtool_ops mtk_star_ethtool_ops = {
	.get_drvinfo		= mtk_star_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	/* Link settings are delegated to phylib. */
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
1183 
1184 static int mtk_star_receive_packet(struct mtk_star_priv *priv)
1185 {
1186 	struct mtk_star_ring *ring = &priv->rx_ring;
1187 	struct device *dev = mtk_star_get_dev(priv);
1188 	struct mtk_star_ring_desc_data desc_data;
1189 	struct net_device *ndev = priv->ndev;
1190 	struct sk_buff *curr_skb, *new_skb;
1191 	dma_addr_t new_dma_addr;
1192 	int ret;
1193 
1194 	spin_lock(&priv->lock);
1195 	ret = mtk_star_ring_pop_tail(ring, &desc_data);
1196 	spin_unlock(&priv->lock);
1197 	if (ret)
1198 		return -1;
1199 
1200 	curr_skb = desc_data.skb;
1201 
1202 	if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
1203 	    (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
1204 		/* Error packet -> drop and reuse skb. */
1205 		new_skb = curr_skb;
1206 		goto push_new_skb;
1207 	}
1208 
1209 	/* Prepare new skb before receiving the current one. Reuse the current
1210 	 * skb if we fail at any point.
1211 	 */
1212 	new_skb = mtk_star_alloc_skb(ndev);
1213 	if (!new_skb) {
1214 		ndev->stats.rx_dropped++;
1215 		new_skb = curr_skb;
1216 		goto push_new_skb;
1217 	}
1218 
1219 	new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
1220 	if (dma_mapping_error(dev, new_dma_addr)) {
1221 		ndev->stats.rx_dropped++;
1222 		dev_kfree_skb(new_skb);
1223 		new_skb = curr_skb;
1224 		netdev_err(ndev, "DMA mapping error of RX descriptor\n");
1225 		goto push_new_skb;
1226 	}
1227 
1228 	desc_data.dma_addr = new_dma_addr;
1229 
1230 	/* We can't fail anymore at this point: it's safe to unmap the skb. */
1231 	mtk_star_dma_unmap_rx(priv, &desc_data);
1232 
1233 	skb_put(desc_data.skb, desc_data.len);
1234 	desc_data.skb->ip_summed = CHECKSUM_NONE;
1235 	desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
1236 	desc_data.skb->dev = ndev;
1237 	netif_receive_skb(desc_data.skb);
1238 
1239 push_new_skb:
1240 	desc_data.len = skb_tailroom(new_skb);
1241 	desc_data.skb = new_skb;
1242 
1243 	spin_lock(&priv->lock);
1244 	mtk_star_ring_push_head_rx(ring, &desc_data);
1245 	spin_unlock(&priv->lock);
1246 
1247 	return 0;
1248 }
1249 
/* Receive up to @budget frames, then resume the RX DMA engine.
 * Returns the number of loop iterations performed (the iteration on
 * which mtk_star_receive_packet() reports no work is counted too).
 */
static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
{
	int received = 0;

	while (received < budget) {
		received++;
		if (mtk_star_receive_packet(priv))
			break;
	}

	mtk_star_dma_resume_rx(priv);

	return received;
}
1261 
1262 static int mtk_star_poll(struct napi_struct *napi, int budget)
1263 {
1264 	struct mtk_star_priv *priv;
1265 	unsigned int status;
1266 	int received = 0;
1267 
1268 	priv = container_of(napi, struct mtk_star_priv, napi);
1269 
1270 	status = mtk_star_intr_read(priv);
1271 	mtk_star_intr_ack_all(priv);
1272 
1273 	if (status & MTK_STAR_BIT_INT_STS_TNTC)
1274 		/* Clean-up all TX descriptors. */
1275 		mtk_star_tx_complete_all(priv);
1276 
1277 	if (status & MTK_STAR_BIT_INT_STS_FNRC)
1278 		/* Receive up to $budget packets. */
1279 		received = mtk_star_process_rx(priv, budget);
1280 
1281 	if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
1282 		mtk_star_update_stats(priv);
1283 		mtk_star_reset_counters(priv);
1284 	}
1285 
1286 	if (received < budget)
1287 		napi_complete_done(napi, received);
1288 
1289 	mtk_star_intr_enable(priv);
1290 
1291 	return received;
1292 }
1293 
/* Clear the MDIO transaction-done latch by writing the RWOK bit
 * (presumably write-1-to-clear - the poll below waits for it to be
 * set again by the hardware).
 */
static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0,
		     MTK_STAR_BIT_PHY_CTRL0_RWOK);
}
1299 
/* Poll PHY_CTRL0 (10 us interval) until RWOK signals that the MDIO
 * transaction completed; gives up after MTK_STAR_WAIT_TIMEOUT us and
 * returns the regmap poll error (-ETIMEDOUT) in that case.
 */
static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv)
{
	unsigned int val;

	return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0,
					val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK,
					10, MTK_STAR_WAIT_TIMEOUT);
}
1308 
1309 static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
1310 {
1311 	struct mtk_star_priv *priv = mii->priv;
1312 	unsigned int val, data;
1313 	int ret;
1314 
1315 	if (regnum & MII_ADDR_C45)
1316 		return -EOPNOTSUPP;
1317 
1318 	mtk_star_mdio_rwok_clear(priv);
1319 
1320 	val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
1321 	val &= MTK_STAR_MSK_PHY_CTRL0_PREG;
1322 	val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD;
1323 
1324 	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
1325 
1326 	ret = mtk_star_mdio_rwok_wait(priv);
1327 	if (ret)
1328 		return ret;
1329 
1330 	regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data);
1331 
1332 	data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
1333 	data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
1334 
1335 	return data;
1336 }
1337 
1338 static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
1339 			       int regnum, u16 data)
1340 {
1341 	struct mtk_star_priv *priv = mii->priv;
1342 	unsigned int val;
1343 
1344 	if (regnum & MII_ADDR_C45)
1345 		return -EOPNOTSUPP;
1346 
1347 	mtk_star_mdio_rwok_clear(priv);
1348 
1349 	val = data;
1350 	val <<= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
1351 	val &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
1352 	regnum <<= MTK_STAR_OFF_PHY_CTRL0_PREG;
1353 	regnum &= MTK_STAR_MSK_PHY_CTRL0_PREG;
1354 	val |= regnum;
1355 	val |= MTK_STAR_BIT_PHY_CTRL0_WTCMD;
1356 
1357 	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
1358 
1359 	return mtk_star_mdio_rwok_wait(priv);
1360 }
1361 
1362 static int mtk_star_mdio_init(struct net_device *ndev)
1363 {
1364 	struct mtk_star_priv *priv = netdev_priv(ndev);
1365 	struct device *dev = mtk_star_get_dev(priv);
1366 	struct device_node *of_node, *mdio_node;
1367 	int ret;
1368 
1369 	of_node = dev->of_node;
1370 
1371 	mdio_node = of_get_child_by_name(of_node, "mdio");
1372 	if (!mdio_node)
1373 		return -ENODEV;
1374 
1375 	if (!of_device_is_available(mdio_node)) {
1376 		ret = -ENODEV;
1377 		goto out_put_node;
1378 	}
1379 
1380 	priv->mii = devm_mdiobus_alloc(dev);
1381 	if (!priv->mii) {
1382 		ret = -ENOMEM;
1383 		goto out_put_node;
1384 	}
1385 
1386 	snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
1387 	priv->mii->name = "mtk-mac-mdio";
1388 	priv->mii->parent = dev;
1389 	priv->mii->read = mtk_star_mdio_read;
1390 	priv->mii->write = mtk_star_mdio_write;
1391 	priv->mii->priv = priv;
1392 
1393 	ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node);
1394 
1395 out_put_node:
1396 	of_node_put(mdio_node);
1397 	return ret;
1398 }
1399 
1400 static __maybe_unused int mtk_star_suspend(struct device *dev)
1401 {
1402 	struct mtk_star_priv *priv;
1403 	struct net_device *ndev;
1404 
1405 	ndev = dev_get_drvdata(dev);
1406 	priv = netdev_priv(ndev);
1407 
1408 	if (netif_running(ndev))
1409 		mtk_star_disable(ndev);
1410 
1411 	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1412 
1413 	return 0;
1414 }
1415 
1416 static __maybe_unused int mtk_star_resume(struct device *dev)
1417 {
1418 	struct mtk_star_priv *priv;
1419 	struct net_device *ndev;
1420 	int ret;
1421 
1422 	ndev = dev_get_drvdata(dev);
1423 	priv = netdev_priv(ndev);
1424 
1425 	ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
1426 	if (ret)
1427 		return ret;
1428 
1429 	if (netif_running(ndev)) {
1430 		ret = mtk_star_enable(ndev);
1431 		if (ret)
1432 			clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1433 	}
1434 
1435 	return ret;
1436 }
1437 
/* devm action callback: release the bulk clocks enabled in probe so
 * every error/unbind path leaves them balanced.
 */
static void mtk_star_clk_disable_unprepare(void *data)
{
	struct mtk_star_priv *priv = data;

	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
1444 
/* Probe: allocate the netdev, map registers, acquire clocks/IRQ/DMA
 * resources (all devm-managed, so error paths simply return), set up
 * the MDIO bus and register the network device.
 */
static int mtk_star_probe(struct platform_device *pdev)
{
	struct device_node *of_node;
	struct mtk_star_priv *priv;
	struct net_device *ndev;
	struct device *dev;
	void __iomem *base;
	int ret, i;

	dev = &pdev->dev;
	of_node = dev->of_node;

	/* priv is embedded in the netdev and freed along with it. */
	ndev = devm_alloc_etherdev(dev, sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, ndev);

	ndev->min_mtu = ETH_ZLEN;
	ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE;

	spin_lock_init(&priv->lock);

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* We won't be checking the return values of regmap read & write
	 * functions. They can only fail for mmio if there's a clock attached
	 * to regmap which is not the case here.
	 */
	priv->regs = devm_regmap_init_mmio(dev, base,
					   &mtk_star_regmap_config);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	priv->pericfg = syscon_regmap_lookup_by_phandle(of_node,
							"mediatek,pericfg");
	if (IS_ERR(priv->pericfg)) {
		dev_err(dev, "Failed to lookup the PERICFG syscon\n");
		return PTR_ERR(priv->pericfg);
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0)
		return ndev->irq;

	/* Clocks are enabled here and disabled through the devm action
	 * registered just below, keeping all later error paths balanced.
	 */
	for (i = 0; i < MTK_STAR_NCLKS; i++)
		priv->clks[i].id = mtk_star_clk_names[i];
	ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev,
				       mtk_star_clk_disable_unprepare, priv);
	if (ret)
		return ret;

	/* Only RMII is supported by this driver. */
	ret = of_get_phy_mode(of_node, &priv->phy_intf);
	if (ret) {
		return ret;
	} else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII) {
		dev_err(dev, "unsupported phy mode: %s\n",
			phy_modes(priv->phy_intf));
		return -EINVAL;
	}

	/* NOTE(review): this node reference is never dropped with
	 * of_node_put() anywhere visible - looks like a refcount leak;
	 * confirm against the rest of the file.
	 */
	priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		dev_err(dev, "failed to retrieve the phy handle from device tree\n");
		return -ENODEV;
	}

	mtk_star_set_mode_rmii(priv);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "unsupported DMA mask\n");
		return ret;
	}

	/* Coherent memory backing the descriptor rings. */
	priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE,
					      &priv->dma_addr,
					      GFP_KERNEL | GFP_DMA);
	if (!priv->ring_base)
		return -ENOMEM;

	mtk_star_nic_disable_pd(priv);
	mtk_star_init_config(priv);

	ret = mtk_star_mdio_init(ndev);
	if (ret)
		return ret;

	/* Fall back to a random MAC when the platform provides none. */
	ret = eth_platform_get_mac_address(dev, ndev->dev_addr);
	if (ret || !is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	ndev->netdev_ops = &mtk_star_netdev_ops;
	ndev->ethtool_ops = &mtk_star_ethtool_ops;

	netif_napi_add(ndev, &priv->napi, mtk_star_poll, MTK_STAR_NAPI_WEIGHT);

	return devm_register_netdev(dev, ndev);
}
1557 
/* Device-tree match table for the supported SoCs. */
static const struct of_device_id mtk_star_of_match[] = {
	{ .compatible = "mediatek,mt8516-eth", },
	{ .compatible = "mediatek,mt8518-eth", },
	{ .compatible = "mediatek,mt8175-eth", },
	{ }
};
MODULE_DEVICE_TABLE(of, mtk_star_of_match);

static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
			 mtk_star_suspend, mtk_star_resume);

/* No .remove callback: all resources in probe are devm-managed. */
static struct platform_driver mtk_star_driver = {
	.driver = {
		.name = MTK_STAR_DRVNAME,
		.pm = &mtk_star_pm_ops,
		.of_match_table = of_match_ptr(mtk_star_of_match),
	},
	.probe = mtk_star_probe,
};
module_platform_driver(mtk_star_driver);

MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver");
MODULE_LICENSE("GPL");
1582