1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2020 MediaTek Corporation
4  * Copyright (c) 2020 BayLibre SAS
5  *
6  * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
7  */
8 
9 #include <linux/bits.h>
10 #include <linux/clk.h>
11 #include <linux/compiler.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/etherdevice.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/mii.h>
17 #include <linux/module.h>
18 #include <linux/netdevice.h>
19 #include <linux/of.h>
20 #include <linux/of_mdio.h>
21 #include <linux/of_net.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm.h>
24 #include <linux/regmap.h>
25 #include <linux/skbuff.h>
26 #include <linux/spinlock.h>
27 
28 #define MTK_STAR_DRVNAME			"mtk_star_emac"
29 
30 #define MTK_STAR_WAIT_TIMEOUT			300
31 #define MTK_STAR_MAX_FRAME_SIZE			1514
32 #define MTK_STAR_SKB_ALIGNMENT			16
33 #define MTK_STAR_NAPI_WEIGHT			64
34 #define MTK_STAR_HASHTABLE_MC_LIMIT		256
35 #define MTK_STAR_HASHTABLE_SIZE_MAX		512
36 
37 /* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
38  * work for this controller.
39  */
40 #define MTK_STAR_IP_ALIGN			2
41 
/* Names of the bulk clocks this driver requests via the clk framework. */
static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names)
44 
45 /* PHY Control Register 0 */
46 #define MTK_STAR_REG_PHY_CTRL0			0x0000
47 #define MTK_STAR_BIT_PHY_CTRL0_WTCMD		BIT(13)
48 #define MTK_STAR_BIT_PHY_CTRL0_RDCMD		BIT(14)
49 #define MTK_STAR_BIT_PHY_CTRL0_RWOK		BIT(15)
50 #define MTK_STAR_MSK_PHY_CTRL0_PREG		GENMASK(12, 8)
51 #define MTK_STAR_OFF_PHY_CTRL0_PREG		8
52 #define MTK_STAR_MSK_PHY_CTRL0_RWDATA		GENMASK(31, 16)
53 #define MTK_STAR_OFF_PHY_CTRL0_RWDATA		16
54 
55 /* PHY Control Register 1 */
56 #define MTK_STAR_REG_PHY_CTRL1			0x0004
57 #define MTK_STAR_BIT_PHY_CTRL1_LINK_ST		BIT(0)
58 #define MTK_STAR_BIT_PHY_CTRL1_AN_EN		BIT(8)
59 #define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD	9
60 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M	0x00
61 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M	0x01
62 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M	0x02
63 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX	BIT(11)
64 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX	BIT(12)
65 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX	BIT(13)
66 
67 /* MAC Configuration Register */
68 #define MTK_STAR_REG_MAC_CFG			0x0008
69 #define MTK_STAR_OFF_MAC_CFG_IPG		10
70 #define MTK_STAR_VAL_MAC_CFG_IPG_96BIT		GENMASK(4, 0)
71 #define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522	BIT(16)
72 #define MTK_STAR_BIT_MAC_CFG_AUTO_PAD		BIT(19)
73 #define MTK_STAR_BIT_MAC_CFG_CRC_STRIP		BIT(20)
74 #define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP		BIT(22)
75 #define MTK_STAR_BIT_MAC_CFG_NIC_PD		BIT(31)
76 
77 /* Flow-Control Configuration Register */
78 #define MTK_STAR_REG_FC_CFG			0x000c
79 #define MTK_STAR_BIT_FC_CFG_BP_EN		BIT(7)
80 #define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR	BIT(8)
81 #define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH	16
82 #define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH	GENMASK(27, 16)
83 #define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K	0x800
84 
85 /* ARL Configuration Register */
86 #define MTK_STAR_REG_ARL_CFG			0x0010
87 #define MTK_STAR_BIT_ARL_CFG_HASH_ALG		BIT(0)
88 #define MTK_STAR_BIT_ARL_CFG_MISC_MODE		BIT(4)
89 
90 /* MAC High and Low Bytes Registers */
91 #define MTK_STAR_REG_MY_MAC_H			0x0014
92 #define MTK_STAR_REG_MY_MAC_L			0x0018
93 
94 /* Hash Table Control Register */
95 #define MTK_STAR_REG_HASH_CTRL			0x001c
96 #define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR	GENMASK(8, 0)
97 #define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA	BIT(12)
98 #define MTK_STAR_BIT_HASH_CTRL_ACC_CMD		BIT(13)
99 #define MTK_STAR_BIT_HASH_CTRL_CMD_START	BIT(14)
100 #define MTK_STAR_BIT_HASH_CTRL_BIST_OK		BIT(16)
101 #define MTK_STAR_BIT_HASH_CTRL_BIST_DONE	BIT(17)
102 #define MTK_STAR_BIT_HASH_CTRL_BIST_EN		BIT(31)
103 
104 /* TX DMA Control Register */
105 #define MTK_STAR_REG_TX_DMA_CTRL		0x0034
106 #define MTK_STAR_BIT_TX_DMA_CTRL_START		BIT(0)
107 #define MTK_STAR_BIT_TX_DMA_CTRL_STOP		BIT(1)
108 #define MTK_STAR_BIT_TX_DMA_CTRL_RESUME		BIT(2)
109 
110 /* RX DMA Control Register */
111 #define MTK_STAR_REG_RX_DMA_CTRL		0x0038
112 #define MTK_STAR_BIT_RX_DMA_CTRL_START		BIT(0)
113 #define MTK_STAR_BIT_RX_DMA_CTRL_STOP		BIT(1)
114 #define MTK_STAR_BIT_RX_DMA_CTRL_RESUME		BIT(2)
115 
116 /* DMA Address Registers */
117 #define MTK_STAR_REG_TX_DPTR			0x003c
118 #define MTK_STAR_REG_RX_DPTR			0x0040
119 #define MTK_STAR_REG_TX_BASE_ADDR		0x0044
120 #define MTK_STAR_REG_RX_BASE_ADDR		0x0048
121 
122 /* Interrupt Status Register */
123 #define MTK_STAR_REG_INT_STS			0x0050
124 #define MTK_STAR_REG_INT_STS_PORT_STS_CHG	BIT(2)
125 #define MTK_STAR_REG_INT_STS_MIB_CNT_TH		BIT(3)
126 #define MTK_STAR_BIT_INT_STS_FNRC		BIT(6)
127 #define MTK_STAR_BIT_INT_STS_TNTC		BIT(8)
128 
129 /* Interrupt Mask Register */
130 #define MTK_STAR_REG_INT_MASK			0x0054
131 #define MTK_STAR_BIT_INT_MASK_FNRC		BIT(6)
132 
133 /* Misc. Config Register */
134 #define MTK_STAR_REG_TEST1			0x005c
135 #define MTK_STAR_BIT_TEST1_RST_HASH_MBIST	BIT(31)
136 
137 /* Extended Configuration Register */
138 #define MTK_STAR_REG_EXT_CFG			0x0060
139 #define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS	16
140 #define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS	GENMASK(26, 16)
141 #define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K	0x400
142 
143 /* EthSys Configuration Register */
144 #define MTK_STAR_REG_SYS_CONF			0x0094
145 #define MTK_STAR_BIT_MII_PAD_OUT_ENABLE		BIT(0)
146 #define MTK_STAR_BIT_EXT_MDC_MODE		BIT(1)
147 #define MTK_STAR_BIT_SWC_MII_MODE		BIT(2)
148 
149 /* MAC Clock Configuration Register */
150 #define MTK_STAR_REG_MAC_CLK_CONF		0x00ac
151 #define MTK_STAR_MSK_MAC_CLK_CONF		GENMASK(7, 0)
152 #define MTK_STAR_BIT_CLK_DIV_10			0x0a
153 
154 /* Counter registers. */
155 #define MTK_STAR_REG_C_RXOKPKT			0x0100
156 #define MTK_STAR_REG_C_RXOKBYTE			0x0104
157 #define MTK_STAR_REG_C_RXRUNT			0x0108
158 #define MTK_STAR_REG_C_RXLONG			0x010c
159 #define MTK_STAR_REG_C_RXDROP			0x0110
160 #define MTK_STAR_REG_C_RXCRC			0x0114
161 #define MTK_STAR_REG_C_RXARLDROP		0x0118
162 #define MTK_STAR_REG_C_RXVLANDROP		0x011c
163 #define MTK_STAR_REG_C_RXCSERR			0x0120
164 #define MTK_STAR_REG_C_RXPAUSE			0x0124
165 #define MTK_STAR_REG_C_TXOKPKT			0x0128
166 #define MTK_STAR_REG_C_TXOKBYTE			0x012c
167 #define MTK_STAR_REG_C_TXPAUSECOL		0x0130
168 #define MTK_STAR_REG_C_TXRTY			0x0134
169 #define MTK_STAR_REG_C_TXSKIP			0x0138
170 #define MTK_STAR_REG_C_TX_ARP			0x013c
171 #define MTK_STAR_REG_C_RX_RERR			0x01d8
172 #define MTK_STAR_REG_C_RX_UNI			0x01dc
173 #define MTK_STAR_REG_C_RX_MULTI			0x01e0
174 #define MTK_STAR_REG_C_RX_BROAD			0x01e4
175 #define MTK_STAR_REG_C_RX_ALIGNERR		0x01e8
176 #define MTK_STAR_REG_C_TX_UNI			0x01ec
177 #define MTK_STAR_REG_C_TX_MULTI			0x01f0
178 #define MTK_STAR_REG_C_TX_BROAD			0x01f4
179 #define MTK_STAR_REG_C_TX_TIMEOUT		0x01f8
180 #define MTK_STAR_REG_C_TX_LATECOL		0x01fc
181 #define MTK_STAR_REG_C_RX_LENGTHERR		0x0214
182 #define MTK_STAR_REG_C_RX_TWIST			0x0218
183 
184 /* Ethernet CFG Control */
185 #define MTK_PERICFG_REG_NIC_CFG_CON		0x03c4
186 #define MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII	GENMASK(3, 0)
187 #define MTK_PERICFG_BIT_NIC_CFG_CON_RMII	BIT(0)
188 
189 /* Represents the actual structure of descriptors used by the MAC. We can
190  * reuse the same structure for both TX and RX - the layout is the same, only
191  * the flags differ slightly.
192  */
struct mtk_star_ring_desc {
	/* Contains both the status flags as well as packet length. */
	u32 status;
	/* DMA address of the packet buffer. */
	u32 data_ptr;
	/* VLAN tag field - not used by this driver (VLAN stripping is off). */
	u32 vtag;
	u32 reserved;
};
200 
201 #define MTK_STAR_DESC_MSK_LEN			GENMASK(15, 0)
202 #define MTK_STAR_DESC_BIT_RX_CRCE		BIT(24)
203 #define MTK_STAR_DESC_BIT_RX_OSIZE		BIT(25)
204 #define MTK_STAR_DESC_BIT_INT			BIT(27)
205 #define MTK_STAR_DESC_BIT_LS			BIT(28)
206 #define MTK_STAR_DESC_BIT_FS			BIT(29)
207 #define MTK_STAR_DESC_BIT_EOR			BIT(30)
208 #define MTK_STAR_DESC_BIT_COWN			BIT(31)
209 
210 /* Helper structure for storing data read from/written to descriptors in order
211  * to limit reads from/writes to DMA memory.
212  */
struct mtk_star_ring_desc_data {
	/* Packet length extracted from/written into the status word. */
	unsigned int len;
	/* Status flags with the length bits masked out. */
	unsigned int flags;
	/* DMA address of the mapped skb data. */
	dma_addr_t dma_addr;
	/* The skb associated with this descriptor. */
	struct sk_buff *skb;
};
219 
220 #define MTK_STAR_RING_NUM_DESCS			128
221 #define MTK_STAR_NUM_TX_DESCS			MTK_STAR_RING_NUM_DESCS
222 #define MTK_STAR_NUM_RX_DESCS			MTK_STAR_RING_NUM_DESCS
223 #define MTK_STAR_NUM_DESCS_TOTAL		(MTK_STAR_RING_NUM_DESCS * 2)
224 #define MTK_STAR_DMA_SIZE \
225 		(MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc))
226 
/* Software state for one (TX or RX) descriptor ring. The skb and DMA-address
 * arrays shadow the hardware descriptors so we don't have to read them back
 * from DMA memory.
 */
struct mtk_star_ring {
	struct mtk_star_ring_desc *descs;
	struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS];
	dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS];
	/* Index of the next descriptor to push (producer side). */
	unsigned int head;
	/* Index of the next descriptor to pop (consumer side). */
	unsigned int tail;
};
234 
/* Driver-private state, stored in netdev_priv(). */
struct mtk_star_priv {
	struct net_device *ndev;

	/* MAC register space. */
	struct regmap *regs;
	/* Syscon regmap used to configure the MII mode (see
	 * mtk_star_set_mode_rmii()).
	 */
	struct regmap *pericfg;

	struct clk_bulk_data clks[MTK_STAR_NCLKS];

	/* Coherent DMA memory holding both descriptor rings: TX descriptors
	 * first, RX descriptors right after (see mtk_star_dma_init()).
	 */
	void *ring_base;
	struct mtk_star_ring_desc *descs_base;
	dma_addr_t dma_addr;
	struct mtk_star_ring tx_ring;
	struct mtk_star_ring rx_ring;

	struct mii_bus *mii;
	struct napi_struct napi;

	/* PHY state cached from the last adjust_link callback. */
	struct device_node *phy_node;
	phy_interface_t phy_intf;
	struct phy_device *phydev;
	unsigned int link;
	int speed;
	int duplex;
	int pause;

	/* Protects against concurrent descriptor access. */
	spinlock_t lock;

	/* Stats accumulated from the hardware counter registers. */
	struct rtnl_link_stats64 stats;
};
265 
266 static struct device *mtk_star_get_dev(struct mtk_star_priv *priv)
267 {
268 	return priv->ndev->dev.parent;
269 }
270 
/* 32-bit registers at a stride of 4 bytes.
 * NOTE(review): regmap-internal locking is disabled - presumably because all
 * register accesses are serialized by the driver itself; confirm before
 * adding new call sites from other contexts.
 */
static const struct regmap_config mtk_star_regmap_config = {
	.reg_bits		= 32,
	.val_bits		= 32,
	.reg_stride		= 4,
	.disable_locking	= true,
};
277 
278 static void mtk_star_ring_init(struct mtk_star_ring *ring,
279 			       struct mtk_star_ring_desc *descs)
280 {
281 	memset(ring, 0, sizeof(*ring));
282 	ring->descs = descs;
283 	ring->head = 0;
284 	ring->tail = 0;
285 }
286 
/* Reclaim the descriptor at the ring's tail.
 *
 * Returns 0 and fills @desc_data (length, flags, DMA address and skb) if the
 * hardware has handed the descriptor back (COWN bit set); returns -1 if the
 * descriptor is still owned by DMA. On success the descriptor is cleared,
 * keeping only the COWN and EOR bits, and the tail index advances.
 */
static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring,
				  struct mtk_star_ring_desc_data *desc_data)
{
	struct mtk_star_ring_desc *desc = &ring->descs[ring->tail];
	unsigned int status;

	status = READ_ONCE(desc->status);
	dma_rmb(); /* Make sure we read the status bits before checking it. */

	if (!(status & MTK_STAR_DESC_BIT_COWN))
		return -1;

	/* The low 16 bits of the status word hold the packet length. */
	desc_data->len = status & MTK_STAR_DESC_MSK_LEN;
	desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN;
	desc_data->dma_addr = ring->dma_addrs[ring->tail];
	desc_data->skb = ring->skbs[ring->tail];

	/* Drop our shadow references - ownership moves to the caller. */
	ring->dma_addrs[ring->tail] = 0;
	ring->skbs[ring->tail] = NULL;

	status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR;

	WRITE_ONCE(desc->data_ptr, 0);
	WRITE_ONCE(desc->status, status);

	ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS;

	return 0;
}
316 
/* Hand the descriptor at the ring's head over to the hardware.
 *
 * The data pointer, length and @flags are written first with COWN still set;
 * only after a dma_wmb() is the status word rewritten with COWN cleared, so
 * the device never sees a half-initialized descriptor.
 */
static void mtk_star_ring_push_head(struct mtk_star_ring *ring,
				    struct mtk_star_ring_desc_data *desc_data,
				    unsigned int flags)
{
	struct mtk_star_ring_desc *desc = &ring->descs[ring->head];
	unsigned int status;

	status = READ_ONCE(desc->status);

	/* Shadow the skb and DMA address for later unmapping/freeing. */
	ring->skbs[ring->head] = desc_data->skb;
	ring->dma_addrs[ring->head] = desc_data->dma_addr;

	status |= desc_data->len;
	if (flags)
		status |= flags;

	WRITE_ONCE(desc->data_ptr, desc_data->dma_addr);
	WRITE_ONCE(desc->status, status);
	status &= ~MTK_STAR_DESC_BIT_COWN;
	/* Flush previous modifications before ownership change. */
	dma_wmb();
	WRITE_ONCE(desc->status, status);

	ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS;
}
342 
/* Push an RX buffer descriptor to the hardware - no extra flags needed. */
static void
mtk_star_ring_push_head_rx(struct mtk_star_ring *ring,
			   struct mtk_star_ring_desc_data *desc_data)
{
	mtk_star_ring_push_head(ring, desc_data, 0);
}
349 
350 static void
351 mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
352 			   struct mtk_star_ring_desc_data *desc_data)
353 {
354 	static const unsigned int flags = MTK_STAR_DESC_BIT_FS |
355 					  MTK_STAR_DESC_BIT_LS |
356 					  MTK_STAR_DESC_BIT_INT;
357 
358 	mtk_star_ring_push_head(ring, desc_data, flags);
359 }
360 
/* Number of descriptors currently handed to the hardware.
 *
 * NOTE(review): head and tail are unsigned, so head - tail wraps before
 * being passed to abs(); this yields the expected count only while head and
 * tail stay within one lap of each other - verify the producer/consumer
 * invariants if the ring size or indexing scheme ever changes.
 */
static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
{
	return abs(ring->head - ring->tail);
}
365 
366 static bool mtk_star_ring_full(struct mtk_star_ring *ring)
367 {
368 	return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
369 }
370 
371 static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
372 {
373 	return mtk_star_ring_num_used_descs(ring) > 0;
374 }
375 
/* Map an RX skb's free tail area for the device to write into.
 *
 * Returns the DMA address; the caller must check it with
 * dma_mapping_error().
 */
static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
				      struct sk_buff *skb)
{
	struct device *dev = mtk_star_get_dev(priv);

	/* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. */
	return dma_map_single(dev, skb_tail_pointer(skb) - 2,
			      skb_tailroom(skb), DMA_FROM_DEVICE);
}
385 
/* Undo mtk_star_dma_map_rx() for the skb referenced by @desc_data.
 *
 * NOTE(review): the size is recomputed as skb_tailroom(); this matches the
 * mapped size only as long as nothing has adjusted the skb's tail between
 * map and unmap - keep it that way.
 */
static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv,
				  struct mtk_star_ring_desc_data *desc_data)
{
	struct device *dev = mtk_star_get_dev(priv);

	dma_unmap_single(dev, desc_data->dma_addr,
			 skb_tailroom(desc_data->skb), DMA_FROM_DEVICE);
}
394 
395 static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv,
396 				      struct sk_buff *skb)
397 {
398 	struct device *dev = mtk_star_get_dev(priv);
399 
400 	return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
401 }
402 
403 static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv,
404 				  struct mtk_star_ring_desc_data *desc_data)
405 {
406 	struct device *dev = mtk_star_get_dev(priv);
407 
408 	return dma_unmap_single(dev, desc_data->dma_addr,
409 				skb_headlen(desc_data->skb), DMA_TO_DEVICE);
410 }
411 
/* Clear the NIC power-down bit in the MAC configuration register. */
static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
{
	regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
			  MTK_STAR_BIT_MAC_CFG_NIC_PD);
}
417 
418 /* Unmask the three interrupts we care about, mask all others. */
/* Unmask the three interrupts we care about, mask all others. */
static void mtk_star_intr_enable(struct mtk_star_priv *priv)
{
	/* TX done, RX done and MIB counter threshold. */
	unsigned int val = MTK_STAR_BIT_INT_STS_TNTC |
			   MTK_STAR_BIT_INT_STS_FNRC |
			   MTK_STAR_REG_INT_STS_MIB_CNT_TH;

	/* A set bit in INT_MASK masks the interrupt, hence the inversion. */
	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val);
}
427 
/* Mask all interrupt sources. */
static void mtk_star_intr_disable(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
}
432 
433 static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
434 {
435 	unsigned int val;
436 
437 	regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
438 
439 	return val;
440 }
441 
442 static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
443 {
444 	unsigned int val;
445 
446 	val = mtk_star_intr_read(priv);
447 	regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
448 
449 	return val;
450 }
451 
/* Initialize both descriptor rings in the shared DMA memory and program the
 * base/current pointers into the controller.
 *
 * The first half of the descriptor array is the TX ring, the second half the
 * RX ring. All descriptors start out CPU-owned; the last descriptor of each
 * ring gets the end-of-ring bit.
 */
static void mtk_star_dma_init(struct mtk_star_priv *priv)
{
	struct mtk_star_ring_desc *desc;
	unsigned int val;
	int i;

	priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base;

	for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) {
		desc = &priv->descs_base[i];

		memset(desc, 0, sizeof(*desc));
		desc->status = MTK_STAR_DESC_BIT_COWN;
		if ((i == MTK_STAR_NUM_TX_DESCS - 1) ||
		    (i == MTK_STAR_NUM_DESCS_TOTAL - 1))
			desc->status |= MTK_STAR_DESC_BIT_EOR;
	}

	mtk_star_ring_init(&priv->tx_ring, priv->descs_base);
	mtk_star_ring_init(&priv->rx_ring,
			   priv->descs_base + MTK_STAR_NUM_TX_DESCS);

	/* Set DMA pointers. */
	val = (unsigned int)priv->dma_addr;
	regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val);
	regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val);

	/* The RX ring starts right after the TX descriptors. */
	val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS;
	regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val);
	regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val);
}
483 
/* Start both the TX and RX DMA engines. */
static void mtk_star_dma_start(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
			MTK_STAR_BIT_TX_DMA_CTRL_START);
	regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
			MTK_STAR_BIT_RX_DMA_CTRL_START);
}
491 
/* Stop both DMA engines. Plain writes (not set_bits) so any START/RESUME
 * bits are cleared at the same time.
 */
static void mtk_star_dma_stop(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
		     MTK_STAR_BIT_TX_DMA_CTRL_STOP);
	regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
		     MTK_STAR_BIT_RX_DMA_CTRL_STOP);
}
499 
/* Stop DMA and reclaim ownership of every descriptor by setting COWN. */
static void mtk_star_dma_disable(struct mtk_star_priv *priv)
{
	int i;

	mtk_star_dma_stop(priv);

	/* Take back all descriptors. */
	for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++)
		priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN;
}
510 
/* Kick the RX DMA engine after new buffers have been queued. */
static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
			MTK_STAR_BIT_RX_DMA_CTRL_RESUME);
}
516 
/* Kick the TX DMA engine after new packets have been queued. */
static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
			MTK_STAR_BIT_TX_DMA_CTRL_RESUME);
}
522 
523 static void mtk_star_set_mac_addr(struct net_device *ndev)
524 {
525 	struct mtk_star_priv *priv = netdev_priv(ndev);
526 	u8 *mac_addr = ndev->dev_addr;
527 	unsigned int high, low;
528 
529 	high = mac_addr[0] << 8 | mac_addr[1] << 0;
530 	low = mac_addr[2] << 24 | mac_addr[3] << 16 |
531 	      mac_addr[4] << 8 | mac_addr[5];
532 
533 	regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high);
534 	regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low);
535 }
536 
537 static void mtk_star_reset_counters(struct mtk_star_priv *priv)
538 {
539 	static const unsigned int counter_regs[] = {
540 		MTK_STAR_REG_C_RXOKPKT,
541 		MTK_STAR_REG_C_RXOKBYTE,
542 		MTK_STAR_REG_C_RXRUNT,
543 		MTK_STAR_REG_C_RXLONG,
544 		MTK_STAR_REG_C_RXDROP,
545 		MTK_STAR_REG_C_RXCRC,
546 		MTK_STAR_REG_C_RXARLDROP,
547 		MTK_STAR_REG_C_RXVLANDROP,
548 		MTK_STAR_REG_C_RXCSERR,
549 		MTK_STAR_REG_C_RXPAUSE,
550 		MTK_STAR_REG_C_TXOKPKT,
551 		MTK_STAR_REG_C_TXOKBYTE,
552 		MTK_STAR_REG_C_TXPAUSECOL,
553 		MTK_STAR_REG_C_TXRTY,
554 		MTK_STAR_REG_C_TXSKIP,
555 		MTK_STAR_REG_C_TX_ARP,
556 		MTK_STAR_REG_C_RX_RERR,
557 		MTK_STAR_REG_C_RX_UNI,
558 		MTK_STAR_REG_C_RX_MULTI,
559 		MTK_STAR_REG_C_RX_BROAD,
560 		MTK_STAR_REG_C_RX_ALIGNERR,
561 		MTK_STAR_REG_C_TX_UNI,
562 		MTK_STAR_REG_C_TX_MULTI,
563 		MTK_STAR_REG_C_TX_BROAD,
564 		MTK_STAR_REG_C_TX_TIMEOUT,
565 		MTK_STAR_REG_C_TX_LATECOL,
566 		MTK_STAR_REG_C_RX_LENGTHERR,
567 		MTK_STAR_REG_C_RX_TWIST,
568 	};
569 
570 	unsigned int i, val;
571 
572 	for (i = 0; i < ARRAY_SIZE(counter_regs); i++)
573 		regmap_read(priv->regs, counter_regs[i], &val);
574 }
575 
576 static void mtk_star_update_stat(struct mtk_star_priv *priv,
577 				 unsigned int reg, u64 *stat)
578 {
579 	unsigned int val;
580 
581 	regmap_read(priv->regs, reg, &val);
582 	*stat += val;
583 }
584 
585 /* Try to get as many stats as possible from the internal registers instead
586  * of tracking them ourselves.
587  */
static void mtk_star_update_stats(struct mtk_star_priv *priv)
{
	struct rtnl_link_stats64 *stats = &priv->stats;

	/* OK packets and bytes. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes);

	/* RX & TX multicast. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast);

	/* Collisions. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions);

	/* RX Errors. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR,
			     &stats->rx_length_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG,
			     &stats->rx_over_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR,
			     &stats->rx_frame_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP,
			     &stats->rx_fifo_errors);
	/* Sum of the general RX error counter + all of the above. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors);
	stats->rx_errors += stats->rx_length_errors;
	stats->rx_errors += stats->rx_over_errors;
	stats->rx_errors += stats->rx_crc_errors;
	stats->rx_errors += stats->rx_frame_errors;
	stats->rx_errors += stats->rx_fifo_errors;
}
627 
628 static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev)
629 {
630 	uintptr_t tail, offset;
631 	struct sk_buff *skb;
632 
633 	skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE);
634 	if (!skb)
635 		return NULL;
636 
637 	/* Align to 16 bytes. */
638 	tail = (uintptr_t)skb_tail_pointer(skb);
639 	if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) {
640 		offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1);
641 		skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset);
642 	}
643 
644 	/* Ensure 16-byte alignment of the skb pointer: eth_type_trans() will
645 	 * extract the Ethernet header (14 bytes) so we need two more bytes.
646 	 */
647 	skb_reserve(skb, MTK_STAR_IP_ALIGN);
648 
649 	return skb;
650 }
651 
/* Allocate, map and attach an skb to every RX descriptor, then hand the
 * descriptors to the hardware by clearing COWN.
 *
 * Returns 0 on success or -ENOMEM; on failure the skbs attached so far stay
 * in the ring and are released by the caller via mtk_star_free_rx_skbs().
 */
static int mtk_star_prepare_rx_skbs(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct mtk_star_ring *ring = &priv->rx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int i;

	for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) {
		skb = mtk_star_alloc_skb(ndev);
		if (!skb)
			return -ENOMEM;

		dma_addr = mtk_star_dma_map_rx(priv, skb);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb(skb);
			return -ENOMEM;
		}

		/* Buffer length goes into the low status bits; clearing COWN
		 * gives the descriptor to the DMA engine.
		 */
		desc = &ring->descs[i];
		desc->data_ptr = dma_addr;
		desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN;
		desc->status &= ~MTK_STAR_DESC_BIT_COWN;
		ring->skbs[i] = skb;
		ring->dma_addrs[i] = dma_addr;
	}

	return 0;
}
683 
/* Unmap and free every skb still attached to @ring.
 *
 * @unmap_func selects the direction-specific unmap routine (RX vs TX).
 * Entries with a zero DMA address are considered empty and skipped.
 */
static void
mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring,
			void (*unmap_func)(struct mtk_star_priv *,
					   struct mtk_star_ring_desc_data *))
{
	struct mtk_star_ring_desc_data desc_data;
	int i;

	for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) {
		if (!ring->dma_addrs[i])
			continue;

		desc_data.dma_addr = ring->dma_addrs[i];
		desc_data.skb = ring->skbs[i];

		unmap_func(priv, &desc_data);
		dev_kfree_skb(desc_data.skb);
	}
}
703 
/* Release all skbs still attached to the RX ring. */
static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->rx_ring;

	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx);
}
710 
/* Release all skbs still attached to the TX ring. */
static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->tx_ring;

	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
}
717 
718 /* All processing for TX and RX happens in the napi poll callback.
719  *
720  * FIXME: The interrupt handling should be more fine-grained with each
721  * interrupt enabled/disabled independently when needed. Unfortunatly this
722  * turned out to impact the driver's stability and until we have something
723  * working properly, we're disabling all interrupts during TX & RX processing
724  * or when resetting the counter registers.
725  */
/* Interrupt handler: mask all interrupts and defer the actual TX/RX work to
 * the NAPI poll callback (which re-enables them when done).
 */
static irqreturn_t mtk_star_handle_irq(int irq, void *data)
{
	struct mtk_star_priv *priv;
	struct net_device *ndev;

	ndev = data;
	priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		mtk_star_intr_disable(priv);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}
741 
742 /* Wait for the completion of any previous command - CMD_START bit must be
743  * cleared by hardware.
744  */
/* Poll until the hardware clears CMD_START, i.e. the previous hash-table
 * command has completed. Returns 0 or -ETIMEDOUT from the poll helper.
 */
static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv)
{
	unsigned int val;

	return regmap_read_poll_timeout_atomic(priv->regs,
				MTK_STAR_REG_HASH_CTRL, val,
				!(val & MTK_STAR_BIT_HASH_CTRL_CMD_START),
				10, MTK_STAR_WAIT_TIMEOUT);
}
754 
/* Wait for the hash-table BIST to finish and verify it succeeded.
 *
 * Returns 0 on success, a poll-timeout error if BIST_DONE never sets, or
 * -EIO if it completes without BIST_OK.
 */
static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv)
{
	unsigned int val;
	int ret;

	/* Wait for BIST_DONE bit. */
	ret = regmap_read_poll_timeout_atomic(priv->regs,
					MTK_STAR_REG_HASH_CTRL, val,
					val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE,
					10, MTK_STAR_WAIT_TIMEOUT);
	if (ret)
		return ret;

	/* Check the BIST_OK bit. */
	if (!regmap_test_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
			      MTK_STAR_BIT_HASH_CTRL_BIST_OK))
		return -EIO;

	return 0;
}
775 
/* Set the bit at @hash_addr in the hardware multicast hash table.
 *
 * Issues a write command (ACC_CMD + CMD_START with the data bit set) and
 * waits for it to complete. Returns 0 or a negative error code.
 */
static int mtk_star_set_hashbit(struct mtk_star_priv *priv,
				unsigned int hash_addr)
{
	unsigned int val;
	int ret;

	/* Any previous command must have finished first. */
	ret = mtk_star_hash_wait_cmd_start(priv);
	if (ret)
		return ret;

	val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR;
	val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD;
	val |= MTK_STAR_BIT_HASH_CTRL_CMD_START;
	val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN;
	val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA;
	regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val);

	return mtk_star_hash_wait_ok(priv);
}
795 
/* Clear the whole multicast hash table by triggering its memory BIST.
 *
 * Returns 0 on success or a negative error code from the wait helpers.
 */
static int mtk_star_reset_hash_table(struct mtk_star_priv *priv)
{
	int ret;

	ret = mtk_star_hash_wait_cmd_start(priv);
	if (ret)
		return ret;

	regmap_set_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
			MTK_STAR_BIT_HASH_CTRL_BIST_EN);
	regmap_set_bits(priv->regs, MTK_STAR_REG_TEST1,
			MTK_STAR_BIT_TEST1_RST_HASH_MBIST);

	return mtk_star_hash_wait_ok(priv);
}
811 
/* Program the MAC's speed, duplex and flow-control settings from the PHY
 * state cached in priv (speed/pause, as updated by mtk_star_adjust_link()).
 */
static void mtk_star_phy_config(struct mtk_star_priv *priv)
{
	unsigned int val;

	if (priv->speed == SPEED_1000)
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M;
	else if (priv->speed == SPEED_100)
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M;
	else
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M;
	val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;

	val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
	/* Only full-duplex supported for now. */
	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;

	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);

	/* With pause negotiated: set the send-pause threshold and pause
	 * direction; otherwise clear both fields.
	 */
	if (priv->pause) {
		val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
		val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
		val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
	} else {
		val = 0;
	}

	regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
			   MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
			   MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);

	if (priv->pause) {
		val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
		val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
	} else {
		val = 0;
	}

	regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
			   MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
}
854 
855 static void mtk_star_adjust_link(struct net_device *ndev)
856 {
857 	struct mtk_star_priv *priv = netdev_priv(ndev);
858 	struct phy_device *phydev = priv->phydev;
859 	bool new_state = false;
860 
861 	if (phydev->link) {
862 		if (!priv->link) {
863 			priv->link = phydev->link;
864 			new_state = true;
865 		}
866 
867 		if (priv->speed != phydev->speed) {
868 			priv->speed = phydev->speed;
869 			new_state = true;
870 		}
871 
872 		if (priv->pause != phydev->pause) {
873 			priv->pause = phydev->pause;
874 			new_state = true;
875 		}
876 	} else {
877 		if (priv->link) {
878 			priv->link = phydev->link;
879 			new_state = true;
880 		}
881 	}
882 
883 	if (new_state) {
884 		if (phydev->link)
885 			mtk_star_phy_config(priv);
886 
887 		phy_print_status(ndev->phydev);
888 	}
889 }
890 
/* One-time MAC setup: configure the system-config register (MII pad output,
 * external MDC mode, switch MII mode) and the MAC clock divider.
 */
static void mtk_star_init_config(struct mtk_star_priv *priv)
{
	unsigned int val;

	val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE |
	       MTK_STAR_BIT_EXT_MDC_MODE |
	       MTK_STAR_BIT_SWC_MII_MODE);

	regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
	regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
			   MTK_STAR_MSK_MAC_CLK_CONF,
			   MTK_STAR_BIT_CLK_DIV_10);
}
904 
/* Select RMII mode in the pericfg syscon's NIC configuration register. */
static void mtk_star_set_mode_rmii(struct mtk_star_priv *priv)
{
	regmap_update_bits(priv->pericfg, MTK_PERICFG_REG_NIC_CFG_CON,
			   MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII,
			   MTK_PERICFG_BIT_NIC_CFG_CON_RMII);
}
911 
912 static int mtk_star_enable(struct net_device *ndev)
913 {
914 	struct mtk_star_priv *priv = netdev_priv(ndev);
915 	unsigned int val;
916 	int ret;
917 
918 	mtk_star_nic_disable_pd(priv);
919 	mtk_star_intr_disable(priv);
920 	mtk_star_dma_stop(priv);
921 
922 	mtk_star_set_mac_addr(ndev);
923 
924 	/* Configure the MAC */
925 	val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT;
926 	val <<= MTK_STAR_OFF_MAC_CFG_IPG;
927 	val |= MTK_STAR_BIT_MAC_CFG_MAXLEN_1522;
928 	val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD;
929 	val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP;
930 	regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val);
931 
932 	/* Enable Hash Table BIST and reset it */
933 	ret = mtk_star_reset_hash_table(priv);
934 	if (ret)
935 		return ret;
936 
937 	/* Setup the hashing algorithm */
938 	regmap_clear_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
939 			  MTK_STAR_BIT_ARL_CFG_HASH_ALG |
940 			  MTK_STAR_BIT_ARL_CFG_MISC_MODE);
941 
942 	/* Don't strip VLAN tags */
943 	regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
944 			  MTK_STAR_BIT_MAC_CFG_VLAN_STRIP);
945 
946 	/* Setup DMA */
947 	mtk_star_dma_init(priv);
948 
949 	ret = mtk_star_prepare_rx_skbs(ndev);
950 	if (ret)
951 		goto err_out;
952 
953 	/* Request the interrupt */
954 	ret = request_irq(ndev->irq, mtk_star_handle_irq,
955 			  IRQF_TRIGGER_FALLING, ndev->name, ndev);
956 	if (ret)
957 		goto err_free_skbs;
958 
959 	napi_enable(&priv->napi);
960 
961 	mtk_star_intr_ack_all(priv);
962 	mtk_star_intr_enable(priv);
963 
964 	/* Connect to and start PHY */
965 	priv->phydev = of_phy_connect(ndev, priv->phy_node,
966 				      mtk_star_adjust_link, 0, priv->phy_intf);
967 	if (!priv->phydev) {
968 		netdev_err(ndev, "failed to connect to PHY\n");
969 		goto err_free_irq;
970 	}
971 
972 	mtk_star_dma_start(priv);
973 	phy_start(priv->phydev);
974 	netif_start_queue(ndev);
975 
976 	return 0;
977 
978 err_free_irq:
979 	free_irq(ndev->irq, ndev);
980 err_free_skbs:
981 	mtk_star_free_rx_skbs(priv);
982 err_out:
983 	return ret;
984 }
985 
/* Tear down everything mtk_star_enable() set up: stop the queue and NAPI,
 * quiesce interrupts and DMA, disconnect the PHY, release the IRQ and free
 * all ring buffers.
 */
static void mtk_star_disable(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	mtk_star_intr_disable(priv);
	mtk_star_dma_disable(priv);
	mtk_star_intr_ack_all(priv);
	phy_stop(priv->phydev);
	phy_disconnect(priv->phydev);
	free_irq(ndev->irq, ndev);
	mtk_star_free_rx_skbs(priv);
	mtk_star_free_tx_skbs(priv);
}
1001 
/* ndo_open callback. */
static int mtk_star_netdev_open(struct net_device *ndev)
{
	return mtk_star_enable(ndev);
}
1006 
/* ndo_stop callback. */
static int mtk_star_netdev_stop(struct net_device *ndev)
{
	mtk_star_disable(ndev);

	return 0;
}
1013 
1014 static int mtk_star_netdev_ioctl(struct net_device *ndev,
1015 				 struct ifreq *req, int cmd)
1016 {
1017 	if (!netif_running(ndev))
1018 		return -EINVAL;
1019 
1020 	return phy_mii_ioctl(ndev->phydev, req, cmd);
1021 }
1022 
1023 static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
1024 				      struct net_device *ndev)
1025 {
1026 	struct mtk_star_priv *priv = netdev_priv(ndev);
1027 	struct mtk_star_ring *ring = &priv->tx_ring;
1028 	struct device *dev = mtk_star_get_dev(priv);
1029 	struct mtk_star_ring_desc_data desc_data;
1030 
1031 	desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
1032 	if (dma_mapping_error(dev, desc_data.dma_addr))
1033 		goto err_drop_packet;
1034 
1035 	desc_data.skb = skb;
1036 	desc_data.len = skb->len;
1037 
1038 	spin_lock_bh(&priv->lock);
1039 
1040 	mtk_star_ring_push_head_tx(ring, &desc_data);
1041 
1042 	netdev_sent_queue(ndev, skb->len);
1043 
1044 	if (mtk_star_ring_full(ring))
1045 		netif_stop_queue(ndev);
1046 
1047 	spin_unlock_bh(&priv->lock);
1048 
1049 	mtk_star_dma_resume_tx(priv);
1050 
1051 	return NETDEV_TX_OK;
1052 
1053 err_drop_packet:
1054 	dev_kfree_skb(skb);
1055 	ndev->stats.tx_dropped++;
1056 	return NETDEV_TX_BUSY;
1057 }
1058 
1059 /* Returns the number of bytes sent or a negative number on the first
1060  * descriptor owned by DMA.
1061  */
1062 static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
1063 {
1064 	struct mtk_star_ring *ring = &priv->tx_ring;
1065 	struct mtk_star_ring_desc_data desc_data;
1066 	int ret;
1067 
1068 	ret = mtk_star_ring_pop_tail(ring, &desc_data);
1069 	if (ret)
1070 		return ret;
1071 
1072 	mtk_star_dma_unmap_tx(priv, &desc_data);
1073 	ret = desc_data.skb->len;
1074 	dev_kfree_skb_irq(desc_data.skb);
1075 
1076 	return ret;
1077 }
1078 
1079 static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
1080 {
1081 	struct mtk_star_ring *ring = &priv->tx_ring;
1082 	struct net_device *ndev = priv->ndev;
1083 	int ret, pkts_compl, bytes_compl;
1084 	bool wake = false;
1085 
1086 	spin_lock(&priv->lock);
1087 
1088 	for (pkts_compl = 0, bytes_compl = 0;;
1089 	     pkts_compl++, bytes_compl += ret, wake = true) {
1090 		if (!mtk_star_ring_descs_available(ring))
1091 			break;
1092 
1093 		ret = mtk_star_tx_complete_one(priv);
1094 		if (ret < 0)
1095 			break;
1096 	}
1097 
1098 	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
1099 
1100 	if (wake && netif_queue_stopped(ndev))
1101 		netif_wake_queue(ndev);
1102 
1103 	spin_unlock(&priv->lock);
1104 }
1105 
/* ndo_get_stats64 callback: refresh the counters from the hardware MIB
 * registers, then copy the cached stats to the caller's buffer.
 */
static void mtk_star_netdev_get_stats64(struct net_device *ndev,
					struct rtnl_link_stats64 *stats)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);

	mtk_star_update_stats(priv);

	memcpy(stats, &priv->stats, sizeof(*stats));
}
1115 
1116 static void mtk_star_set_rx_mode(struct net_device *ndev)
1117 {
1118 	struct mtk_star_priv *priv = netdev_priv(ndev);
1119 	struct netdev_hw_addr *hw_addr;
1120 	unsigned int hash_addr, i;
1121 	int ret;
1122 
1123 	if (ndev->flags & IFF_PROMISC) {
1124 		regmap_set_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
1125 				MTK_STAR_BIT_ARL_CFG_MISC_MODE);
1126 	} else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT ||
1127 		   ndev->flags & IFF_ALLMULTI) {
1128 		for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) {
1129 			ret = mtk_star_set_hashbit(priv, i);
1130 			if (ret)
1131 				goto hash_fail;
1132 		}
1133 	} else {
1134 		/* Clear previous settings. */
1135 		ret = mtk_star_reset_hash_table(priv);
1136 		if (ret)
1137 			goto hash_fail;
1138 
1139 		netdev_for_each_mc_addr(hw_addr, ndev) {
1140 			hash_addr = (hw_addr->addr[0] & 0x01) << 8;
1141 			hash_addr += hw_addr->addr[5];
1142 			ret = mtk_star_set_hashbit(priv, hash_addr);
1143 			if (ret)
1144 				goto hash_fail;
1145 		}
1146 	}
1147 
1148 	return;
1149 
1150 hash_fail:
1151 	if (ret == -ETIMEDOUT)
1152 		netdev_err(ndev, "setting hash bit timed out\n");
1153 	else
1154 		/* Should be -EIO */
1155 		netdev_err(ndev, "unable to set hash bit");
1156 }
1157 
/* netdev callbacks; MAC address changes go through the generic
 * eth_mac_addr() helper (only permitted while the interface is down).
 */
static const struct net_device_ops mtk_star_netdev_ops = {
	.ndo_open		= mtk_star_netdev_open,
	.ndo_stop		= mtk_star_netdev_stop,
	.ndo_start_xmit		= mtk_star_netdev_start_xmit,
	.ndo_get_stats64	= mtk_star_netdev_get_stats64,
	.ndo_set_rx_mode	= mtk_star_set_rx_mode,
	.ndo_do_ioctl		= mtk_star_netdev_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
1168 
1169 static void mtk_star_get_drvinfo(struct net_device *dev,
1170 				 struct ethtool_drvinfo *info)
1171 {
1172 	strlcpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
1173 }
1174 
/* TODO Add ethtool stats. */
/* Link state and link settings are delegated to the phylib helpers. */
static const struct ethtool_ops mtk_star_ethtool_ops = {
	.get_drvinfo		= mtk_star_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
1182 
/* Receive a single packet from the RX ring and hand it to the stack.
 *
 * The descriptor's skb is recycled back into the ring whenever anything
 * goes wrong (hardware error flags, skb allocation or DMA mapping
 * failure), so the ring never loses a buffer. Returns 0 on success or -1
 * when the ring had no completed descriptor to pop.
 */
static int mtk_star_receive_packet(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->rx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc_data desc_data;
	struct net_device *ndev = priv->ndev;
	struct sk_buff *curr_skb, *new_skb;
	dma_addr_t new_dma_addr;
	int ret;

	/* Ring accesses are serialized against the xmit path. */
	spin_lock(&priv->lock);
	ret = mtk_star_ring_pop_tail(ring, &desc_data);
	spin_unlock(&priv->lock);
	if (ret)
		return -1;

	curr_skb = desc_data.skb;

	if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
	    (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
		/* Error packet -> drop and reuse skb. */
		new_skb = curr_skb;
		goto push_new_skb;
	}

	/* Prepare new skb before receiving the current one. Reuse the current
	 * skb if we fail at any point.
	 */
	new_skb = mtk_star_alloc_skb(ndev);
	if (!new_skb) {
		ndev->stats.rx_dropped++;
		new_skb = curr_skb;
		goto push_new_skb;
	}

	new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
	if (dma_mapping_error(dev, new_dma_addr)) {
		ndev->stats.rx_dropped++;
		dev_kfree_skb(new_skb);
		new_skb = curr_skb;
		netdev_err(ndev, "DMA mapping error of RX descriptor\n");
		goto push_new_skb;
	}

	/* Store the replacement buffer's address; the descriptor data is
	 * pushed back to the ring below with the new skb attached.
	 */
	desc_data.dma_addr = new_dma_addr;

	/* We can't fail anymore at this point: it's safe to unmap the skb. */
	mtk_star_dma_unmap_rx(priv, &desc_data);

	skb_put(desc_data.skb, desc_data.len);
	desc_data.skb->ip_summed = CHECKSUM_NONE;
	desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
	/* NOTE(review): eth_type_trans() already sets skb->dev, so this
	 * assignment looks redundant — confirm before removing.
	 */
	desc_data.skb->dev = ndev;
	netif_receive_skb(desc_data.skb);

push_new_skb:
	/* Re-arm the descriptor with the (new or recycled) buffer. */
	desc_data.len = skb_tailroom(new_skb);
	desc_data.skb = new_skb;

	spin_lock(&priv->lock);
	mtk_star_ring_push_head_rx(ring, &desc_data);
	spin_unlock(&priv->lock);

	return 0;
}
1248 
/* Receive packets until the NAPI budget is exhausted or the RX ring runs
 * dry, then resume the RX DMA engine. The attempt that finds the ring
 * empty is counted as well, matching the loop-counter semantics callers
 * rely on.
 */
static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
{
	int received = 0;
	int ret = 0;

	while (received < budget && ret == 0) {
		ret = mtk_star_receive_packet(priv);
		received++;
	}

	mtk_star_dma_resume_rx(priv);

	return received;
}
1260 
/* NAPI poll handler: acknowledge all pending interrupts, then service TX
 * completions, RX packets (bounded by the budget) and the MIB-counter
 * threshold event according to the latched status bits.
 *
 * NOTE(review): interrupts are re-enabled unconditionally at the end,
 * even when the budget was fully consumed and NAPI stays scheduled —
 * confirm this is intended for this controller.
 */
static int mtk_star_poll(struct napi_struct *napi, int budget)
{
	struct mtk_star_priv *priv;
	unsigned int status;
	int received = 0;

	priv = container_of(napi, struct mtk_star_priv, napi);

	/* Snapshot then clear the interrupt status before servicing it. */
	status = mtk_star_intr_read(priv);
	mtk_star_intr_ack_all(priv);

	if (status & MTK_STAR_BIT_INT_STS_TNTC)
		/* Clean-up all TX descriptors. */
		mtk_star_tx_complete_all(priv);

	if (status & MTK_STAR_BIT_INT_STS_FNRC)
		/* Receive up to $budget packets. */
		received = mtk_star_process_rx(priv, budget);

	if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
		/* Hardware counters are near overflow: fold them into the
		 * software stats and reset them.
		 */
		mtk_star_update_stats(priv);
		mtk_star_reset_counters(priv);
	}

	if (received < budget)
		napi_complete_done(napi, received);

	mtk_star_intr_enable(priv);

	return received;
}
1292 
/* Clear the MDIO read/write-done flag (write-1-to-clear) before issuing
 * a new PHY register access.
 */
static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0,
		     MTK_STAR_BIT_PHY_CTRL0_RWOK);
}
1298 
/* Poll (every 10us, up to MTK_STAR_WAIT_TIMEOUT us) until the hardware
 * signals completion of an MDIO transaction. Returns 0 on success or the
 * regmap poll error (e.g. -ETIMEDOUT).
 */
static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv)
{
	unsigned int val;

	return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0,
					val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK,
					10, MTK_STAR_WAIT_TIMEOUT);
}
1307 
1308 static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
1309 {
1310 	struct mtk_star_priv *priv = mii->priv;
1311 	unsigned int val, data;
1312 	int ret;
1313 
1314 	if (regnum & MII_ADDR_C45)
1315 		return -EOPNOTSUPP;
1316 
1317 	mtk_star_mdio_rwok_clear(priv);
1318 
1319 	val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
1320 	val &= MTK_STAR_MSK_PHY_CTRL0_PREG;
1321 	val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD;
1322 
1323 	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
1324 
1325 	ret = mtk_star_mdio_rwok_wait(priv);
1326 	if (ret)
1327 		return ret;
1328 
1329 	regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data);
1330 
1331 	data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
1332 	data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
1333 
1334 	return data;
1335 }
1336 
1337 static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
1338 			       int regnum, u16 data)
1339 {
1340 	struct mtk_star_priv *priv = mii->priv;
1341 	unsigned int val;
1342 
1343 	if (regnum & MII_ADDR_C45)
1344 		return -EOPNOTSUPP;
1345 
1346 	mtk_star_mdio_rwok_clear(priv);
1347 
1348 	val = data;
1349 	val <<= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
1350 	val &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
1351 	regnum <<= MTK_STAR_OFF_PHY_CTRL0_PREG;
1352 	regnum &= MTK_STAR_MSK_PHY_CTRL0_PREG;
1353 	val |= regnum;
1354 	val |= MTK_STAR_BIT_PHY_CTRL0_WTCMD;
1355 
1356 	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
1357 
1358 	return mtk_star_mdio_rwok_wait(priv);
1359 }
1360 
/* Allocate and register the MDIO bus described by the "mdio" child node
 * of the device tree. Returns 0 on success, -ENODEV when the node is
 * absent or disabled, -ENOMEM on allocation failure, or the error from
 * of_mdiobus_register(). The node reference is dropped on all paths.
 */
static int mtk_star_mdio_init(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct device *dev = mtk_star_get_dev(priv);
	struct device_node *of_node, *mdio_node;
	int ret;

	of_node = dev->of_node;

	mdio_node = of_get_child_by_name(of_node, "mdio");
	if (!mdio_node)
		return -ENODEV;

	if (!of_device_is_available(mdio_node)) {
		ret = -ENODEV;
		goto out_put_node;
	}

	/* devm-managed: freed automatically on driver detach. */
	priv->mii = devm_mdiobus_alloc(dev);
	if (!priv->mii) {
		ret = -ENOMEM;
		goto out_put_node;
	}

	snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
	priv->mii->name = "mtk-mac-mdio";
	priv->mii->parent = dev;
	priv->mii->read = mtk_star_mdio_read;
	priv->mii->write = mtk_star_mdio_write;
	priv->mii->priv = priv;

	ret = of_mdiobus_register(priv->mii, mdio_node);

out_put_node:
	of_node_put(mdio_node);
	return ret;
}
1398 
/* System suspend: tear down the data path if the interface is up (it is
 * rebuilt in mtk_star_resume()) and gate all controller clocks.
 */
static __maybe_unused int mtk_star_suspend(struct device *dev)
{
	struct mtk_star_priv *priv;
	struct net_device *ndev;

	ndev = dev_get_drvdata(dev);
	priv = netdev_priv(ndev);

	if (netif_running(ndev))
		mtk_star_disable(ndev);

	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);

	return 0;
}
1414 
/* System resume: re-enable the clocks and, if the interface was up,
 * bring the data path back. Clocks are gated again if the re-enable
 * fails. Returns 0 when the interface was down (ret is 0 from the clock
 * call in that case).
 */
static __maybe_unused int mtk_star_resume(struct device *dev)
{
	struct mtk_star_priv *priv;
	struct net_device *ndev;
	int ret;

	ndev = dev_get_drvdata(dev);
	priv = netdev_priv(ndev);

	ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	if (netif_running(ndev)) {
		ret = mtk_star_enable(ndev);
		if (ret)
			clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
	}

	return ret;
}
1436 
/* devm action: gate the clocks when the device is unbound (registered
 * via devm_add_action_or_reset() in probe).
 */
static void mtk_star_clk_disable_unprepare(void *data)
{
	struct mtk_star_priv *priv = data;

	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
1443 
/* devm action: unregister the MDIO bus when the device is unbound (the
 * bus structure itself is devm-allocated and freed separately).
 */
static void mtk_star_mdiobus_unregister(void *data)
{
	struct mtk_star_priv *priv = data;

	mdiobus_unregister(priv->mii);
}
1450 
/* Probe: fully devm-managed bring-up — net device allocation, MMIO
 * regmap, PERICFG syscon, IRQ, clocks, DT phy-mode/phy-handle, coherent
 * DMA ring memory, MDIO bus and finally netdev registration. Every error
 * path is a plain return; devm actions handle the unwinding.
 */
static int mtk_star_probe(struct platform_device *pdev)
{
	struct device_node *of_node;
	struct mtk_star_priv *priv;
	struct net_device *ndev;
	struct device *dev;
	void __iomem *base;
	int ret, i;

	dev = &pdev->dev;
	of_node = dev->of_node;

	ndev = devm_alloc_etherdev(dev, sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, ndev);

	ndev->min_mtu = ETH_ZLEN;
	ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE;

	spin_lock_init(&priv->lock);

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* We won't be checking the return values of regmap read & write
	 * functions. They can only fail for mmio if there's a clock attached
	 * to regmap which is not the case here.
	 */
	priv->regs = devm_regmap_init_mmio(dev, base,
					   &mtk_star_regmap_config);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	priv->pericfg = syscon_regmap_lookup_by_phandle(of_node,
							"mediatek,pericfg");
	if (IS_ERR(priv->pericfg)) {
		dev_err(dev, "Failed to lookup the PERICFG syscon\n");
		return PTR_ERR(priv->pericfg);
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0)
		return ndev->irq;

	for (i = 0; i < MTK_STAR_NCLKS; i++)
		priv->clks[i].id = mtk_star_clk_names[i];
	ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	/* From here on the clocks are gated automatically on unbind. */
	ret = devm_add_action_or_reset(dev,
				       mtk_star_clk_disable_unprepare, priv);
	if (ret)
		return ret;

	/* Only RMII is supported by this MAC. */
	ret = of_get_phy_mode(of_node, &priv->phy_intf);
	if (ret) {
		return ret;
	} else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII) {
		dev_err(dev, "unsupported phy mode: %s\n",
			phy_modes(priv->phy_intf));
		return -EINVAL;
	}

	/* NOTE(review): of_parse_phandle() takes a node reference; no
	 * matching of_node_put() is visible in this file — confirm the
	 * reference is released somewhere on unbind/error.
	 */
	priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		dev_err(dev, "failed to retrieve the phy handle from device tree\n");
		return -ENODEV;
	}

	mtk_star_set_mode_rmii(priv);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "unsupported DMA mask\n");
		return ret;
	}

	priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE,
					      &priv->dma_addr,
					      GFP_KERNEL | GFP_DMA);
	if (!priv->ring_base)
		return -ENOMEM;

	mtk_star_nic_disable_pd(priv);
	mtk_star_init_config(priv);

	ret = mtk_star_mdio_init(ndev);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, mtk_star_mdiobus_unregister, priv);
	if (ret)
		return ret;

	/* Fall back to a random MAC if none is provided by the platform. */
	ret = eth_platform_get_mac_address(dev, ndev->dev_addr);
	if (ret || !is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	ndev->netdev_ops = &mtk_star_netdev_ops;
	ndev->ethtool_ops = &mtk_star_ethtool_ops;

	netif_napi_add(ndev, &priv->napi, mtk_star_poll, MTK_STAR_NAPI_WEIGHT);

	return devm_register_netdev(dev, ndev);
}
1567 
/* Device-tree compatible strings handled by this driver. */
static const struct of_device_id mtk_star_of_match[] = {
	{ .compatible = "mediatek,mt8516-eth", },
	{ .compatible = "mediatek,mt8518-eth", },
	{ .compatible = "mediatek,mt8175-eth", },
	{ }
};
MODULE_DEVICE_TABLE(of, mtk_star_of_match);
1575 
/* System sleep PM ops; runtime PM is not implemented. */
static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
			 mtk_star_suspend, mtk_star_resume);

/* No .remove callback: all teardown is handled by devm actions. */
static struct platform_driver mtk_star_driver = {
	.driver = {
		.name = MTK_STAR_DRVNAME,
		.pm = &mtk_star_pm_ops,
		.of_match_table = of_match_ptr(mtk_star_of_match),
	},
	.probe = mtk_star_probe,
};
module_platform_driver(mtk_star_driver);

MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver");
MODULE_LICENSE("GPL");
1592