1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2020 MediaTek Corporation
4  * Copyright (c) 2020 BayLibre SAS
5  *
6  * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
7  */
8 
9 #include <linux/bits.h>
10 #include <linux/clk.h>
11 #include <linux/compiler.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/etherdevice.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/mii.h>
17 #include <linux/module.h>
18 #include <linux/netdevice.h>
19 #include <linux/of.h>
20 #include <linux/of_device.h>
21 #include <linux/of_mdio.h>
22 #include <linux/of_net.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm.h>
25 #include <linux/regmap.h>
26 #include <linux/skbuff.h>
27 #include <linux/spinlock.h>
28 
29 #define MTK_STAR_DRVNAME			"mtk_star_emac"
30 
31 #define MTK_STAR_WAIT_TIMEOUT			300
32 #define MTK_STAR_MAX_FRAME_SIZE			1514
33 #define MTK_STAR_SKB_ALIGNMENT			16
34 #define MTK_STAR_HASHTABLE_MC_LIMIT		256
35 #define MTK_STAR_HASHTABLE_SIZE_MAX		512
36 
37 /* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
38  * work for this controller.
39  */
40 #define MTK_STAR_IP_ALIGN			2
41 
42 static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
43 #define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names)
44 
45 /* PHY Control Register 0 */
46 #define MTK_STAR_REG_PHY_CTRL0			0x0000
47 #define MTK_STAR_BIT_PHY_CTRL0_WTCMD		BIT(13)
48 #define MTK_STAR_BIT_PHY_CTRL0_RDCMD		BIT(14)
49 #define MTK_STAR_BIT_PHY_CTRL0_RWOK		BIT(15)
50 #define MTK_STAR_MSK_PHY_CTRL0_PREG		GENMASK(12, 8)
51 #define MTK_STAR_OFF_PHY_CTRL0_PREG		8
52 #define MTK_STAR_MSK_PHY_CTRL0_RWDATA		GENMASK(31, 16)
53 #define MTK_STAR_OFF_PHY_CTRL0_RWDATA		16
54 
55 /* PHY Control Register 1 */
56 #define MTK_STAR_REG_PHY_CTRL1			0x0004
57 #define MTK_STAR_BIT_PHY_CTRL1_LINK_ST		BIT(0)
58 #define MTK_STAR_BIT_PHY_CTRL1_AN_EN		BIT(8)
59 #define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD	9
60 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M	0x00
61 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M	0x01
62 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M	0x02
63 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX	BIT(11)
64 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX	BIT(12)
65 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX	BIT(13)
66 
67 /* MAC Configuration Register */
68 #define MTK_STAR_REG_MAC_CFG			0x0008
69 #define MTK_STAR_OFF_MAC_CFG_IPG		10
70 #define MTK_STAR_VAL_MAC_CFG_IPG_96BIT		GENMASK(4, 0)
71 #define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522	BIT(16)
72 #define MTK_STAR_BIT_MAC_CFG_AUTO_PAD		BIT(19)
73 #define MTK_STAR_BIT_MAC_CFG_CRC_STRIP		BIT(20)
74 #define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP		BIT(22)
75 #define MTK_STAR_BIT_MAC_CFG_NIC_PD		BIT(31)
76 
77 /* Flow-Control Configuration Register */
78 #define MTK_STAR_REG_FC_CFG			0x000c
79 #define MTK_STAR_BIT_FC_CFG_BP_EN		BIT(7)
80 #define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR	BIT(8)
81 #define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH	16
82 #define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH	GENMASK(27, 16)
83 #define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K	0x800
84 
85 /* ARL Configuration Register */
86 #define MTK_STAR_REG_ARL_CFG			0x0010
87 #define MTK_STAR_BIT_ARL_CFG_HASH_ALG		BIT(0)
88 #define MTK_STAR_BIT_ARL_CFG_MISC_MODE		BIT(4)
89 
90 /* MAC High and Low Bytes Registers */
91 #define MTK_STAR_REG_MY_MAC_H			0x0014
92 #define MTK_STAR_REG_MY_MAC_L			0x0018
93 
94 /* Hash Table Control Register */
95 #define MTK_STAR_REG_HASH_CTRL			0x001c
96 #define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR	GENMASK(8, 0)
97 #define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA	BIT(12)
98 #define MTK_STAR_BIT_HASH_CTRL_ACC_CMD		BIT(13)
99 #define MTK_STAR_BIT_HASH_CTRL_CMD_START	BIT(14)
100 #define MTK_STAR_BIT_HASH_CTRL_BIST_OK		BIT(16)
101 #define MTK_STAR_BIT_HASH_CTRL_BIST_DONE	BIT(17)
102 #define MTK_STAR_BIT_HASH_CTRL_BIST_EN		BIT(31)
103 
104 /* TX DMA Control Register */
105 #define MTK_STAR_REG_TX_DMA_CTRL		0x0034
106 #define MTK_STAR_BIT_TX_DMA_CTRL_START		BIT(0)
107 #define MTK_STAR_BIT_TX_DMA_CTRL_STOP		BIT(1)
108 #define MTK_STAR_BIT_TX_DMA_CTRL_RESUME		BIT(2)
109 
110 /* RX DMA Control Register */
111 #define MTK_STAR_REG_RX_DMA_CTRL		0x0038
112 #define MTK_STAR_BIT_RX_DMA_CTRL_START		BIT(0)
113 #define MTK_STAR_BIT_RX_DMA_CTRL_STOP		BIT(1)
114 #define MTK_STAR_BIT_RX_DMA_CTRL_RESUME		BIT(2)
115 
116 /* DMA Address Registers */
117 #define MTK_STAR_REG_TX_DPTR			0x003c
118 #define MTK_STAR_REG_RX_DPTR			0x0040
119 #define MTK_STAR_REG_TX_BASE_ADDR		0x0044
120 #define MTK_STAR_REG_RX_BASE_ADDR		0x0048
121 
122 /* Interrupt Status Register */
123 #define MTK_STAR_REG_INT_STS			0x0050
124 #define MTK_STAR_REG_INT_STS_PORT_STS_CHG	BIT(2)
125 #define MTK_STAR_REG_INT_STS_MIB_CNT_TH		BIT(3)
126 #define MTK_STAR_BIT_INT_STS_FNRC		BIT(6)
127 #define MTK_STAR_BIT_INT_STS_TNTC		BIT(8)
128 
129 /* Interrupt Mask Register */
130 #define MTK_STAR_REG_INT_MASK			0x0054
131 #define MTK_STAR_BIT_INT_MASK_FNRC		BIT(6)
132 
133 /* Delay-Macro Register */
134 #define MTK_STAR_REG_TEST0			0x0058
135 #define MTK_STAR_BIT_INV_RX_CLK			BIT(30)
136 #define MTK_STAR_BIT_INV_TX_CLK			BIT(31)
137 
138 /* Misc. Config Register */
139 #define MTK_STAR_REG_TEST1			0x005c
140 #define MTK_STAR_BIT_TEST1_RST_HASH_MBIST	BIT(31)
141 
142 /* Extended Configuration Register */
143 #define MTK_STAR_REG_EXT_CFG			0x0060
144 #define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS	16
145 #define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS	GENMASK(26, 16)
146 #define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K	0x400
147 
148 /* EthSys Configuration Register */
149 #define MTK_STAR_REG_SYS_CONF			0x0094
150 #define MTK_STAR_BIT_MII_PAD_OUT_ENABLE		BIT(0)
151 #define MTK_STAR_BIT_EXT_MDC_MODE		BIT(1)
152 #define MTK_STAR_BIT_SWC_MII_MODE		BIT(2)
153 
154 /* MAC Clock Configuration Register */
155 #define MTK_STAR_REG_MAC_CLK_CONF		0x00ac
156 #define MTK_STAR_MSK_MAC_CLK_CONF		GENMASK(7, 0)
157 #define MTK_STAR_BIT_CLK_DIV_10			0x0a
158 #define MTK_STAR_BIT_CLK_DIV_50			0x32
159 
160 /* Counter registers. */
161 #define MTK_STAR_REG_C_RXOKPKT			0x0100
162 #define MTK_STAR_REG_C_RXOKBYTE			0x0104
163 #define MTK_STAR_REG_C_RXRUNT			0x0108
164 #define MTK_STAR_REG_C_RXLONG			0x010c
165 #define MTK_STAR_REG_C_RXDROP			0x0110
166 #define MTK_STAR_REG_C_RXCRC			0x0114
167 #define MTK_STAR_REG_C_RXARLDROP		0x0118
168 #define MTK_STAR_REG_C_RXVLANDROP		0x011c
169 #define MTK_STAR_REG_C_RXCSERR			0x0120
170 #define MTK_STAR_REG_C_RXPAUSE			0x0124
171 #define MTK_STAR_REG_C_TXOKPKT			0x0128
172 #define MTK_STAR_REG_C_TXOKBYTE			0x012c
173 #define MTK_STAR_REG_C_TXPAUSECOL		0x0130
174 #define MTK_STAR_REG_C_TXRTY			0x0134
175 #define MTK_STAR_REG_C_TXSKIP			0x0138
176 #define MTK_STAR_REG_C_TX_ARP			0x013c
177 #define MTK_STAR_REG_C_RX_RERR			0x01d8
178 #define MTK_STAR_REG_C_RX_UNI			0x01dc
179 #define MTK_STAR_REG_C_RX_MULTI			0x01e0
180 #define MTK_STAR_REG_C_RX_BROAD			0x01e4
181 #define MTK_STAR_REG_C_RX_ALIGNERR		0x01e8
182 #define MTK_STAR_REG_C_TX_UNI			0x01ec
183 #define MTK_STAR_REG_C_TX_MULTI			0x01f0
184 #define MTK_STAR_REG_C_TX_BROAD			0x01f4
185 #define MTK_STAR_REG_C_TX_TIMEOUT		0x01f8
186 #define MTK_STAR_REG_C_TX_LATECOL		0x01fc
187 #define MTK_STAR_REG_C_RX_LENGTHERR		0x0214
188 #define MTK_STAR_REG_C_RX_TWIST			0x0218
189 
190 /* Ethernet CFG Control */
191 #define MTK_PERICFG_REG_NIC_CFG0_CON		0x03c4
192 #define MTK_PERICFG_REG_NIC_CFG1_CON		0x03c8
193 #define MTK_PERICFG_REG_NIC_CFG_CON_V2		0x0c10
194 #define MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF	GENMASK(3, 0)
195 #define MTK_PERICFG_BIT_NIC_CFG_CON_MII		0
196 #define MTK_PERICFG_BIT_NIC_CFG_CON_RMII	1
197 #define MTK_PERICFG_BIT_NIC_CFG_CON_CLK		BIT(0)
198 #define MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2	BIT(8)
199 
/* Represents the actual structure of descriptors used by the MAC. We can
 * reuse the same structure for both TX and RX - the layout is the same, only
 * the flags differ slightly.
 *
 * This layout is shared with the DMA engine, so the field order and widths
 * must not change. All fields are accessed with READ_ONCE()/WRITE_ONCE()
 * by the ring helpers below.
 */
struct mtk_star_ring_desc {
	/* Contains both the status flags as well as packet length. */
	u32 status;
	/* DMA address of the packet buffer this descriptor points at. */
	u32 data_ptr;
	/* VLAN tag field - unused by this driver (VLAN stripping disabled). */
	u32 vtag;
	u32 reserved;
};
211 
212 #define MTK_STAR_DESC_MSK_LEN			GENMASK(15, 0)
213 #define MTK_STAR_DESC_BIT_RX_CRCE		BIT(24)
214 #define MTK_STAR_DESC_BIT_RX_OSIZE		BIT(25)
215 #define MTK_STAR_DESC_BIT_INT			BIT(27)
216 #define MTK_STAR_DESC_BIT_LS			BIT(28)
217 #define MTK_STAR_DESC_BIT_FS			BIT(29)
218 #define MTK_STAR_DESC_BIT_EOR			BIT(30)
219 #define MTK_STAR_DESC_BIT_COWN			BIT(31)
220 
/* Helper structure for storing data read from/written to descriptors in order
 * to limit reads from/writes to DMA memory.
 */
struct mtk_star_ring_desc_data {
	/* Packet length extracted from/written to the status word. */
	unsigned int len;
	/* Status flags with the length bits masked out. */
	unsigned int flags;
	/* DMA handle of the mapped skb data. */
	dma_addr_t dma_addr;
	/* The skb associated with this descriptor slot. */
	struct sk_buff *skb;
};
230 
231 #define MTK_STAR_RING_NUM_DESCS			128
232 #define MTK_STAR_NUM_TX_DESCS			MTK_STAR_RING_NUM_DESCS
233 #define MTK_STAR_NUM_RX_DESCS			MTK_STAR_RING_NUM_DESCS
234 #define MTK_STAR_NUM_DESCS_TOTAL		(MTK_STAR_RING_NUM_DESCS * 2)
235 #define MTK_STAR_DMA_SIZE \
236 		(MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc))
237 
/* Software state for one descriptor ring (used for both TX and RX).
 * The skbs[] and dma_addrs[] arrays shadow the hardware descriptors so we
 * don't have to read DMA memory to recover the skb/mapping on completion.
 */
struct mtk_star_ring {
	/* Points into the coherent DMA area allocated in priv->ring_base. */
	struct mtk_star_ring_desc *descs;
	struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS];
	dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS];
	/* Index of the next descriptor to give to hardware. */
	unsigned int head;
	/* Index of the next descriptor to reclaim from hardware. */
	unsigned int tail;
};
245 
/* Per-SoC quirks: how to program the pericfg interface mode and which
 * MAC clock divider value to use.
 */
struct mtk_star_compat {
	int (*set_interface_mode)(struct net_device *ndev);
	unsigned char bit_clk_div;
};
250 
/* Driver-private state, stored in netdev_priv(ndev). */
struct mtk_star_priv {
	struct net_device *ndev;

	/* MAC register space. */
	struct regmap *regs;
	/* Peripheral configuration syscon (interface-mode muxing). */
	struct regmap *pericfg;

	struct clk_bulk_data clks[MTK_STAR_NCLKS];

	/* Single coherent DMA allocation holding both descriptor rings:
	 * TX descriptors first, RX descriptors immediately after.
	 */
	void *ring_base;
	struct mtk_star_ring_desc *descs_base;
	dma_addr_t dma_addr;
	struct mtk_star_ring tx_ring;
	struct mtk_star_ring rx_ring;

	struct mii_bus *mii;
	struct napi_struct napi;

	/* PHY handling: cached link parameters mirrored from phydev in
	 * mtk_star_adjust_link() so we only reprogram the MAC on change.
	 */
	struct device_node *phy_node;
	phy_interface_t phy_intf;
	struct phy_device *phydev;
	unsigned int link;
	int speed;
	int duplex;
	int pause;
	bool rmii_rxc;
	bool rx_inv;
	bool tx_inv;

	const struct mtk_star_compat *compat_data;

	/* Protects against concurrent descriptor access. */
	spinlock_t lock;

	/* Stats accumulated from the hardware counter registers. */
	struct rtnl_link_stats64 stats;
};
286 
287 static struct device *mtk_star_get_dev(struct mtk_star_priv *priv)
288 {
289 	return priv->ndev->dev.parent;
290 }
291 
/* All MAC registers are 32-bit wide at 4-byte stride. Regmap's internal
 * locking is disabled; concurrent register access is presumably serialized
 * by the driver itself (priv->lock / interrupt masking) - NOTE(review):
 * confirm against the register-access call sites.
 */
static const struct regmap_config mtk_star_regmap_config = {
	.reg_bits		= 32,
	.val_bits		= 32,
	.reg_stride		= 4,
	.disable_locking	= true,
};
298 
299 static void mtk_star_ring_init(struct mtk_star_ring *ring,
300 			       struct mtk_star_ring_desc *descs)
301 {
302 	memset(ring, 0, sizeof(*ring));
303 	ring->descs = descs;
304 	ring->head = 0;
305 	ring->tail = 0;
306 }
307 
/* Reclaim the descriptor at the ring's tail if hardware has released it.
 *
 * Returns 0 and fills @desc_data (length, flags, dma address, skb) on
 * success; returns -1 if the descriptor is still owned by the DMA engine
 * (COWN bit clear). On success the descriptor is reset and handed back
 * with only the ownership and end-of-ring bits preserved.
 */
static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring,
				  struct mtk_star_ring_desc_data *desc_data)
{
	struct mtk_star_ring_desc *desc = &ring->descs[ring->tail];
	unsigned int status;

	status = READ_ONCE(desc->status);
	dma_rmb(); /* Make sure we read the status bits before checking it. */

	if (!(status & MTK_STAR_DESC_BIT_COWN))
		return -1;

	/* Low 16 bits carry the packet length; everything above is flags. */
	desc_data->len = status & MTK_STAR_DESC_MSK_LEN;
	desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN;
	desc_data->dma_addr = ring->dma_addrs[ring->tail];
	desc_data->skb = ring->skbs[ring->tail];

	/* Clear the shadow slot so free/teardown paths skip it. */
	ring->dma_addrs[ring->tail] = 0;
	ring->skbs[ring->tail] = NULL;

	/* Keep ownership (CPU) and the end-of-ring marker, drop the rest. */
	status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR;

	WRITE_ONCE(desc->data_ptr, 0);
	WRITE_ONCE(desc->status, status);

	ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS;

	return 0;
}
337 
/* Hand the descriptor at the ring's head to the DMA engine.
 *
 * The status word is written twice on purpose: first with the new length
 * and flags while the CPU still owns the descriptor, then - after a
 * dma_wmb() barrier - with the COWN bit cleared, which transfers
 * ownership to hardware only once all other fields are visible.
 */
static void mtk_star_ring_push_head(struct mtk_star_ring *ring,
				    struct mtk_star_ring_desc_data *desc_data,
				    unsigned int flags)
{
	struct mtk_star_ring_desc *desc = &ring->descs[ring->head];
	unsigned int status;

	status = READ_ONCE(desc->status);

	/* Record the skb and mapping in the shadow arrays for completion. */
	ring->skbs[ring->head] = desc_data->skb;
	ring->dma_addrs[ring->head] = desc_data->dma_addr;

	status |= desc_data->len;
	if (flags)
		status |= flags;

	WRITE_ONCE(desc->data_ptr, desc_data->dma_addr);
	WRITE_ONCE(desc->status, status);
	status &= ~MTK_STAR_DESC_BIT_COWN;
	/* Flush previous modifications before ownership change. */
	dma_wmb();
	WRITE_ONCE(desc->status, status);

	ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS;
}
363 
/* Give an RX buffer back to hardware - RX descriptors need no extra flags,
 * only the buffer length already folded into the status word.
 */
static void
mtk_star_ring_push_head_rx(struct mtk_star_ring *ring,
			   struct mtk_star_ring_desc_data *desc_data)
{
	mtk_star_ring_push_head(ring, desc_data, 0);
}
370 
/* Queue a TX buffer for transmission. Each skb occupies a single
 * descriptor, so it is marked as both first (FS) and last (LS) segment,
 * with a completion interrupt (INT) requested.
 */
static void
mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
			   struct mtk_star_ring_desc_data *desc_data)
{
	static const unsigned int flags = MTK_STAR_DESC_BIT_FS |
					  MTK_STAR_DESC_BIT_LS |
					  MTK_STAR_DESC_BIT_INT;

	mtk_star_ring_push_head(ring, desc_data, flags);
}
381 
/* Distance between the producer (head) and consumer (tail) indices.
 *
 * NOTE(review): head and tail are kept modulo MTK_STAR_RING_NUM_DESCS, so
 * abs(head - tail) does not account for wrap-around (head having wrapped
 * past tail) - confirm the callers tolerate this.
 */
static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
{
	return abs(ring->head - ring->tail);
}
386 
/* True when every descriptor in the ring is in flight.
 *
 * NOTE(review): with head/tail kept modulo the ring size, the head/tail
 * distance can never reach MTK_STAR_RING_NUM_DESCS - verify this check can
 * actually trigger, or whether the ring is throttled elsewhere.
 */
static bool mtk_star_ring_full(struct mtk_star_ring *ring)
{
	return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
}
391 
/* True when at least one descriptor is outstanding (head != tail). */
static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
{
	return mtk_star_ring_num_used_descs(ring) > 0;
}
396 
/* Map an RX skb's free tailroom for device writes.
 *
 * Returns the DMA handle; the caller must check it with
 * dma_mapping_error(). The -2 offset undoes the MTK_STAR_IP_ALIGN reserve
 * done in mtk_star_alloc_skb() so the hardware gets a 4N + 2 aligned
 * buffer as it requires.
 */
static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
				      struct sk_buff *skb)
{
	struct device *dev = mtk_star_get_dev(priv);

	/* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. */
	return dma_map_single(dev, skb_tail_pointer(skb) - 2,
			      skb_tailroom(skb), DMA_FROM_DEVICE);
}
406 
/* Undo mtk_star_dma_map_rx() for a completed or discarded RX buffer.
 * Uses skb_tailroom() for the size, mirroring the mapping call - this
 * assumes the skb has not been modified since it was mapped.
 */
static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv,
				  struct mtk_star_ring_desc_data *desc_data)
{
	struct device *dev = mtk_star_get_dev(priv);

	dma_unmap_single(dev, desc_data->dma_addr,
			 skb_tailroom(desc_data->skb), DMA_FROM_DEVICE);
}
415 
/* Map an skb's linear data area for transmission. Only the head fragment
 * is mapped (skb_headlen()); the caller must check the returned handle
 * with dma_mapping_error().
 */
static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv,
				      struct sk_buff *skb)
{
	struct device *dev = mtk_star_get_dev(priv);

	return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
}
423 
424 static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv,
425 				  struct mtk_star_ring_desc_data *desc_data)
426 {
427 	struct device *dev = mtk_star_get_dev(priv);
428 
429 	return dma_unmap_single(dev, desc_data->dma_addr,
430 				skb_headlen(desc_data->skb), DMA_TO_DEVICE);
431 }
432 
/* Clear the NIC power-down bit so the MAC core is operational. */
static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
{
	regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
			  MTK_STAR_BIT_MAC_CFG_NIC_PD);
}
438 
/* Unmask the three interrupts we care about, mask all others.
 * The three sources are: TX complete (TNTC), RX complete (FNRC) and the
 * MIB counter threshold. The mask register uses 1 = masked, hence ~val.
 */
static void mtk_star_intr_enable(struct mtk_star_priv *priv)
{
	unsigned int val = MTK_STAR_BIT_INT_STS_TNTC |
			   MTK_STAR_BIT_INT_STS_FNRC |
			   MTK_STAR_REG_INT_STS_MIB_CNT_TH;

	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val);
}
448 
/* Mask all interrupt sources. */
static void mtk_star_intr_disable(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
}
453 
/* Read the raw interrupt status register. The regmap_read() return value
 * is ignored - MMIO regmap reads are presumed not to fail here.
 */
static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
{
	unsigned int val;

	regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);

	return val;
}
462 
/* Read and acknowledge all pending interrupts (write-1-to-clear) and
 * return the status bits that were pending.
 */
static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
{
	unsigned int val;

	val = mtk_star_intr_read(priv);
	regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);

	return val;
}
472 
/* Initialize both descriptor rings and point the hardware at them.
 *
 * The coherent DMA area holds all TX descriptors followed by all RX
 * descriptors. Every descriptor starts CPU-owned (COWN set) and the last
 * descriptor of each ring gets the end-of-ring (EOR) marker so the DMA
 * engine wraps correctly.
 */
static void mtk_star_dma_init(struct mtk_star_priv *priv)
{
	struct mtk_star_ring_desc *desc;
	unsigned int val;
	int i;

	priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base;

	for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) {
		desc = &priv->descs_base[i];

		memset(desc, 0, sizeof(*desc));
		desc->status = MTK_STAR_DESC_BIT_COWN;
		/* EOR on the last descriptor of the TX and the RX ring. */
		if ((i == MTK_STAR_NUM_TX_DESCS - 1) ||
		    (i == MTK_STAR_NUM_DESCS_TOTAL - 1))
			desc->status |= MTK_STAR_DESC_BIT_EOR;
	}

	mtk_star_ring_init(&priv->tx_ring, priv->descs_base);
	mtk_star_ring_init(&priv->rx_ring,
			   priv->descs_base + MTK_STAR_NUM_TX_DESCS);

	/* Set DMA pointers. */
	val = (unsigned int)priv->dma_addr;
	regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val);
	regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val);

	/* The RX ring starts right after the TX descriptors. */
	val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS;
	regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val);
	regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val);
}
504 
/* Kick off both the TX and RX DMA engines. */
static void mtk_star_dma_start(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
			MTK_STAR_BIT_TX_DMA_CTRL_START);
	regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
			MTK_STAR_BIT_RX_DMA_CTRL_START);
}
512 
/* Stop both DMA engines. A plain write (not set_bits) so the START/RESUME
 * bits are simultaneously cleared.
 */
static void mtk_star_dma_stop(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
		     MTK_STAR_BIT_TX_DMA_CTRL_STOP);
	regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
		     MTK_STAR_BIT_RX_DMA_CTRL_STOP);
}
520 
/* Stop DMA and reclaim ownership of every descriptor back to the CPU so
 * the buffers can be safely unmapped and freed afterwards.
 */
static void mtk_star_dma_disable(struct mtk_star_priv *priv)
{
	int i;

	mtk_star_dma_stop(priv);

	/* Take back all descriptors. */
	for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++)
		priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN;
}
531 
/* Tell the RX DMA engine to resume fetching descriptors. */
static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
			MTK_STAR_BIT_RX_DMA_CTRL_RESUME);
}
537 
/* Tell the TX DMA engine to resume fetching descriptors. */
static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
			MTK_STAR_BIT_TX_DMA_CTRL_RESUME);
}
543 
544 static void mtk_star_set_mac_addr(struct net_device *ndev)
545 {
546 	struct mtk_star_priv *priv = netdev_priv(ndev);
547 	const u8 *mac_addr = ndev->dev_addr;
548 	unsigned int high, low;
549 
550 	high = mac_addr[0] << 8 | mac_addr[1] << 0;
551 	low = mac_addr[2] << 24 | mac_addr[3] << 16 |
552 	      mac_addr[4] << 8 | mac_addr[5];
553 
554 	regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high);
555 	regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low);
556 }
557 
/* Reset all hardware statistics counters by reading each one and
 * discarding the value - the counters appear to be clear-on-read
 * (NOTE(review): confirm against the datasheet).
 */
static void mtk_star_reset_counters(struct mtk_star_priv *priv)
{
	static const unsigned int counter_regs[] = {
		MTK_STAR_REG_C_RXOKPKT,
		MTK_STAR_REG_C_RXOKBYTE,
		MTK_STAR_REG_C_RXRUNT,
		MTK_STAR_REG_C_RXLONG,
		MTK_STAR_REG_C_RXDROP,
		MTK_STAR_REG_C_RXCRC,
		MTK_STAR_REG_C_RXARLDROP,
		MTK_STAR_REG_C_RXVLANDROP,
		MTK_STAR_REG_C_RXCSERR,
		MTK_STAR_REG_C_RXPAUSE,
		MTK_STAR_REG_C_TXOKPKT,
		MTK_STAR_REG_C_TXOKBYTE,
		MTK_STAR_REG_C_TXPAUSECOL,
		MTK_STAR_REG_C_TXRTY,
		MTK_STAR_REG_C_TXSKIP,
		MTK_STAR_REG_C_TX_ARP,
		MTK_STAR_REG_C_RX_RERR,
		MTK_STAR_REG_C_RX_UNI,
		MTK_STAR_REG_C_RX_MULTI,
		MTK_STAR_REG_C_RX_BROAD,
		MTK_STAR_REG_C_RX_ALIGNERR,
		MTK_STAR_REG_C_TX_UNI,
		MTK_STAR_REG_C_TX_MULTI,
		MTK_STAR_REG_C_TX_BROAD,
		MTK_STAR_REG_C_TX_TIMEOUT,
		MTK_STAR_REG_C_TX_LATECOL,
		MTK_STAR_REG_C_RX_LENGTHERR,
		MTK_STAR_REG_C_RX_TWIST,
	};

	unsigned int i, val;

	for (i = 0; i < ARRAY_SIZE(counter_regs); i++)
		regmap_read(priv->regs, counter_regs[i], &val);
}
596 
/* Read one hardware counter register and add its value to @stat.
 * Accumulation (rather than assignment) matches clear-on-read counters.
 */
static void mtk_star_update_stat(struct mtk_star_priv *priv,
				 unsigned int reg, u64 *stat)
{
	unsigned int val;

	regmap_read(priv->regs, reg, &val);
	*stat += val;
}
605 
/* Try to get as many stats as possible from the internal registers instead
 * of tracking them ourselves.
 *
 * Folds the hardware counters into priv->stats. Because the per-counter
 * reads accumulate, each hardware register maps onto exactly one stats
 * field (multicast and collisions intentionally sum several counters).
 */
static void mtk_star_update_stats(struct mtk_star_priv *priv)
{
	struct rtnl_link_stats64 *stats = &priv->stats;

	/* OK packets and bytes. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes);

	/* RX & TX multicast. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast);

	/* Collisions. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions);

	/* RX Errors. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR,
			     &stats->rx_length_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG,
			     &stats->rx_over_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR,
			     &stats->rx_frame_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP,
			     &stats->rx_fifo_errors);
	/* Sum of the general RX error counter + all of the above. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors);
	stats->rx_errors += stats->rx_length_errors;
	stats->rx_errors += stats->rx_over_errors;
	stats->rx_errors += stats->rx_crc_errors;
	stats->rx_errors += stats->rx_frame_errors;
	stats->rx_errors += stats->rx_fifo_errors;
}
648 
/* Allocate an RX skb sized for the largest frame and align its data
 * pointer for this controller: first round the tail pointer up to a
 * 16-byte boundary, then reserve MTK_STAR_IP_ALIGN (2) bytes so that the
 * IP header lands aligned after eth_type_trans() pulls the 14-byte
 * Ethernet header. Returns NULL on allocation failure.
 */
static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev)
{
	uintptr_t tail, offset;
	struct sk_buff *skb;

	skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE);
	if (!skb)
		return NULL;

	/* Align to 16 bytes. */
	tail = (uintptr_t)skb_tail_pointer(skb);
	if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) {
		offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1);
		skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset);
	}

	/* Ensure 16-byte alignment of the skb pointer: eth_type_trans() will
	 * extract the Ethernet header (14 bytes) so we need two more bytes.
	 */
	skb_reserve(skb, MTK_STAR_IP_ALIGN);

	return skb;
}
672 
673 static int mtk_star_prepare_rx_skbs(struct net_device *ndev)
674 {
675 	struct mtk_star_priv *priv = netdev_priv(ndev);
676 	struct mtk_star_ring *ring = &priv->rx_ring;
677 	struct device *dev = mtk_star_get_dev(priv);
678 	struct mtk_star_ring_desc *desc;
679 	struct sk_buff *skb;
680 	dma_addr_t dma_addr;
681 	int i;
682 
683 	for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) {
684 		skb = mtk_star_alloc_skb(ndev);
685 		if (!skb)
686 			return -ENOMEM;
687 
688 		dma_addr = mtk_star_dma_map_rx(priv, skb);
689 		if (dma_mapping_error(dev, dma_addr)) {
690 			dev_kfree_skb(skb);
691 			return -ENOMEM;
692 		}
693 
694 		desc = &ring->descs[i];
695 		desc->data_ptr = dma_addr;
696 		desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN;
697 		desc->status &= ~MTK_STAR_DESC_BIT_COWN;
698 		ring->skbs[i] = skb;
699 		ring->dma_addrs[i] = dma_addr;
700 	}
701 
702 	return 0;
703 }
704 
/* Unmap (via @unmap_func) and free every skb still attached to @ring.
 * Slots with a zero dma address are considered empty and skipped.
 *
 * NOTE(review): the skbs[]/dma_addrs[] slots are not cleared here, so
 * this must only be called once per populated ring (as the teardown
 * paths do) - confirm before reusing elsewhere.
 */
static void
mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring,
			void (*unmap_func)(struct mtk_star_priv *,
					   struct mtk_star_ring_desc_data *))
{
	struct mtk_star_ring_desc_data desc_data;
	int i;

	for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) {
		if (!ring->dma_addrs[i])
			continue;

		desc_data.dma_addr = ring->dma_addrs[i];
		desc_data.skb = ring->skbs[i];

		unmap_func(priv, &desc_data);
		dev_kfree_skb(desc_data.skb);
	}
}
724 
/* Release all RX buffers still held by the RX ring. */
static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->rx_ring;

	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx);
}
731 
/* Release all TX buffers still held by the TX ring. */
static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->tx_ring;

	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
}
738 
/* All processing for TX and RX happens in the napi poll callback.
 *
 * FIXME: The interrupt handling should be more fine-grained with each
 * interrupt enabled/disabled independently when needed. Unfortunatly this
 * turned out to impact the driver's stability and until we have something
 * working properly, we're disabling all interrupts during TX & RX processing
 * or when resetting the counter registers.
 */
static irqreturn_t mtk_star_handle_irq(int irq, void *data)
{
	struct mtk_star_priv *priv;
	struct net_device *ndev;

	ndev = data;
	priv = netdev_priv(ndev);

	/* Mask everything and defer all work to NAPI; interrupts are
	 * re-enabled by the poll callback once processing completes.
	 */
	if (netif_running(ndev)) {
		mtk_star_intr_disable(priv);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}
762 
/* Wait for the completion of any previous command - CMD_START bit must be
 * cleared by hardware.
 *
 * Polls atomically every 10us for up to MTK_STAR_WAIT_TIMEOUT us; returns
 * 0 on success or -ETIMEDOUT from the poll helper on timeout.
 */
static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv)
{
	unsigned int val;

	return regmap_read_poll_timeout_atomic(priv->regs,
				MTK_STAR_REG_HASH_CTRL, val,
				!(val & MTK_STAR_BIT_HASH_CTRL_CMD_START),
				10, MTK_STAR_WAIT_TIMEOUT);
}
775 
/* Wait for a hash-table operation to finish and verify it succeeded:
 * first poll for BIST_DONE, then check BIST_OK. Returns 0 on success,
 * a poll-timeout error if the operation never completed, or -EIO if it
 * completed with a failure indication.
 */
static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv)
{
	unsigned int val;
	int ret;

	/* Wait for BIST_DONE bit. */
	ret = regmap_read_poll_timeout_atomic(priv->regs,
					MTK_STAR_REG_HASH_CTRL, val,
					val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE,
					10, MTK_STAR_WAIT_TIMEOUT);
	if (ret)
		return ret;

	/* Check the BIST_OK bit. */
	if (!regmap_test_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
			      MTK_STAR_BIT_HASH_CTRL_BIST_OK))
		return -EIO;

	return 0;
}
796 
/* Set a single bit in the hardware multicast hash table.
 *
 * @hash_addr: bit index within the hash table (masked to 9 bits).
 * Waits for any previous hash command to finish, issues a write command
 * with the data bit set, then waits for completion. Returns 0 or a
 * negative error code.
 */
static int mtk_star_set_hashbit(struct mtk_star_priv *priv,
				unsigned int hash_addr)
{
	unsigned int val;
	int ret;

	ret = mtk_star_hash_wait_cmd_start(priv);
	if (ret)
		return ret;

	val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR;
	val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD;
	val |= MTK_STAR_BIT_HASH_CTRL_CMD_START;
	val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN;
	val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA;
	regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val);

	return mtk_star_hash_wait_ok(priv);
}
816 
/* Clear the whole multicast hash table via the hardware BIST reset.
 * Returns 0 on success or a negative error code on timeout/failure.
 */
static int mtk_star_reset_hash_table(struct mtk_star_priv *priv)
{
	int ret;

	ret = mtk_star_hash_wait_cmd_start(priv);
	if (ret)
		return ret;

	regmap_set_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
			MTK_STAR_BIT_HASH_CTRL_BIST_EN);
	regmap_set_bits(priv->regs, MTK_STAR_REG_TEST1,
			MTK_STAR_BIT_TEST1_RST_HASH_MBIST);

	return mtk_star_hash_wait_ok(priv);
}
832 
/* Program the MAC's speed, duplex and flow-control settings to match the
 * parameters cached from the PHY (priv->speed / priv->pause). Called from
 * the adjust_link callback whenever the link state changes.
 */
static void mtk_star_phy_config(struct mtk_star_priv *priv)
{
	unsigned int val;

	if (priv->speed == SPEED_1000)
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M;
	else if (priv->speed == SPEED_100)
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M;
	else
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M;
	val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;

	val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
	/* Only full-duplex supported for now. */
	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;

	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);

	/* Pause-frame send threshold: 2K when pause is negotiated, else
	 * flow-control generation is disabled.
	 */
	if (priv->pause) {
		val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
		val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
		val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
	} else {
		val = 0;
	}

	regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
			   MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
			   MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);

	/* Pause release threshold: 1K when pause is negotiated. */
	if (priv->pause) {
		val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
		val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
	} else {
		val = 0;
	}

	regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
			   MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
}
875 
876 static void mtk_star_adjust_link(struct net_device *ndev)
877 {
878 	struct mtk_star_priv *priv = netdev_priv(ndev);
879 	struct phy_device *phydev = priv->phydev;
880 	bool new_state = false;
881 
882 	if (phydev->link) {
883 		if (!priv->link) {
884 			priv->link = phydev->link;
885 			new_state = true;
886 		}
887 
888 		if (priv->speed != phydev->speed) {
889 			priv->speed = phydev->speed;
890 			new_state = true;
891 		}
892 
893 		if (priv->pause != phydev->pause) {
894 			priv->pause = phydev->pause;
895 			new_state = true;
896 		}
897 	} else {
898 		if (priv->link) {
899 			priv->link = phydev->link;
900 			new_state = true;
901 		}
902 	}
903 
904 	if (new_state) {
905 		if (phydev->link)
906 			mtk_star_phy_config(priv);
907 
908 		phy_print_status(ndev->phydev);
909 	}
910 }
911 
/* One-time MAC configuration: enable the MII pads/MDC mode in the system
 * configuration register and set the per-SoC MAC clock divider.
 */
static void mtk_star_init_config(struct mtk_star_priv *priv)
{
	unsigned int val;

	val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE |
	       MTK_STAR_BIT_EXT_MDC_MODE |
	       MTK_STAR_BIT_SWC_MII_MODE);

	regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
	regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
			   MTK_STAR_MSK_MAC_CLK_CONF,
			   priv->compat_data->bit_clk_div);
}
925 
/* Bring the interface up: configure the MAC, reset the hash table, set up
 * DMA and RX buffers, request the interrupt, connect the PHY and start
 * the queues. Returns 0 on success or a negative error code, unwinding
 * whatever was set up before the failure.
 */
static int mtk_star_enable(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	unsigned int val;
	int ret;

	/* Quiesce the hardware before reconfiguring it. */
	mtk_star_nic_disable_pd(priv);
	mtk_star_intr_disable(priv);
	mtk_star_dma_stop(priv);

	mtk_star_set_mac_addr(ndev);

	/* Configure the MAC */
	val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT;
	val <<= MTK_STAR_OFF_MAC_CFG_IPG;
	val |= MTK_STAR_BIT_MAC_CFG_MAXLEN_1522;
	val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD;
	val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP;
	regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val);

	/* Enable Hash Table BIST and reset it */
	ret = mtk_star_reset_hash_table(priv);
	if (ret)
		return ret;

	/* Setup the hashing algorithm */
	regmap_clear_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
			  MTK_STAR_BIT_ARL_CFG_HASH_ALG |
			  MTK_STAR_BIT_ARL_CFG_MISC_MODE);

	/* Don't strip VLAN tags */
	regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
			  MTK_STAR_BIT_MAC_CFG_VLAN_STRIP);

	/* Setup DMA */
	mtk_star_dma_init(priv);

	ret = mtk_star_prepare_rx_skbs(ndev);
	if (ret)
		goto err_out;

	/* Request the interrupt */
	ret = request_irq(ndev->irq, mtk_star_handle_irq,
			  IRQF_TRIGGER_NONE, ndev->name, ndev);
	if (ret)
		goto err_free_skbs;

	napi_enable(&priv->napi);

	/* Clear any stale status before unmasking. */
	mtk_star_intr_ack_all(priv);
	mtk_star_intr_enable(priv);

	/* Connect to and start PHY */
	priv->phydev = of_phy_connect(ndev, priv->phy_node,
				      mtk_star_adjust_link, 0, priv->phy_intf);
	if (!priv->phydev) {
		netdev_err(ndev, "failed to connect to PHY\n");
		ret = -ENODEV;
		goto err_free_irq;
	}

	mtk_star_dma_start(priv);
	phy_start(priv->phydev);
	netif_start_queue(ndev);

	return 0;

err_free_irq:
	free_irq(ndev->irq, ndev);
err_free_skbs:
	mtk_star_free_rx_skbs(priv);
err_out:
	return ret;
}
1000 
/* Tear down everything mtk_star_enable() set up, in reverse order:
 * stop the queue and NAPI, mask and ack interrupts, stop DMA and reclaim
 * descriptor ownership, detach the PHY, release the IRQ and free all
 * remaining ring buffers.
 */
static void mtk_star_disable(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	mtk_star_intr_disable(priv);
	mtk_star_dma_disable(priv);
	mtk_star_intr_ack_all(priv);
	phy_stop(priv->phydev);
	phy_disconnect(priv->phydev);
	free_irq(ndev->irq, ndev);
	mtk_star_free_rx_skbs(priv);
	mtk_star_free_tx_skbs(priv);
}
1016 
/* ndo_open callback: bring the hardware up via mtk_star_enable(). */
static int mtk_star_netdev_open(struct net_device *ndev)
{
	return mtk_star_enable(ndev);
}
1021 
/* ndo_stop callback: shut the hardware down; never fails. */
static int mtk_star_netdev_stop(struct net_device *ndev)
{
	mtk_star_disable(ndev);

	return 0;
}
1028 
1029 static int mtk_star_netdev_ioctl(struct net_device *ndev,
1030 				 struct ifreq *req, int cmd)
1031 {
1032 	if (!netif_running(ndev))
1033 		return -EINVAL;
1034 
1035 	return phy_mii_ioctl(ndev->phydev, req, cmd);
1036 }
1037 
1038 static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
1039 				      struct net_device *ndev)
1040 {
1041 	struct mtk_star_priv *priv = netdev_priv(ndev);
1042 	struct mtk_star_ring *ring = &priv->tx_ring;
1043 	struct device *dev = mtk_star_get_dev(priv);
1044 	struct mtk_star_ring_desc_data desc_data;
1045 
1046 	desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
1047 	if (dma_mapping_error(dev, desc_data.dma_addr))
1048 		goto err_drop_packet;
1049 
1050 	desc_data.skb = skb;
1051 	desc_data.len = skb->len;
1052 
1053 	spin_lock_bh(&priv->lock);
1054 
1055 	mtk_star_ring_push_head_tx(ring, &desc_data);
1056 
1057 	netdev_sent_queue(ndev, skb->len);
1058 
1059 	if (mtk_star_ring_full(ring))
1060 		netif_stop_queue(ndev);
1061 
1062 	spin_unlock_bh(&priv->lock);
1063 
1064 	mtk_star_dma_resume_tx(priv);
1065 
1066 	return NETDEV_TX_OK;
1067 
1068 err_drop_packet:
1069 	dev_kfree_skb(skb);
1070 	ndev->stats.tx_dropped++;
1071 	return NETDEV_TX_OK;
1072 }
1073 
1074 /* Returns the number of bytes sent or a negative number on the first
1075  * descriptor owned by DMA.
1076  */
1077 static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
1078 {
1079 	struct mtk_star_ring *ring = &priv->tx_ring;
1080 	struct mtk_star_ring_desc_data desc_data;
1081 	int ret;
1082 
1083 	ret = mtk_star_ring_pop_tail(ring, &desc_data);
1084 	if (ret)
1085 		return ret;
1086 
1087 	mtk_star_dma_unmap_tx(priv, &desc_data);
1088 	ret = desc_data.skb->len;
1089 	dev_kfree_skb_irq(desc_data.skb);
1090 
1091 	return ret;
1092 }
1093 
1094 static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
1095 {
1096 	struct mtk_star_ring *ring = &priv->tx_ring;
1097 	struct net_device *ndev = priv->ndev;
1098 	int ret, pkts_compl, bytes_compl;
1099 	bool wake = false;
1100 
1101 	spin_lock(&priv->lock);
1102 
1103 	for (pkts_compl = 0, bytes_compl = 0;;
1104 	     pkts_compl++, bytes_compl += ret, wake = true) {
1105 		if (!mtk_star_ring_descs_available(ring))
1106 			break;
1107 
1108 		ret = mtk_star_tx_complete_one(priv);
1109 		if (ret < 0)
1110 			break;
1111 	}
1112 
1113 	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
1114 
1115 	if (wake && netif_queue_stopped(ndev))
1116 		netif_wake_queue(ndev);
1117 
1118 	spin_unlock(&priv->lock);
1119 }
1120 
/* ndo_get_stats64 callback: refresh the counters from hardware, then
 * copy the cached statistics into the caller's buffer.
 *
 * NOTE(review): the memcpy() assumes priv->stats is layout-compatible
 * with struct rtnl_link_stats64 - confirm against struct mtk_star_priv.
 */
static void mtk_star_netdev_get_stats64(struct net_device *ndev,
					struct rtnl_link_stats64 *stats)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);

	mtk_star_update_stats(priv);

	memcpy(stats, &priv->stats, sizeof(*stats));
}
1130 
1131 static void mtk_star_set_rx_mode(struct net_device *ndev)
1132 {
1133 	struct mtk_star_priv *priv = netdev_priv(ndev);
1134 	struct netdev_hw_addr *hw_addr;
1135 	unsigned int hash_addr, i;
1136 	int ret;
1137 
1138 	if (ndev->flags & IFF_PROMISC) {
1139 		regmap_set_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
1140 				MTK_STAR_BIT_ARL_CFG_MISC_MODE);
1141 	} else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT ||
1142 		   ndev->flags & IFF_ALLMULTI) {
1143 		for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) {
1144 			ret = mtk_star_set_hashbit(priv, i);
1145 			if (ret)
1146 				goto hash_fail;
1147 		}
1148 	} else {
1149 		/* Clear previous settings. */
1150 		ret = mtk_star_reset_hash_table(priv);
1151 		if (ret)
1152 			goto hash_fail;
1153 
1154 		netdev_for_each_mc_addr(hw_addr, ndev) {
1155 			hash_addr = (hw_addr->addr[0] & 0x01) << 8;
1156 			hash_addr += hw_addr->addr[5];
1157 			ret = mtk_star_set_hashbit(priv, hash_addr);
1158 			if (ret)
1159 				goto hash_fail;
1160 		}
1161 	}
1162 
1163 	return;
1164 
1165 hash_fail:
1166 	if (ret == -ETIMEDOUT)
1167 		netdev_err(ndev, "setting hash bit timed out\n");
1168 	else
1169 		/* Should be -EIO */
1170 		netdev_err(ndev, "unable to set hash bit");
1171 }
1172 
/* net_device callbacks; MAC address handling uses the generic eth
 * helpers.
 */
static const struct net_device_ops mtk_star_netdev_ops = {
	.ndo_open		= mtk_star_netdev_open,
	.ndo_stop		= mtk_star_netdev_stop,
	.ndo_start_xmit		= mtk_star_netdev_start_xmit,
	.ndo_get_stats64	= mtk_star_netdev_get_stats64,
	.ndo_set_rx_mode	= mtk_star_set_rx_mode,
	.ndo_eth_ioctl		= mtk_star_netdev_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
1183 
1184 static void mtk_star_get_drvinfo(struct net_device *dev,
1185 				 struct ethtool_drvinfo *info)
1186 {
1187 	strlcpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
1188 }
1189 
/* TODO Add ethtool stats. */
/* Link state and link settings are delegated to the phylib helpers. */
static const struct ethtool_ops mtk_star_ethtool_ops = {
	.get_drvinfo		= mtk_star_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
1197 
/* Receive a single frame from the RX ring. Returns 0 on success or -1
 * when the tail descriptor is still owned by the DMA engine (ring
 * drained).
 *
 * A replacement skb is allocated and DMA-mapped *before* the received
 * one is handed to the stack; on any failure (bad frame, allocation or
 * mapping error) the current skb is recycled into the descriptor and
 * the frame is dropped.
 */
static int mtk_star_receive_packet(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->rx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc_data desc_data;
	struct net_device *ndev = priv->ndev;
	struct sk_buff *curr_skb, *new_skb;
	dma_addr_t new_dma_addr;
	int ret;

	/* Only the ring manipulation needs the lock. */
	spin_lock(&priv->lock);
	ret = mtk_star_ring_pop_tail(ring, &desc_data);
	spin_unlock(&priv->lock);
	if (ret)
		return -1;

	curr_skb = desc_data.skb;

	/* CRC or oversize error reported by the hardware? */
	if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
	    (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
		/* Error packet -> drop and reuse skb. */
		new_skb = curr_skb;
		goto push_new_skb;
	}

	/* Prepare new skb before receiving the current one. Reuse the current
	 * skb if we fail at any point.
	 */
	new_skb = mtk_star_alloc_skb(ndev);
	if (!new_skb) {
		ndev->stats.rx_dropped++;
		new_skb = curr_skb;
		goto push_new_skb;
	}

	new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
	if (dma_mapping_error(dev, new_dma_addr)) {
		ndev->stats.rx_dropped++;
		dev_kfree_skb(new_skb);
		new_skb = curr_skb;
		netdev_err(ndev, "DMA mapping error of RX descriptor\n");
		goto push_new_skb;
	}

	/* We can't fail anymore at this point: it's safe to unmap the skb. */
	mtk_star_dma_unmap_rx(priv, &desc_data);

	skb_put(desc_data.skb, desc_data.len);
	desc_data.skb->ip_summed = CHECKSUM_NONE;
	desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
	desc_data.skb->dev = ndev;
	netif_receive_skb(desc_data.skb);

	/* update dma_addr for new skb */
	desc_data.dma_addr = new_dma_addr;

push_new_skb:
	/* Recycle the descriptor with the new (or reused) skb. */
	desc_data.len = skb_tailroom(new_skb);
	desc_data.skb = new_skb;

	spin_lock(&priv->lock);
	mtk_star_ring_push_head_rx(ring, &desc_data);
	spin_unlock(&priv->lock);

	return 0;
}
1264 
/* Receive up to @budget packets and resume the RX DMA engine. Returns
 * the number of packets actually handed to the stack.
 *
 * Fix: the counter is only incremented after a successful
 * mtk_star_receive_packet() call. The for-loop version also counted
 * the final failed attempt, which could over-report by one and cause
 * the poll handler to skip napi_complete_done() even though the ring
 * was already empty.
 */
static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
{
	int received = 0;

	while (received < budget && !mtk_star_receive_packet(priv))
		received++;

	mtk_star_dma_resume_rx(priv);

	return received;
}
1276 
/* NAPI poll handler: read and ack the interrupt status, reclaim
 * completed TX descriptors, receive up to @budget RX packets and
 * refresh the MIB counters when their threshold interrupt fired.
 *
 * NOTE(review): interrupts are re-enabled unconditionally, even when
 * the budget was exhausted and NAPI will poll again. The usual NAPI
 * pattern only re-enables them after napi_complete_done() returns
 * true - confirm the hardware's status latching makes this safe.
 */
static int mtk_star_poll(struct napi_struct *napi, int budget)
{
	struct mtk_star_priv *priv;
	unsigned int status;
	int received = 0;

	priv = container_of(napi, struct mtk_star_priv, napi);

	status = mtk_star_intr_read(priv);
	mtk_star_intr_ack_all(priv);

	if (status & MTK_STAR_BIT_INT_STS_TNTC)
		/* Clean-up all TX descriptors. */
		mtk_star_tx_complete_all(priv);

	if (status & MTK_STAR_BIT_INT_STS_FNRC)
		/* Receive up to $budget packets. */
		received = mtk_star_process_rx(priv, budget);

	if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
		mtk_star_update_stats(priv);
		mtk_star_reset_counters(priv);
	}

	if (received < budget)
		napi_complete_done(napi, received);

	mtk_star_intr_enable(priv);

	return received;
}
1308 
/* Reset the MDIO command-done (RWOK) flag before issuing a new command
 * - presumably write-one-to-clear; mtk_star_mdio_rwok_wait() later
 * polls for this bit to come back up.
 */
static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0,
		     MTK_STAR_BIT_PHY_CTRL0_RWOK);
}
1314 
1315 static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv)
1316 {
1317 	unsigned int val;
1318 
1319 	return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0,
1320 					val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK,
1321 					10, MTK_STAR_WAIT_TIMEOUT);
1322 }
1323 
1324 static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
1325 {
1326 	struct mtk_star_priv *priv = mii->priv;
1327 	unsigned int val, data;
1328 	int ret;
1329 
1330 	if (regnum & MII_ADDR_C45)
1331 		return -EOPNOTSUPP;
1332 
1333 	mtk_star_mdio_rwok_clear(priv);
1334 
1335 	val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
1336 	val &= MTK_STAR_MSK_PHY_CTRL0_PREG;
1337 	val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD;
1338 
1339 	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
1340 
1341 	ret = mtk_star_mdio_rwok_wait(priv);
1342 	if (ret)
1343 		return ret;
1344 
1345 	regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data);
1346 
1347 	data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
1348 	data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
1349 
1350 	return data;
1351 }
1352 
1353 static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
1354 			       int regnum, u16 data)
1355 {
1356 	struct mtk_star_priv *priv = mii->priv;
1357 	unsigned int val;
1358 
1359 	if (regnum & MII_ADDR_C45)
1360 		return -EOPNOTSUPP;
1361 
1362 	mtk_star_mdio_rwok_clear(priv);
1363 
1364 	val = data;
1365 	val <<= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
1366 	val &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
1367 	regnum <<= MTK_STAR_OFF_PHY_CTRL0_PREG;
1368 	regnum &= MTK_STAR_MSK_PHY_CTRL0_PREG;
1369 	val |= regnum;
1370 	val |= MTK_STAR_BIT_PHY_CTRL0_WTCMD;
1371 
1372 	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
1373 
1374 	return mtk_star_mdio_rwok_wait(priv);
1375 }
1376 
/* Register the MDIO bus described by the "mdio" child of the MAC's
 * device-tree node. Returns -ENODEV when the node is absent or
 * disabled. The bus is devm-managed; the OF node reference is dropped
 * on every path.
 */
static int mtk_star_mdio_init(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct device *dev = mtk_star_get_dev(priv);
	struct device_node *of_node, *mdio_node;
	int ret;

	of_node = dev->of_node;

	mdio_node = of_get_child_by_name(of_node, "mdio");
	if (!mdio_node)
		return -ENODEV;

	if (!of_device_is_available(mdio_node)) {
		ret = -ENODEV;
		goto out_put_node;
	}

	priv->mii = devm_mdiobus_alloc(dev);
	if (!priv->mii) {
		ret = -ENOMEM;
		goto out_put_node;
	}

	/* Use the device name as the (unique) bus id. */
	snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
	priv->mii->name = "mtk-mac-mdio";
	priv->mii->parent = dev;
	priv->mii->read = mtk_star_mdio_read;
	priv->mii->write = mtk_star_mdio_write;
	priv->mii->priv = priv;

	ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node);

out_put_node:
	of_node_put(mdio_node);
	return ret;
}
1414 
1415 static __maybe_unused int mtk_star_suspend(struct device *dev)
1416 {
1417 	struct mtk_star_priv *priv;
1418 	struct net_device *ndev;
1419 
1420 	ndev = dev_get_drvdata(dev);
1421 	priv = netdev_priv(ndev);
1422 
1423 	if (netif_running(ndev))
1424 		mtk_star_disable(ndev);
1425 
1426 	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1427 
1428 	return 0;
1429 }
1430 
1431 static __maybe_unused int mtk_star_resume(struct device *dev)
1432 {
1433 	struct mtk_star_priv *priv;
1434 	struct net_device *ndev;
1435 	int ret;
1436 
1437 	ndev = dev_get_drvdata(dev);
1438 	priv = netdev_priv(ndev);
1439 
1440 	ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
1441 	if (ret)
1442 		return ret;
1443 
1444 	if (netif_running(ndev)) {
1445 		ret = mtk_star_enable(ndev);
1446 		if (ret)
1447 			clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1448 	}
1449 
1450 	return ret;
1451 }
1452 
/* devm action callback: undo the clk_bulk_prepare_enable() done in
 * probe on device teardown or probe failure.
 */
static void mtk_star_clk_disable_unprepare(void *data)
{
	struct mtk_star_priv *priv = data;

	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
1459 
1460 static int mtk_star_set_timing(struct mtk_star_priv *priv)
1461 {
1462 	struct device *dev = mtk_star_get_dev(priv);
1463 	unsigned int delay_val = 0;
1464 
1465 	switch (priv->phy_intf) {
1466 	case PHY_INTERFACE_MODE_MII:
1467 	case PHY_INTERFACE_MODE_RMII:
1468 		delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_RX_CLK, priv->rx_inv);
1469 		delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_TX_CLK, priv->tx_inv);
1470 		break;
1471 	default:
1472 		dev_err(dev, "This interface not supported\n");
1473 		return -EINVAL;
1474 	}
1475 
1476 	return regmap_write(priv->regs, MTK_STAR_REG_TEST0, delay_val);
1477 }
1478 
/* Probe: map registers, acquire clocks, parse the device tree, set up
 * DMA rings, register the MDIO bus and finally the net_device. All
 * resources are devm-managed, so there is no matching remove routine
 * in this section.
 */
static int mtk_star_probe(struct platform_device *pdev)
{
	struct device_node *of_node;
	struct mtk_star_priv *priv;
	struct net_device *ndev;
	struct device *dev;
	void __iomem *base;
	int ret, i;

	dev = &pdev->dev;
	of_node = dev->of_node;

	ndev = devm_alloc_etherdev(dev, sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->compat_data = of_device_get_match_data(&pdev->dev);
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, ndev);

	ndev->min_mtu = ETH_ZLEN;
	ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE;

	spin_lock_init(&priv->lock);

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* We won't be checking the return values of regmap read & write
	 * functions. They can only fail for mmio if there's a clock attached
	 * to regmap which is not the case here.
	 */
	priv->regs = devm_regmap_init_mmio(dev, base,
					   &mtk_star_regmap_config);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	priv->pericfg = syscon_regmap_lookup_by_phandle(of_node,
							"mediatek,pericfg");
	if (IS_ERR(priv->pericfg)) {
		dev_err(dev, "Failed to lookup the PERICFG syscon\n");
		return PTR_ERR(priv->pericfg);
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0)
		return ndev->irq;

	/* Acquire and enable all bus clocks; the devm action below gates
	 * them again on teardown.
	 */
	for (i = 0; i < MTK_STAR_NCLKS; i++)
		priv->clks[i].id = mtk_star_clk_names[i];
	ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev,
				       mtk_star_clk_disable_unprepare, priv);
	if (ret)
		return ret;

	ret = of_get_phy_mode(of_node, &priv->phy_intf);
	if (ret) {
		return ret;
	} else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII &&
		   priv->phy_intf != PHY_INTERFACE_MODE_MII) {
		dev_err(dev, "unsupported phy mode: %s\n",
			phy_modes(priv->phy_intf));
		return -EINVAL;
	}

	/* NOTE(review): of_parse_phandle() takes a reference on the node
	 * that is apparently never dropped with of_node_put() - confirm
	 * whether this leaks a refcount.
	 */
	priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		dev_err(dev, "failed to retrieve the phy handle from device tree\n");
		return -ENODEV;
	}

	priv->rmii_rxc = of_property_read_bool(of_node, "mediatek,rmii-rxc");
	priv->rx_inv = of_property_read_bool(of_node, "mediatek,rxc-inverse");
	priv->tx_inv = of_property_read_bool(of_node, "mediatek,txc-inverse");

	/* SoC-specific PERICFG programming (MII/RMII mux, clock source). */
	if (priv->compat_data->set_interface_mode) {
		ret = priv->compat_data->set_interface_mode(ndev);
		if (ret) {
			dev_err(dev, "Failed to set phy interface, err = %d\n", ret);
			return -EINVAL;
		}
	}

	ret = mtk_star_set_timing(priv);
	if (ret) {
		dev_err(dev, "Failed to set timing, err = %d\n", ret);
		return -EINVAL;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "unsupported DMA mask\n");
		return ret;
	}

	/* Coherent memory backing both descriptor rings. */
	priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE,
					      &priv->dma_addr,
					      GFP_KERNEL | GFP_DMA);
	if (!priv->ring_base)
		return -ENOMEM;

	mtk_star_nic_disable_pd(priv);
	mtk_star_init_config(priv);

	ret = mtk_star_mdio_init(ndev);
	if (ret)
		return ret;

	/* Fall back to a random MAC address if none was provided. */
	ret = platform_get_ethdev_address(dev, ndev);
	if (ret || !is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	ndev->netdev_ops = &mtk_star_netdev_ops;
	ndev->ethtool_ops = &mtk_star_ethtool_ops;

	netif_napi_add(ndev, &priv->napi, mtk_star_poll, NAPI_POLL_WEIGHT);

	return devm_register_netdev(dev, ndev);
}
1609 
1610 #ifdef CONFIG_OF
1611 static int mt8516_set_interface_mode(struct net_device *ndev)
1612 {
1613 	struct mtk_star_priv *priv = netdev_priv(ndev);
1614 	struct device *dev = mtk_star_get_dev(priv);
1615 	unsigned int intf_val, ret, rmii_rxc;
1616 
1617 	switch (priv->phy_intf) {
1618 	case PHY_INTERFACE_MODE_MII:
1619 		intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
1620 		rmii_rxc = 0;
1621 		break;
1622 	case PHY_INTERFACE_MODE_RMII:
1623 		intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
1624 		rmii_rxc = priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK;
1625 		break;
1626 	default:
1627 		dev_err(dev, "This interface not supported\n");
1628 		return -EINVAL;
1629 	}
1630 
1631 	ret = regmap_update_bits(priv->pericfg,
1632 				 MTK_PERICFG_REG_NIC_CFG1_CON,
1633 				 MTK_PERICFG_BIT_NIC_CFG_CON_CLK,
1634 				 rmii_rxc);
1635 	if (ret)
1636 		return ret;
1637 
1638 	return regmap_update_bits(priv->pericfg,
1639 				  MTK_PERICFG_REG_NIC_CFG0_CON,
1640 				  MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF,
1641 				  intf_val);
1642 }
1643 
1644 static int mt8365_set_interface_mode(struct net_device *ndev)
1645 {
1646 	struct mtk_star_priv *priv = netdev_priv(ndev);
1647 	struct device *dev = mtk_star_get_dev(priv);
1648 	unsigned int intf_val;
1649 
1650 	switch (priv->phy_intf) {
1651 	case PHY_INTERFACE_MODE_MII:
1652 		intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
1653 		break;
1654 	case PHY_INTERFACE_MODE_RMII:
1655 		intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
1656 		intf_val |= priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2;
1657 		break;
1658 	default:
1659 		dev_err(dev, "This interface not supported\n");
1660 		return -EINVAL;
1661 	}
1662 
1663 	return regmap_update_bits(priv->pericfg,
1664 				  MTK_PERICFG_REG_NIC_CFG_CON_V2,
1665 				  MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF |
1666 				  MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2,
1667 				  intf_val);
1668 }
1669 
/* Per-SoC configuration: PERICFG programming hook and MDC clock
 * divider selection (see the MTK_STAR_BIT_CLK_DIV_* definitions).
 */
static const struct mtk_star_compat mtk_star_mt8516_compat = {
	.set_interface_mode = mt8516_set_interface_mode,
	.bit_clk_div = MTK_STAR_BIT_CLK_DIV_10,
};

static const struct mtk_star_compat mtk_star_mt8365_compat = {
	.set_interface_mode = mt8365_set_interface_mode,
	.bit_clk_div = MTK_STAR_BIT_CLK_DIV_50,
};

/* MT8518 and MT8175 share the MT8516 configuration. */
static const struct of_device_id mtk_star_of_match[] = {
	{ .compatible = "mediatek,mt8516-eth",
	  .data = &mtk_star_mt8516_compat },
	{ .compatible = "mediatek,mt8518-eth",
	  .data = &mtk_star_mt8516_compat },
	{ .compatible = "mediatek,mt8175-eth",
	  .data = &mtk_star_mt8516_compat },
	{ .compatible = "mediatek,mt8365-eth",
	  .data = &mtk_star_mt8365_compat },
	{ }
};
MODULE_DEVICE_TABLE(of, mtk_star_of_match);
1692 #endif
1693 
/* System sleep (suspend/resume) operations. */
static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
			 mtk_star_suspend, mtk_star_resume);

/* All resources in probe are devm-managed, so no .remove callback is
 * needed.
 */
static struct platform_driver mtk_star_driver = {
	.driver = {
		.name = MTK_STAR_DRVNAME,
		.pm = &mtk_star_pm_ops,
		.of_match_table = of_match_ptr(mtk_star_of_match),
	},
	.probe = mtk_star_probe,
};
module_platform_driver(mtk_star_driver);

MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver");
MODULE_LICENSE("GPL");
1710