1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2020 MediaTek Corporation
4  * Copyright (c) 2020 BayLibre SAS
5  *
6  * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
7  */
8 
9 #include <linux/bits.h>
10 #include <linux/clk.h>
11 #include <linux/compiler.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/etherdevice.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/mii.h>
17 #include <linux/module.h>
18 #include <linux/netdevice.h>
19 #include <linux/of.h>
20 #include <linux/of_mdio.h>
21 #include <linux/of_net.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm.h>
24 #include <linux/regmap.h>
25 #include <linux/skbuff.h>
26 #include <linux/spinlock.h>
27 
28 #define MTK_STAR_DRVNAME			"mtk_star_emac"
29 
30 #define MTK_STAR_WAIT_TIMEOUT			300
31 #define MTK_STAR_MAX_FRAME_SIZE			1514
32 #define MTK_STAR_SKB_ALIGNMENT			16
33 #define MTK_STAR_HASHTABLE_MC_LIMIT		256
34 #define MTK_STAR_HASHTABLE_SIZE_MAX		512
35 
36 /* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
37  * work for this controller.
38  */
39 #define MTK_STAR_IP_ALIGN			2
40 
41 static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
42 #define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names)
43 
44 /* PHY Control Register 0 */
45 #define MTK_STAR_REG_PHY_CTRL0			0x0000
46 #define MTK_STAR_BIT_PHY_CTRL0_WTCMD		BIT(13)
47 #define MTK_STAR_BIT_PHY_CTRL0_RDCMD		BIT(14)
48 #define MTK_STAR_BIT_PHY_CTRL0_RWOK		BIT(15)
49 #define MTK_STAR_MSK_PHY_CTRL0_PREG		GENMASK(12, 8)
50 #define MTK_STAR_OFF_PHY_CTRL0_PREG		8
51 #define MTK_STAR_MSK_PHY_CTRL0_RWDATA		GENMASK(31, 16)
52 #define MTK_STAR_OFF_PHY_CTRL0_RWDATA		16
53 
54 /* PHY Control Register 1 */
55 #define MTK_STAR_REG_PHY_CTRL1			0x0004
56 #define MTK_STAR_BIT_PHY_CTRL1_LINK_ST		BIT(0)
57 #define MTK_STAR_BIT_PHY_CTRL1_AN_EN		BIT(8)
58 #define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD	9
59 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M	0x00
60 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M	0x01
61 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M	0x02
62 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX	BIT(11)
63 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX	BIT(12)
64 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX	BIT(13)
65 
66 /* MAC Configuration Register */
67 #define MTK_STAR_REG_MAC_CFG			0x0008
68 #define MTK_STAR_OFF_MAC_CFG_IPG		10
69 #define MTK_STAR_VAL_MAC_CFG_IPG_96BIT		GENMASK(4, 0)
70 #define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522	BIT(16)
71 #define MTK_STAR_BIT_MAC_CFG_AUTO_PAD		BIT(19)
72 #define MTK_STAR_BIT_MAC_CFG_CRC_STRIP		BIT(20)
73 #define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP		BIT(22)
74 #define MTK_STAR_BIT_MAC_CFG_NIC_PD		BIT(31)
75 
76 /* Flow-Control Configuration Register */
77 #define MTK_STAR_REG_FC_CFG			0x000c
78 #define MTK_STAR_BIT_FC_CFG_BP_EN		BIT(7)
79 #define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR	BIT(8)
80 #define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH	16
81 #define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH	GENMASK(27, 16)
82 #define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K	0x800
83 
84 /* ARL Configuration Register */
85 #define MTK_STAR_REG_ARL_CFG			0x0010
86 #define MTK_STAR_BIT_ARL_CFG_HASH_ALG		BIT(0)
87 #define MTK_STAR_BIT_ARL_CFG_MISC_MODE		BIT(4)
88 
89 /* MAC High and Low Bytes Registers */
90 #define MTK_STAR_REG_MY_MAC_H			0x0014
91 #define MTK_STAR_REG_MY_MAC_L			0x0018
92 
93 /* Hash Table Control Register */
94 #define MTK_STAR_REG_HASH_CTRL			0x001c
95 #define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR	GENMASK(8, 0)
96 #define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA	BIT(12)
97 #define MTK_STAR_BIT_HASH_CTRL_ACC_CMD		BIT(13)
98 #define MTK_STAR_BIT_HASH_CTRL_CMD_START	BIT(14)
99 #define MTK_STAR_BIT_HASH_CTRL_BIST_OK		BIT(16)
100 #define MTK_STAR_BIT_HASH_CTRL_BIST_DONE	BIT(17)
101 #define MTK_STAR_BIT_HASH_CTRL_BIST_EN		BIT(31)
102 
103 /* TX DMA Control Register */
104 #define MTK_STAR_REG_TX_DMA_CTRL		0x0034
105 #define MTK_STAR_BIT_TX_DMA_CTRL_START		BIT(0)
106 #define MTK_STAR_BIT_TX_DMA_CTRL_STOP		BIT(1)
107 #define MTK_STAR_BIT_TX_DMA_CTRL_RESUME		BIT(2)
108 
109 /* RX DMA Control Register */
110 #define MTK_STAR_REG_RX_DMA_CTRL		0x0038
111 #define MTK_STAR_BIT_RX_DMA_CTRL_START		BIT(0)
112 #define MTK_STAR_BIT_RX_DMA_CTRL_STOP		BIT(1)
113 #define MTK_STAR_BIT_RX_DMA_CTRL_RESUME		BIT(2)
114 
115 /* DMA Address Registers */
116 #define MTK_STAR_REG_TX_DPTR			0x003c
117 #define MTK_STAR_REG_RX_DPTR			0x0040
118 #define MTK_STAR_REG_TX_BASE_ADDR		0x0044
119 #define MTK_STAR_REG_RX_BASE_ADDR		0x0048
120 
121 /* Interrupt Status Register */
122 #define MTK_STAR_REG_INT_STS			0x0050
123 #define MTK_STAR_REG_INT_STS_PORT_STS_CHG	BIT(2)
124 #define MTK_STAR_REG_INT_STS_MIB_CNT_TH		BIT(3)
125 #define MTK_STAR_BIT_INT_STS_FNRC		BIT(6)
126 #define MTK_STAR_BIT_INT_STS_TNTC		BIT(8)
127 
128 /* Interrupt Mask Register */
129 #define MTK_STAR_REG_INT_MASK			0x0054
130 #define MTK_STAR_BIT_INT_MASK_FNRC		BIT(6)
131 
132 /* Misc. Config Register */
133 #define MTK_STAR_REG_TEST1			0x005c
134 #define MTK_STAR_BIT_TEST1_RST_HASH_MBIST	BIT(31)
135 
136 /* Extended Configuration Register */
137 #define MTK_STAR_REG_EXT_CFG			0x0060
138 #define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS	16
139 #define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS	GENMASK(26, 16)
140 #define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K	0x400
141 
142 /* EthSys Configuration Register */
143 #define MTK_STAR_REG_SYS_CONF			0x0094
144 #define MTK_STAR_BIT_MII_PAD_OUT_ENABLE		BIT(0)
145 #define MTK_STAR_BIT_EXT_MDC_MODE		BIT(1)
146 #define MTK_STAR_BIT_SWC_MII_MODE		BIT(2)
147 
148 /* MAC Clock Configuration Register */
149 #define MTK_STAR_REG_MAC_CLK_CONF		0x00ac
150 #define MTK_STAR_MSK_MAC_CLK_CONF		GENMASK(7, 0)
151 #define MTK_STAR_BIT_CLK_DIV_10			0x0a
152 
153 /* Counter registers. */
154 #define MTK_STAR_REG_C_RXOKPKT			0x0100
155 #define MTK_STAR_REG_C_RXOKBYTE			0x0104
156 #define MTK_STAR_REG_C_RXRUNT			0x0108
157 #define MTK_STAR_REG_C_RXLONG			0x010c
158 #define MTK_STAR_REG_C_RXDROP			0x0110
159 #define MTK_STAR_REG_C_RXCRC			0x0114
160 #define MTK_STAR_REG_C_RXARLDROP		0x0118
161 #define MTK_STAR_REG_C_RXVLANDROP		0x011c
162 #define MTK_STAR_REG_C_RXCSERR			0x0120
163 #define MTK_STAR_REG_C_RXPAUSE			0x0124
164 #define MTK_STAR_REG_C_TXOKPKT			0x0128
165 #define MTK_STAR_REG_C_TXOKBYTE			0x012c
166 #define MTK_STAR_REG_C_TXPAUSECOL		0x0130
167 #define MTK_STAR_REG_C_TXRTY			0x0134
168 #define MTK_STAR_REG_C_TXSKIP			0x0138
169 #define MTK_STAR_REG_C_TX_ARP			0x013c
170 #define MTK_STAR_REG_C_RX_RERR			0x01d8
171 #define MTK_STAR_REG_C_RX_UNI			0x01dc
172 #define MTK_STAR_REG_C_RX_MULTI			0x01e0
173 #define MTK_STAR_REG_C_RX_BROAD			0x01e4
174 #define MTK_STAR_REG_C_RX_ALIGNERR		0x01e8
175 #define MTK_STAR_REG_C_TX_UNI			0x01ec
176 #define MTK_STAR_REG_C_TX_MULTI			0x01f0
177 #define MTK_STAR_REG_C_TX_BROAD			0x01f4
178 #define MTK_STAR_REG_C_TX_TIMEOUT		0x01f8
179 #define MTK_STAR_REG_C_TX_LATECOL		0x01fc
180 #define MTK_STAR_REG_C_RX_LENGTHERR		0x0214
181 #define MTK_STAR_REG_C_RX_TWIST			0x0218
182 
183 /* Ethernet CFG Control */
184 #define MTK_PERICFG_REG_NIC_CFG_CON		0x03c4
185 #define MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII	GENMASK(3, 0)
186 #define MTK_PERICFG_BIT_NIC_CFG_CON_RMII	BIT(0)
187 
188 /* Represents the actual structure of descriptors used by the MAC. We can
189  * reuse the same structure for both TX and RX - the layout is the same, only
190  * the flags differ slightly.
191  */
struct mtk_star_ring_desc {
	/* Contains both the status flags as well as packet length. */
	u32 status;
	/* Bus address of the packet buffer. */
	u32 data_ptr;
	/* VLAN tag word (per the field name) - not accessed in this file. */
	u32 vtag;
	u32 reserved;
};
199 
200 #define MTK_STAR_DESC_MSK_LEN			GENMASK(15, 0)
201 #define MTK_STAR_DESC_BIT_RX_CRCE		BIT(24)
202 #define MTK_STAR_DESC_BIT_RX_OSIZE		BIT(25)
203 #define MTK_STAR_DESC_BIT_INT			BIT(27)
204 #define MTK_STAR_DESC_BIT_LS			BIT(28)
205 #define MTK_STAR_DESC_BIT_FS			BIT(29)
206 #define MTK_STAR_DESC_BIT_EOR			BIT(30)
207 #define MTK_STAR_DESC_BIT_COWN			BIT(31)
208 
209 /* Helper structure for storing data read from/written to descriptors in order
210  * to limit reads from/writes to DMA memory.
211  */
struct mtk_star_ring_desc_data {
	/* Packet length extracted from the descriptor's status word. */
	unsigned int len;
	/* Status bits other than the length field. */
	unsigned int flags;
	/* DMA mapping and skb associated with the descriptor slot. */
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};
218 
219 #define MTK_STAR_RING_NUM_DESCS			128
220 #define MTK_STAR_NUM_TX_DESCS			MTK_STAR_RING_NUM_DESCS
221 #define MTK_STAR_NUM_RX_DESCS			MTK_STAR_RING_NUM_DESCS
222 #define MTK_STAR_NUM_DESCS_TOTAL		(MTK_STAR_RING_NUM_DESCS * 2)
223 #define MTK_STAR_DMA_SIZE \
224 		(MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc))
225 
struct mtk_star_ring {
	/* Points into the shared descriptor area (see mtk_star_dma_init()). */
	struct mtk_star_ring_desc *descs;
	/* Per-slot skb and DMA-address bookkeeping, indexed like descs. */
	struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS];
	dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS];
	/* head: next slot to hand to hardware (push); tail: next slot to
	 * reclaim from hardware (pop).
	 */
	unsigned int head;
	unsigned int tail;
};
233 
/* Driver private state, attached to the net_device via netdev_priv(). */
struct mtk_star_priv {
	struct net_device *ndev;

	struct regmap *regs;	/* MAC register space */
	struct regmap *pericfg;	/* peripheral config syscon (RMII setup) */

	struct clk_bulk_data clks[MTK_STAR_NCLKS];

	/* Descriptor memory shared with the controller; dma_addr is its
	 * bus address (see mtk_star_dma_init()).
	 */
	void *ring_base;
	struct mtk_star_ring_desc *descs_base;
	dma_addr_t dma_addr;
	struct mtk_star_ring tx_ring;
	struct mtk_star_ring rx_ring;

	struct mii_bus *mii;
	struct napi_struct napi;

	struct device_node *phy_node;
	phy_interface_t phy_intf;
	struct phy_device *phydev;
	/* Link state mirrored from the PHY in mtk_star_adjust_link(). */
	unsigned int link;
	int speed;
	int duplex;
	int pause;

	/* Protects against concurrent descriptor access. */
	spinlock_t lock;

	struct rtnl_link_stats64 stats;
};
264 
265 static struct device *mtk_star_get_dev(struct mtk_star_priv *priv)
266 {
267 	return priv->ndev->dev.parent;
268 }
269 
/* MAC register space: 32-bit registers at a 4-byte stride.
 *
 * NOTE(review): internal regmap locking is disabled, so register access
 * must be serialized by the callers - confirm all paths do so (e.g. via
 * priv->lock or masked interrupts).
 */
static const struct regmap_config mtk_star_regmap_config = {
	.reg_bits		= 32,
	.val_bits		= 32,
	.reg_stride		= 4,
	.disable_locking	= true,
};
276 
277 static void mtk_star_ring_init(struct mtk_star_ring *ring,
278 			       struct mtk_star_ring_desc *descs)
279 {
280 	memset(ring, 0, sizeof(*ring));
281 	ring->descs = descs;
282 	ring->head = 0;
283 	ring->tail = 0;
284 }
285 
/* Reclaim the descriptor at the ring's tail.
 *
 * Fills 'desc_data' with the descriptor's length, flags, DMA address and
 * skb and returns 0 when the hardware has released the slot (COWN set).
 * Returns -1 while the descriptor is still owned by the DMA engine.
 * Ownership of the skb and its DMA mapping passes to the caller.
 */
static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring,
				  struct mtk_star_ring_desc_data *desc_data)
{
	struct mtk_star_ring_desc *desc = &ring->descs[ring->tail];
	unsigned int status;

	status = READ_ONCE(desc->status);
	dma_rmb(); /* Make sure we read the status bits before checking it. */

	if (!(status & MTK_STAR_DESC_BIT_COWN))
		return -1;

	/* The low 16 bits carry the packet length, the rest are flags. */
	desc_data->len = status & MTK_STAR_DESC_MSK_LEN;
	desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN;
	desc_data->dma_addr = ring->dma_addrs[ring->tail];
	desc_data->skb = ring->skbs[ring->tail];

	ring->dma_addrs[ring->tail] = 0;
	ring->skbs[ring->tail] = NULL;

	/* Keep only CPU ownership and the end-of-ring marker; length and
	 * remaining flags are cleared for reuse.
	 */
	status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR;

	WRITE_ONCE(desc->data_ptr, 0);
	WRITE_ONCE(desc->status, status);

	ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS;

	return 0;
}
315 
/* Install a buffer in the descriptor at the ring's head and hand it to
 * the DMA engine.
 *
 * The status word is written twice on purpose: first with length and
 * flags while the CPU still owns the descriptor, then - after a DMA
 * write barrier - with COWN cleared, so the hardware can never observe
 * a half-initialized descriptor.
 */
static void mtk_star_ring_push_head(struct mtk_star_ring *ring,
				    struct mtk_star_ring_desc_data *desc_data,
				    unsigned int flags)
{
	struct mtk_star_ring_desc *desc = &ring->descs[ring->head];
	unsigned int status;

	status = READ_ONCE(desc->status);

	ring->skbs[ring->head] = desc_data->skb;
	ring->dma_addrs[ring->head] = desc_data->dma_addr;

	status |= desc_data->len;
	if (flags)
		status |= flags;

	WRITE_ONCE(desc->data_ptr, desc_data->dma_addr);
	WRITE_ONCE(desc->status, status);
	status &= ~MTK_STAR_DESC_BIT_COWN;
	/* Flush previous modifications before ownership change. */
	dma_wmb();
	WRITE_ONCE(desc->status, status);

	ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS;
}
341 
/* Push an RX buffer descriptor - no extra flags, the status word only
 * carries the buffer length and the ownership bit.
 */
static void
mtk_star_ring_push_head_rx(struct mtk_star_ring *ring,
			   struct mtk_star_ring_desc_data *desc_data)
{
	mtk_star_ring_push_head(ring, desc_data, 0);
}
348 
/* Push a TX descriptor: each packet occupies a single descriptor, so it
 * is marked as both first and last segment, with a completion interrupt
 * requested.
 */
static void
mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
			   struct mtk_star_ring_desc_data *desc_data)
{
	static const unsigned int flags = MTK_STAR_DESC_BIT_FS |
					  MTK_STAR_DESC_BIT_LS |
					  MTK_STAR_DESC_BIT_INT;

	mtk_star_ring_push_head(ring, desc_data, flags);
}
359 
/* Number of descriptors currently in flight between head (producer) and
 * tail (consumer).
 *
 * NOTE(review): head and tail are unsigned, so head - tail wraps before
 * abs() sees it; this relies on the kernel's type-generic abs() and on
 * both indices staying below MTK_STAR_RING_NUM_DESCS - confirm the
 * intended semantics if the ring size ever changes.
 */
static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
{
	return abs(ring->head - ring->tail);
}
364 
365 static bool mtk_star_ring_full(struct mtk_star_ring *ring)
366 {
367 	return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
368 }
369 
370 static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
371 {
372 	return mtk_star_ring_num_used_descs(ring) > 0;
373 }
374 
/* Map an RX skb's buffer for device writes.
 *
 * Backing up two bytes from the tail pointer matches the layout set up
 * in mtk_star_alloc_skb(), which reserves MTK_STAR_IP_ALIGN (2) bytes
 * after aligning the buffer.
 */
static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
				      struct sk_buff *skb)
{
	struct device *dev = mtk_star_get_dev(priv);

	/* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. */
	return dma_map_single(dev, skb_tail_pointer(skb) - 2,
			      skb_tailroom(skb), DMA_FROM_DEVICE);
}
384 
/* Release the device mapping created by mtk_star_dma_map_rx(); the
 * unmapped length (the skb's tailroom) mirrors the map call.
 */
static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv,
				  struct mtk_star_ring_desc_data *desc_data)
{
	struct device *dev = mtk_star_get_dev(priv);

	dma_unmap_single(dev, desc_data->dma_addr,
			 skb_tailroom(desc_data->skb), DMA_FROM_DEVICE);
}
393 
/* Map an skb's linear data for transmission.
 *
 * NOTE(review): only the linear part (skb_headlen()) is mapped, so this
 * assumes TX skbs carry no paged fragments - confirm against the
 * feature flags set at probe time.
 */
static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv,
				      struct sk_buff *skb)
{
	struct device *dev = mtk_star_get_dev(priv);

	return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
}
401 
402 static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv,
403 				  struct mtk_star_ring_desc_data *desc_data)
404 {
405 	struct device *dev = mtk_star_get_dev(priv);
406 
407 	return dma_unmap_single(dev, desc_data->dma_addr,
408 				skb_headlen(desc_data->skb), DMA_TO_DEVICE);
409 }
410 
411 static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
412 {
413 	regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
414 			  MTK_STAR_BIT_MAC_CFG_NIC_PD);
415 }
416 
417 /* Unmask the three interrupts we care about, mask all others. */
418 static void mtk_star_intr_enable(struct mtk_star_priv *priv)
419 {
420 	unsigned int val = MTK_STAR_BIT_INT_STS_TNTC |
421 			   MTK_STAR_BIT_INT_STS_FNRC |
422 			   MTK_STAR_REG_INT_STS_MIB_CNT_TH;
423 
424 	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val);
425 }
426 
/* Mask all interrupt sources. */
static void mtk_star_intr_disable(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
}
431 
/* Return the raw contents of the interrupt status register. */
static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
{
	unsigned int val;

	regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);

	return val;
}
440 
441 static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
442 {
443 	unsigned int val;
444 
445 	val = mtk_star_intr_read(priv);
446 	regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
447 
448 	return val;
449 }
450 
/* Initialize both descriptor rings inside the shared descriptor area and
 * program the base/current ring pointers into the controller.
 *
 * The TX ring occupies the first MTK_STAR_NUM_TX_DESCS descriptors, the
 * RX ring the rest. Every descriptor starts out CPU-owned (COWN set) and
 * the last descriptor of each ring is tagged end-of-ring.
 */
static void mtk_star_dma_init(struct mtk_star_priv *priv)
{
	struct mtk_star_ring_desc *desc;
	unsigned int val;
	int i;

	priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base;

	for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) {
		desc = &priv->descs_base[i];

		memset(desc, 0, sizeof(*desc));
		desc->status = MTK_STAR_DESC_BIT_COWN;
		if ((i == MTK_STAR_NUM_TX_DESCS - 1) ||
		    (i == MTK_STAR_NUM_DESCS_TOTAL - 1))
			desc->status |= MTK_STAR_DESC_BIT_EOR;
	}

	mtk_star_ring_init(&priv->tx_ring, priv->descs_base);
	mtk_star_ring_init(&priv->rx_ring,
			   priv->descs_base + MTK_STAR_NUM_TX_DESCS);

	/* Set DMA pointers. */
	val = (unsigned int)priv->dma_addr;
	regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val);
	regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val);

	/* The RX ring starts right after the TX descriptors in the same
	 * DMA area.
	 */
	val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS;
	regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val);
	regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val);
}
482 
/* Start both the TX and RX DMA engines. */
static void mtk_star_dma_start(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
			MTK_STAR_BIT_TX_DMA_CTRL_START);
	regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
			MTK_STAR_BIT_RX_DMA_CTRL_START);
}
490 
/* Stop both DMA engines. Note these are full register writes (not
 * set-bits), so only the STOP bit remains set afterwards.
 */
static void mtk_star_dma_stop(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
		     MTK_STAR_BIT_TX_DMA_CTRL_STOP);
	regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
		     MTK_STAR_BIT_RX_DMA_CTRL_STOP);
}
498 
/* Stop DMA and reclaim CPU ownership of every descriptor by setting the
 * COWN bit, so the rings can be torn down or reinitialized safely.
 */
static void mtk_star_dma_disable(struct mtk_star_priv *priv)
{
	int i;

	mtk_star_dma_stop(priv);

	/* Take back all descriptors. */
	for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++)
		priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN;
}
509 
/* Tell the RX DMA engine to resume descriptor processing. */
static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
			MTK_STAR_BIT_RX_DMA_CTRL_RESUME);
}
515 
/* Tell the TX DMA engine to resume descriptor processing. */
static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
			MTK_STAR_BIT_TX_DMA_CTRL_RESUME);
}
521 
522 static void mtk_star_set_mac_addr(struct net_device *ndev)
523 {
524 	struct mtk_star_priv *priv = netdev_priv(ndev);
525 	const u8 *mac_addr = ndev->dev_addr;
526 	unsigned int high, low;
527 
528 	high = mac_addr[0] << 8 | mac_addr[1] << 0;
529 	low = mac_addr[2] << 24 | mac_addr[3] << 16 |
530 	      mac_addr[4] << 8 | mac_addr[5];
531 
532 	regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high);
533 	regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low);
534 }
535 
/* Read every hardware statistics counter once, discarding the values.
 *
 * NOTE(review): this only resets the counters if they are clear-on-read,
 * which the function name implies - confirm against the datasheet.
 */
static void mtk_star_reset_counters(struct mtk_star_priv *priv)
{
	static const unsigned int counter_regs[] = {
		MTK_STAR_REG_C_RXOKPKT,
		MTK_STAR_REG_C_RXOKBYTE,
		MTK_STAR_REG_C_RXRUNT,
		MTK_STAR_REG_C_RXLONG,
		MTK_STAR_REG_C_RXDROP,
		MTK_STAR_REG_C_RXCRC,
		MTK_STAR_REG_C_RXARLDROP,
		MTK_STAR_REG_C_RXVLANDROP,
		MTK_STAR_REG_C_RXCSERR,
		MTK_STAR_REG_C_RXPAUSE,
		MTK_STAR_REG_C_TXOKPKT,
		MTK_STAR_REG_C_TXOKBYTE,
		MTK_STAR_REG_C_TXPAUSECOL,
		MTK_STAR_REG_C_TXRTY,
		MTK_STAR_REG_C_TXSKIP,
		MTK_STAR_REG_C_TX_ARP,
		MTK_STAR_REG_C_RX_RERR,
		MTK_STAR_REG_C_RX_UNI,
		MTK_STAR_REG_C_RX_MULTI,
		MTK_STAR_REG_C_RX_BROAD,
		MTK_STAR_REG_C_RX_ALIGNERR,
		MTK_STAR_REG_C_TX_UNI,
		MTK_STAR_REG_C_TX_MULTI,
		MTK_STAR_REG_C_TX_BROAD,
		MTK_STAR_REG_C_TX_TIMEOUT,
		MTK_STAR_REG_C_TX_LATECOL,
		MTK_STAR_REG_C_RX_LENGTHERR,
		MTK_STAR_REG_C_RX_TWIST,
	};

	unsigned int i, val;

	for (i = 0; i < ARRAY_SIZE(counter_regs); i++)
		regmap_read(priv->regs, counter_regs[i], &val);
}
574 
575 static void mtk_star_update_stat(struct mtk_star_priv *priv,
576 				 unsigned int reg, u64 *stat)
577 {
578 	unsigned int val;
579 
580 	regmap_read(priv->regs, reg, &val);
581 	*stat += val;
582 }
583 
/* Try to get as many stats as possible from the internal registers instead
 * of tracking them ourselves.
 */
static void mtk_star_update_stats(struct mtk_star_priv *priv)
{
	struct rtnl_link_stats64 *stats = &priv->stats;

	/* OK packets and bytes. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes);

	/* RX & TX multicast - both directions accumulate into the single
	 * 'multicast' counter.
	 */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast);

	/* Collisions. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions);

	/* RX Errors. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR,
			     &stats->rx_length_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG,
			     &stats->rx_over_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR,
			     &stats->rx_frame_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP,
			     &stats->rx_fifo_errors);
	/* Sum of the general RX error counter + all of the above. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors);
	stats->rx_errors += stats->rx_length_errors;
	stats->rx_errors += stats->rx_over_errors;
	stats->rx_errors += stats->rx_crc_errors;
	stats->rx_errors += stats->rx_frame_errors;
	stats->rx_errors += stats->rx_fifo_errors;
}
626 
/* Allocate an RX skb laid out for this controller.
 *
 * Returns NULL on allocation failure. The buffer is first aligned to 16
 * bytes, then offset by MTK_STAR_IP_ALIGN (2) so that after
 * eth_type_trans() pulls the 14-byte Ethernet header the IP header is
 * aligned; mtk_star_dma_map_rx() relies on this exact layout.
 */
static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev)
{
	uintptr_t tail, offset;
	struct sk_buff *skb;

	skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE);
	if (!skb)
		return NULL;

	/* Align to 16 bytes. */
	tail = (uintptr_t)skb_tail_pointer(skb);
	if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) {
		offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1);
		skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset);
	}

	/* Ensure 16-byte alignment of the skb pointer: eth_type_trans() will
	 * extract the Ethernet header (14 bytes) so we need two more bytes.
	 */
	skb_reserve(skb, MTK_STAR_IP_ALIGN);

	return skb;
}
650 
/* Allocate and DMA-map an skb for every RX descriptor, then hand each
 * descriptor to the hardware (COWN cleared).
 *
 * Returns 0 on success or -ENOMEM on allocation/mapping failure.
 * NOTE(review): skbs installed before a failure remain in the ring; the
 * caller must release them (mtk_star_free_rx_skbs() skips unused slots,
 * so it is safe to call on a partially-populated ring).
 */
static int mtk_star_prepare_rx_skbs(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct mtk_star_ring *ring = &priv->rx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int i;

	for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) {
		skb = mtk_star_alloc_skb(ndev);
		if (!skb)
			return -ENOMEM;

		dma_addr = mtk_star_dma_map_rx(priv, skb);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb(skb);
			return -ENOMEM;
		}

		/* Buffer length goes into the low 16 bits; clearing COWN
		 * gives the descriptor to the DMA engine.
		 */
		desc = &ring->descs[i];
		desc->data_ptr = dma_addr;
		desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN;
		desc->status &= ~MTK_STAR_DESC_BIT_COWN;
		ring->skbs[i] = skb;
		ring->dma_addrs[i] = dma_addr;
	}

	return 0;
}
682 
/* Unmap and free every skb still attached to a ring.
 *
 * 'unmap_func' supplies the direction-specific DMA unmap (RX or TX).
 * Slots with a zero DMA address are unused and skipped, so this is safe
 * on partially-populated rings.
 */
static void
mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring,
			void (*unmap_func)(struct mtk_star_priv *,
					   struct mtk_star_ring_desc_data *))
{
	struct mtk_star_ring_desc_data desc_data;
	int i;

	for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) {
		if (!ring->dma_addrs[i])
			continue;

		desc_data.dma_addr = ring->dma_addrs[i];
		desc_data.skb = ring->skbs[i];

		unmap_func(priv, &desc_data);
		dev_kfree_skb(desc_data.skb);
	}
}
702 
/* Release all skbs still held by the RX ring. */
static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->rx_ring;

	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx);
}
709 
/* Release all skbs still held by the TX ring. */
static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->tx_ring;

	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
}
716 
/* All processing for TX and RX happens in the napi poll callback.
 *
 * FIXME: The interrupt handling should be more fine-grained with each
 * interrupt enabled/disabled independently when needed. Unfortunatly this
 * turned out to impact the driver's stability and until we have something
 * working properly, we're disabling all interrupts during TX & RX processing
 * or when resetting the counter registers.
 */
static irqreturn_t mtk_star_handle_irq(int irq, void *data)
{
	struct mtk_star_priv *priv;
	struct net_device *ndev;

	ndev = data;
	priv = netdev_priv(ndev);

	/* Mask everything and defer to NAPI; interrupts are presumably
	 * re-enabled from the poll callback (not visible here).
	 */
	if (netif_running(ndev)) {
		mtk_star_intr_disable(priv);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}
740 
/* Wait for the completion of any previous command - CMD_START bit must be
 * cleared by hardware.
 *
 * Polls every 10us for up to MTK_STAR_WAIT_TIMEOUT us; returns 0 on
 * success or -ETIMEDOUT from the regmap poll helper.
 */
static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv)
{
	unsigned int val;

	return regmap_read_poll_timeout_atomic(priv->regs,
				MTK_STAR_REG_HASH_CTRL, val,
				!(val & MTK_STAR_BIT_HASH_CTRL_CMD_START),
				10, MTK_STAR_WAIT_TIMEOUT);
}
753 
/* Wait for a hash-table BIST operation to complete and verify it
 * succeeded.
 *
 * Returns 0 on success, a timeout error if BIST_DONE never sets, or
 * -EIO if BIST_DONE is set without BIST_OK.
 */
static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv)
{
	unsigned int val;
	int ret;

	/* Wait for BIST_DONE bit. */
	ret = regmap_read_poll_timeout_atomic(priv->regs,
					MTK_STAR_REG_HASH_CTRL, val,
					val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE,
					10, MTK_STAR_WAIT_TIMEOUT);
	if (ret)
		return ret;

	/* Check the BIST_OK bit. */
	if (!regmap_test_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
			      MTK_STAR_BIT_HASH_CTRL_BIST_OK))
		return -EIO;

	return 0;
}
774 
/* Set a single bit in the hardware multicast hash table.
 *
 * Waits for any in-flight command to finish, then issues a write command
 * for the given hash bit address and waits for the result.
 */
static int mtk_star_set_hashbit(struct mtk_star_priv *priv,
				unsigned int hash_addr)
{
	unsigned int val;
	int ret;

	ret = mtk_star_hash_wait_cmd_start(priv);
	if (ret)
		return ret;

	/* Write command: bit address, data = 1, kick CMD_START. */
	val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR;
	val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD;
	val |= MTK_STAR_BIT_HASH_CTRL_CMD_START;
	val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN;
	val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA;
	regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val);

	return mtk_star_hash_wait_ok(priv);
}
794 
/* Clear the whole multicast hash table via the hardware BIST reset.
 *
 * Returns 0 on success or a negative error code from the wait helpers.
 */
static int mtk_star_reset_hash_table(struct mtk_star_priv *priv)
{
	int ret;

	ret = mtk_star_hash_wait_cmd_start(priv);
	if (ret)
		return ret;

	regmap_set_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
			MTK_STAR_BIT_HASH_CTRL_BIST_EN);
	regmap_set_bits(priv->regs, MTK_STAR_REG_TEST1,
			MTK_STAR_BIT_TEST1_RST_HASH_MBIST);

	return mtk_star_hash_wait_ok(priv);
}
810 
/* Program the MAC-side PHY control and flow-control registers from the
 * link state cached in priv (speed, pause) by mtk_star_adjust_link().
 */
static void mtk_star_phy_config(struct mtk_star_priv *priv)
{
	unsigned int val;

	if (priv->speed == SPEED_1000)
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M;
	else if (priv->speed == SPEED_100)
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M;
	else
		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M;
	val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;

	val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
	/* Only full-duplex supported for now. */
	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;

	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);

	/* Pause-frame send threshold: 2K when pause is negotiated,
	 * disabled otherwise.
	 */
	if (priv->pause) {
		val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
		val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
		val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
	} else {
		val = 0;
	}

	regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
			   MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
			   MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);

	/* Pause release threshold: 1K when pause is negotiated. */
	if (priv->pause) {
		val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
		val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
	} else {
		val = 0;
	}

	regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
			   MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
}
853 
854 static void mtk_star_adjust_link(struct net_device *ndev)
855 {
856 	struct mtk_star_priv *priv = netdev_priv(ndev);
857 	struct phy_device *phydev = priv->phydev;
858 	bool new_state = false;
859 
860 	if (phydev->link) {
861 		if (!priv->link) {
862 			priv->link = phydev->link;
863 			new_state = true;
864 		}
865 
866 		if (priv->speed != phydev->speed) {
867 			priv->speed = phydev->speed;
868 			new_state = true;
869 		}
870 
871 		if (priv->pause != phydev->pause) {
872 			priv->pause = phydev->pause;
873 			new_state = true;
874 		}
875 	} else {
876 		if (priv->link) {
877 			priv->link = phydev->link;
878 			new_state = true;
879 		}
880 	}
881 
882 	if (new_state) {
883 		if (phydev->link)
884 			mtk_star_phy_config(priv);
885 
886 		phy_print_status(ndev->phydev);
887 	}
888 }
889 
/* One-time controller configuration: MII pad/MDC mode in the EthSys
 * config register and the MAC clock divider.
 */
static void mtk_star_init_config(struct mtk_star_priv *priv)
{
	unsigned int val;

	val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE |
	       MTK_STAR_BIT_EXT_MDC_MODE |
	       MTK_STAR_BIT_SWC_MII_MODE);

	regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
	regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
			   MTK_STAR_MSK_MAC_CLK_CONF,
			   MTK_STAR_BIT_CLK_DIV_10);
}
903 
/* Select RMII mode in the peripheral configuration syscon. */
static void mtk_star_set_mode_rmii(struct mtk_star_priv *priv)
{
	regmap_update_bits(priv->pericfg, MTK_PERICFG_REG_NIC_CFG_CON,
			   MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII,
			   MTK_PERICFG_BIT_NIC_CFG_CON_RMII);
}
910 
911 static int mtk_star_enable(struct net_device *ndev)
912 {
913 	struct mtk_star_priv *priv = netdev_priv(ndev);
914 	unsigned int val;
915 	int ret;
916 
917 	mtk_star_nic_disable_pd(priv);
918 	mtk_star_intr_disable(priv);
919 	mtk_star_dma_stop(priv);
920 
921 	mtk_star_set_mac_addr(ndev);
922 
923 	/* Configure the MAC */
924 	val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT;
925 	val <<= MTK_STAR_OFF_MAC_CFG_IPG;
926 	val |= MTK_STAR_BIT_MAC_CFG_MAXLEN_1522;
927 	val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD;
928 	val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP;
929 	regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val);
930 
931 	/* Enable Hash Table BIST and reset it */
932 	ret = mtk_star_reset_hash_table(priv);
933 	if (ret)
934 		return ret;
935 
936 	/* Setup the hashing algorithm */
937 	regmap_clear_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
938 			  MTK_STAR_BIT_ARL_CFG_HASH_ALG |
939 			  MTK_STAR_BIT_ARL_CFG_MISC_MODE);
940 
941 	/* Don't strip VLAN tags */
942 	regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
943 			  MTK_STAR_BIT_MAC_CFG_VLAN_STRIP);
944 
945 	/* Setup DMA */
946 	mtk_star_dma_init(priv);
947 
948 	ret = mtk_star_prepare_rx_skbs(ndev);
949 	if (ret)
950 		goto err_out;
951 
952 	/* Request the interrupt */
953 	ret = request_irq(ndev->irq, mtk_star_handle_irq,
954 			  IRQF_TRIGGER_FALLING, ndev->name, ndev);
955 	if (ret)
956 		goto err_free_skbs;
957 
958 	napi_enable(&priv->napi);
959 
960 	mtk_star_intr_ack_all(priv);
961 	mtk_star_intr_enable(priv);
962 
963 	/* Connect to and start PHY */
964 	priv->phydev = of_phy_connect(ndev, priv->phy_node,
965 				      mtk_star_adjust_link, 0, priv->phy_intf);
966 	if (!priv->phydev) {
967 		netdev_err(ndev, "failed to connect to PHY\n");
968 		ret = -ENODEV;
969 		goto err_free_irq;
970 	}
971 
972 	mtk_star_dma_start(priv);
973 	phy_start(priv->phydev);
974 	netif_start_queue(ndev);
975 
976 	return 0;
977 
978 err_free_irq:
979 	free_irq(ndev->irq, ndev);
980 err_free_skbs:
981 	mtk_star_free_rx_skbs(priv);
982 err_out:
983 	return ret;
984 }
985 
/* Tear down everything set up by mtk_star_enable(): stop the queue and
 * NAPI, mask and ack interrupts, quiesce DMA, detach the PHY, release
 * the interrupt and free all ring skbs.
 */
static void mtk_star_disable(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	mtk_star_intr_disable(priv);
	mtk_star_dma_disable(priv);
	mtk_star_intr_ack_all(priv);
	phy_stop(priv->phydev);
	phy_disconnect(priv->phydev);
	free_irq(ndev->irq, ndev);
	mtk_star_free_rx_skbs(priv);
	mtk_star_free_tx_skbs(priv);
}
1001 
/* ndo_open callback. */
static int mtk_star_netdev_open(struct net_device *ndev)
{
	return mtk_star_enable(ndev);
}
1006 
/* ndo_stop callback. */
static int mtk_star_netdev_stop(struct net_device *ndev)
{
	mtk_star_disable(ndev);

	return 0;
}
1013 
1014 static int mtk_star_netdev_ioctl(struct net_device *ndev,
1015 				 struct ifreq *req, int cmd)
1016 {
1017 	if (!netif_running(ndev))
1018 		return -EINVAL;
1019 
1020 	return phy_mii_ioctl(ndev->phydev, req, cmd);
1021 }
1022 
1023 static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
1024 				      struct net_device *ndev)
1025 {
1026 	struct mtk_star_priv *priv = netdev_priv(ndev);
1027 	struct mtk_star_ring *ring = &priv->tx_ring;
1028 	struct device *dev = mtk_star_get_dev(priv);
1029 	struct mtk_star_ring_desc_data desc_data;
1030 
1031 	desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
1032 	if (dma_mapping_error(dev, desc_data.dma_addr))
1033 		goto err_drop_packet;
1034 
1035 	desc_data.skb = skb;
1036 	desc_data.len = skb->len;
1037 
1038 	spin_lock_bh(&priv->lock);
1039 
1040 	mtk_star_ring_push_head_tx(ring, &desc_data);
1041 
1042 	netdev_sent_queue(ndev, skb->len);
1043 
1044 	if (mtk_star_ring_full(ring))
1045 		netif_stop_queue(ndev);
1046 
1047 	spin_unlock_bh(&priv->lock);
1048 
1049 	mtk_star_dma_resume_tx(priv);
1050 
1051 	return NETDEV_TX_OK;
1052 
1053 err_drop_packet:
1054 	dev_kfree_skb(skb);
1055 	ndev->stats.tx_dropped++;
1056 	return NETDEV_TX_OK;
1057 }
1058 
1059 /* Returns the number of bytes sent or a negative number on the first
1060  * descriptor owned by DMA.
1061  */
1062 static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
1063 {
1064 	struct mtk_star_ring *ring = &priv->tx_ring;
1065 	struct mtk_star_ring_desc_data desc_data;
1066 	int ret;
1067 
1068 	ret = mtk_star_ring_pop_tail(ring, &desc_data);
1069 	if (ret)
1070 		return ret;
1071 
1072 	mtk_star_dma_unmap_tx(priv, &desc_data);
1073 	ret = desc_data.skb->len;
1074 	dev_kfree_skb_irq(desc_data.skb);
1075 
1076 	return ret;
1077 }
1078 
1079 static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
1080 {
1081 	struct mtk_star_ring *ring = &priv->tx_ring;
1082 	struct net_device *ndev = priv->ndev;
1083 	int ret, pkts_compl, bytes_compl;
1084 	bool wake = false;
1085 
1086 	spin_lock(&priv->lock);
1087 
1088 	for (pkts_compl = 0, bytes_compl = 0;;
1089 	     pkts_compl++, bytes_compl += ret, wake = true) {
1090 		if (!mtk_star_ring_descs_available(ring))
1091 			break;
1092 
1093 		ret = mtk_star_tx_complete_one(priv);
1094 		if (ret < 0)
1095 			break;
1096 	}
1097 
1098 	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
1099 
1100 	if (wake && netif_queue_stopped(ndev))
1101 		netif_wake_queue(ndev);
1102 
1103 	spin_unlock(&priv->lock);
1104 }
1105 
/* .ndo_get_stats64 callback - refresh the MIB counters from hardware and
 * copy the cached statistics to the caller.
 */
static void mtk_star_netdev_get_stats64(struct net_device *ndev,
					struct rtnl_link_stats64 *stats)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);

	mtk_star_update_stats(priv);

	memcpy(stats, &priv->stats, sizeof(*stats));
}
1115 
1116 static void mtk_star_set_rx_mode(struct net_device *ndev)
1117 {
1118 	struct mtk_star_priv *priv = netdev_priv(ndev);
1119 	struct netdev_hw_addr *hw_addr;
1120 	unsigned int hash_addr, i;
1121 	int ret;
1122 
1123 	if (ndev->flags & IFF_PROMISC) {
1124 		regmap_set_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
1125 				MTK_STAR_BIT_ARL_CFG_MISC_MODE);
1126 	} else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT ||
1127 		   ndev->flags & IFF_ALLMULTI) {
1128 		for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) {
1129 			ret = mtk_star_set_hashbit(priv, i);
1130 			if (ret)
1131 				goto hash_fail;
1132 		}
1133 	} else {
1134 		/* Clear previous settings. */
1135 		ret = mtk_star_reset_hash_table(priv);
1136 		if (ret)
1137 			goto hash_fail;
1138 
1139 		netdev_for_each_mc_addr(hw_addr, ndev) {
1140 			hash_addr = (hw_addr->addr[0] & 0x01) << 8;
1141 			hash_addr += hw_addr->addr[5];
1142 			ret = mtk_star_set_hashbit(priv, hash_addr);
1143 			if (ret)
1144 				goto hash_fail;
1145 		}
1146 	}
1147 
1148 	return;
1149 
1150 hash_fail:
1151 	if (ret == -ETIMEDOUT)
1152 		netdev_err(ndev, "setting hash bit timed out\n");
1153 	else
1154 		/* Should be -EIO */
1155 		netdev_err(ndev, "unable to set hash bit");
1156 }
1157 
/* net_device callbacks; hooks not listed here fall back to core defaults. */
static const struct net_device_ops mtk_star_netdev_ops = {
	.ndo_open		= mtk_star_netdev_open,
	.ndo_stop		= mtk_star_netdev_stop,
	.ndo_start_xmit		= mtk_star_netdev_start_xmit,
	.ndo_get_stats64	= mtk_star_netdev_get_stats64,
	.ndo_set_rx_mode	= mtk_star_set_rx_mode,
	.ndo_eth_ioctl		= mtk_star_netdev_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
1168 
1169 static void mtk_star_get_drvinfo(struct net_device *dev,
1170 				 struct ethtool_drvinfo *info)
1171 {
1172 	strlcpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
1173 }
1174 
/* TODO Add ethtool stats. */
/* Link settings are delegated to phylib helpers. */
static const struct ethtool_ops mtk_star_ethtool_ops = {
	.get_drvinfo		= mtk_star_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
1182 
/* Receive a single frame from the RX DMA ring.
 *
 * Pops the tail descriptor, hands the skb to the stack and pushes a freshly
 * allocated replacement skb back onto the ring. If the frame is bad or a
 * replacement cannot be allocated/mapped, the current skb (and its existing
 * DMA mapping) is recycled instead and the frame is dropped.
 *
 * Returns 0 on success, -1 when the ring has no completed descriptor (the
 * caller uses this to stop polling).
 */
static int mtk_star_receive_packet(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->rx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc_data desc_data;
	struct net_device *ndev = priv->ndev;
	struct sk_buff *curr_skb, *new_skb;
	dma_addr_t new_dma_addr;
	int ret;

	spin_lock(&priv->lock);
	ret = mtk_star_ring_pop_tail(ring, &desc_data);
	spin_unlock(&priv->lock);
	if (ret)
		return -1;

	curr_skb = desc_data.skb;

	if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
	    (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
		/* Error packet -> drop and reuse skb. */
		new_skb = curr_skb;
		goto push_new_skb;
	}

	/* Prepare new skb before receiving the current one. Reuse the current
	 * skb if we fail at any point.
	 */
	new_skb = mtk_star_alloc_skb(ndev);
	if (!new_skb) {
		ndev->stats.rx_dropped++;
		new_skb = curr_skb;
		goto push_new_skb;
	}

	new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
	if (dma_mapping_error(dev, new_dma_addr)) {
		ndev->stats.rx_dropped++;
		dev_kfree_skb(new_skb);
		new_skb = curr_skb;
		netdev_err(ndev, "DMA mapping error of RX descriptor\n");
		goto push_new_skb;
	}

	/* We can't fail anymore at this point: it's safe to unmap the skb. */
	mtk_star_dma_unmap_rx(priv, &desc_data);

	skb_put(desc_data.skb, desc_data.len);
	desc_data.skb->ip_summed = CHECKSUM_NONE;	/* no HW checksum offload */
	desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
	desc_data.skb->dev = ndev;
	netif_receive_skb(desc_data.skb);

	/* update dma_addr for new skb */
	desc_data.dma_addr = new_dma_addr;

push_new_skb:
	/* On the recycle paths desc_data.dma_addr still holds the original
	 * (still mapped) buffer address, so the descriptor stays valid.
	 */
	desc_data.len = skb_tailroom(new_skb);
	desc_data.skb = new_skb;

	spin_lock(&priv->lock);
	mtk_star_ring_push_head_rx(ring, &desc_data);
	spin_unlock(&priv->lock);

	return 0;
}
1249 
/* Receive up to 'budget' frames, then kick the RX DMA engine.
 *
 * Note: the returned count also includes the final (failed) poll attempt,
 * matching the loop-counter semantics of the original implementation.
 */
static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
{
	int done = 0;

	while (done < budget) {
		int err = mtk_star_receive_packet(priv);

		done++;
		if (err)
			break;
	}

	mtk_star_dma_resume_rx(priv);

	return done;
}
1261 
1262 static int mtk_star_poll(struct napi_struct *napi, int budget)
1263 {
1264 	struct mtk_star_priv *priv;
1265 	unsigned int status;
1266 	int received = 0;
1267 
1268 	priv = container_of(napi, struct mtk_star_priv, napi);
1269 
1270 	status = mtk_star_intr_read(priv);
1271 	mtk_star_intr_ack_all(priv);
1272 
1273 	if (status & MTK_STAR_BIT_INT_STS_TNTC)
1274 		/* Clean-up all TX descriptors. */
1275 		mtk_star_tx_complete_all(priv);
1276 
1277 	if (status & MTK_STAR_BIT_INT_STS_FNRC)
1278 		/* Receive up to $budget packets. */
1279 		received = mtk_star_process_rx(priv, budget);
1280 
1281 	if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
1282 		mtk_star_update_stats(priv);
1283 		mtk_star_reset_counters(priv);
1284 	}
1285 
1286 	if (received < budget)
1287 		napi_complete_done(napi, received);
1288 
1289 	mtk_star_intr_enable(priv);
1290 
1291 	return received;
1292 }
1293 
/* Clear the MDIO read/write-done flag (write-one-to-clear). */
static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0,
		     MTK_STAR_BIT_PHY_CTRL0_RWOK);
}
1299 
/* Poll for MDIO transaction completion.
 *
 * Returns 0 when the RWOK bit is set, -ETIMEDOUT after
 * MTK_STAR_WAIT_TIMEOUT microseconds.
 */
static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv)
{
	unsigned int val;

	return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0,
					val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK,
					10, MTK_STAR_WAIT_TIMEOUT);
}
1308 
1309 static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
1310 {
1311 	struct mtk_star_priv *priv = mii->priv;
1312 	unsigned int val, data;
1313 	int ret;
1314 
1315 	if (regnum & MII_ADDR_C45)
1316 		return -EOPNOTSUPP;
1317 
1318 	mtk_star_mdio_rwok_clear(priv);
1319 
1320 	val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
1321 	val &= MTK_STAR_MSK_PHY_CTRL0_PREG;
1322 	val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD;
1323 
1324 	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
1325 
1326 	ret = mtk_star_mdio_rwok_wait(priv);
1327 	if (ret)
1328 		return ret;
1329 
1330 	regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data);
1331 
1332 	data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
1333 	data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
1334 
1335 	return data;
1336 }
1337 
1338 static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
1339 			       int regnum, u16 data)
1340 {
1341 	struct mtk_star_priv *priv = mii->priv;
1342 	unsigned int val;
1343 
1344 	if (regnum & MII_ADDR_C45)
1345 		return -EOPNOTSUPP;
1346 
1347 	mtk_star_mdio_rwok_clear(priv);
1348 
1349 	val = data;
1350 	val <<= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
1351 	val &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
1352 	regnum <<= MTK_STAR_OFF_PHY_CTRL0_PREG;
1353 	regnum &= MTK_STAR_MSK_PHY_CTRL0_PREG;
1354 	val |= regnum;
1355 	val |= MTK_STAR_BIT_PHY_CTRL0_WTCMD;
1356 
1357 	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
1358 
1359 	return mtk_star_mdio_rwok_wait(priv);
1360 }
1361 
/* Register the MDIO bus described by the "mdio" child node of the device.
 *
 * Returns 0 on success, -ENODEV if the node is absent or disabled, or a
 * negative error from allocation/registration. The mdio node reference is
 * always released before returning.
 */
static int mtk_star_mdio_init(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct device *dev = mtk_star_get_dev(priv);
	struct device_node *of_node, *mdio_node;
	int ret;

	of_node = dev->of_node;

	mdio_node = of_get_child_by_name(of_node, "mdio");
	if (!mdio_node)
		return -ENODEV;

	if (!of_device_is_available(mdio_node)) {
		ret = -ENODEV;
		goto out_put_node;
	}

	/* Managed allocation/registration: no explicit teardown needed. */
	priv->mii = devm_mdiobus_alloc(dev);
	if (!priv->mii) {
		ret = -ENOMEM;
		goto out_put_node;
	}

	snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
	priv->mii->name = "mtk-mac-mdio";
	priv->mii->parent = dev;
	priv->mii->read = mtk_star_mdio_read;
	priv->mii->write = mtk_star_mdio_write;
	priv->mii->priv = priv;

	ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node);

out_put_node:
	/* Drop the reference taken by of_get_child_by_name(). */
	of_node_put(mdio_node);
	return ret;
}
1399 
1400 static __maybe_unused int mtk_star_suspend(struct device *dev)
1401 {
1402 	struct mtk_star_priv *priv;
1403 	struct net_device *ndev;
1404 
1405 	ndev = dev_get_drvdata(dev);
1406 	priv = netdev_priv(ndev);
1407 
1408 	if (netif_running(ndev))
1409 		mtk_star_disable(ndev);
1410 
1411 	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1412 
1413 	return 0;
1414 }
1415 
1416 static __maybe_unused int mtk_star_resume(struct device *dev)
1417 {
1418 	struct mtk_star_priv *priv;
1419 	struct net_device *ndev;
1420 	int ret;
1421 
1422 	ndev = dev_get_drvdata(dev);
1423 	priv = netdev_priv(ndev);
1424 
1425 	ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
1426 	if (ret)
1427 		return ret;
1428 
1429 	if (netif_running(ndev)) {
1430 		ret = mtk_star_enable(ndev);
1431 		if (ret)
1432 			clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1433 	}
1434 
1435 	return ret;
1436 }
1437 
/* devm action callback: gate the bulk clocks on driver unbind. */
static void mtk_star_clk_disable_unprepare(void *data)
{
	struct mtk_star_priv *priv = data;

	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
1444 
/* Platform probe: map registers, acquire clocks/IRQ/PHY resources, set up
 * DMA memory and register the net_device. All resources are devm-managed,
 * so error paths simply return.
 */
static int mtk_star_probe(struct platform_device *pdev)
{
	struct device_node *of_node;
	struct mtk_star_priv *priv;
	struct net_device *ndev;
	struct device *dev;
	void __iomem *base;
	int ret, i;

	dev = &pdev->dev;
	of_node = dev->of_node;

	ndev = devm_alloc_etherdev(dev, sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, ndev);

	ndev->min_mtu = ETH_ZLEN;
	ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE;

	spin_lock_init(&priv->lock);

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* We won't be checking the return values of regmap read & write
	 * functions. They can only fail for mmio if there's a clock attached
	 * to regmap which is not the case here.
	 */
	priv->regs = devm_regmap_init_mmio(dev, base,
					   &mtk_star_regmap_config);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	priv->pericfg = syscon_regmap_lookup_by_phandle(of_node,
							"mediatek,pericfg");
	if (IS_ERR(priv->pericfg)) {
		dev_err(dev, "Failed to lookup the PERICFG syscon\n");
		return PTR_ERR(priv->pericfg);
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0)
		return ndev->irq;

	for (i = 0; i < MTK_STAR_NCLKS; i++)
		priv->clks[i].id = mtk_star_clk_names[i];
	ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	/* Ensure the clocks are gated again on unbind and on later errors. */
	ret = devm_add_action_or_reset(dev,
				       mtk_star_clk_disable_unprepare, priv);
	if (ret)
		return ret;

	/* Only RMII is supported by this controller. */
	ret = of_get_phy_mode(of_node, &priv->phy_intf);
	if (ret) {
		return ret;
	} else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII) {
		dev_err(dev, "unsupported phy mode: %s\n",
			phy_modes(priv->phy_intf));
		return -EINVAL;
	}

	/* NOTE(review): of_parse_phandle() takes a reference on phy_node
	 * that does not appear to be dropped on later error paths or on
	 * unbind - confirm whether an of_node_put() is needed.
	 */
	priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		dev_err(dev, "failed to retrieve the phy handle from device tree\n");
		return -ENODEV;
	}

	mtk_star_set_mode_rmii(priv);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "unsupported DMA mask\n");
		return ret;
	}

	/* NOTE(review): GFP_DMA alongside the 32-bit DMA mask looks
	 * redundant - presumably a platform requirement; verify.
	 */
	priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE,
					      &priv->dma_addr,
					      GFP_KERNEL | GFP_DMA);
	if (!priv->ring_base)
		return -ENOMEM;

	mtk_star_nic_disable_pd(priv);
	mtk_star_init_config(priv);

	ret = mtk_star_mdio_init(ndev);
	if (ret)
		return ret;

	/* Fall back to a random MAC if the platform provides none. */
	ret = platform_get_ethdev_address(dev, ndev);
	if (ret || !is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	ndev->netdev_ops = &mtk_star_netdev_ops;
	ndev->ethtool_ops = &mtk_star_ethtool_ops;

	netif_napi_add(ndev, &priv->napi, mtk_star_poll, NAPI_POLL_WEIGHT);

	return devm_register_netdev(dev, ndev);
}
1557 
#ifdef CONFIG_OF
/* Supported SoCs; all variants are handled identically. */
static const struct of_device_id mtk_star_of_match[] = {
	{ .compatible = "mediatek,mt8516-eth", },
	{ .compatible = "mediatek,mt8518-eth", },
	{ .compatible = "mediatek,mt8175-eth", },
	{ }
};
MODULE_DEVICE_TABLE(of, mtk_star_of_match);
#endif
1567 
/* System sleep PM only; no runtime PM. */
static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
			 mtk_star_suspend, mtk_star_resume);

/* No .remove callback: everything in probe is devm-managed. */
static struct platform_driver mtk_star_driver = {
	.driver = {
		.name = MTK_STAR_DRVNAME,
		.pm = &mtk_star_pm_ops,
		.of_match_table = of_match_ptr(mtk_star_of_match),
	},
	.probe = mtk_star_probe,
};
module_platform_driver(mtk_star_driver);

MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver");
MODULE_LICENSE("GPL");
1584