xref: /openbmc/linux/drivers/net/ethernet/mediatek/mtk_eth_soc.c (revision a9ca9f9ceff382b58b488248f0c0da9e157f5d06)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5  *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6  *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7  */
8 
9 #include <linux/of.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/platform_device.h>
15 #include <linux/regmap.h>
16 #include <linux/clk.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/if_vlan.h>
19 #include <linux/reset.h>
20 #include <linux/tcp.h>
21 #include <linux/interrupt.h>
22 #include <linux/pinctrl/devinfo.h>
23 #include <linux/phylink.h>
24 #include <linux/pcs/pcs-mtk-lynxi.h>
25 #include <linux/jhash.h>
26 #include <linux/bitfield.h>
27 #include <net/dsa.h>
28 #include <net/dst_metadata.h>
29 #include <net/page_pool/helpers.h>
30 
31 #include "mtk_eth_soc.h"
32 #include "mtk_wed.h"
33 
34 static int mtk_msg_level = -1;
35 module_param_named(msg_level, mtk_msg_level, int, 0);
36 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
37 
38 #define MTK_ETHTOOL_STAT(x) { #x, \
39 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
40 
41 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
42 				  offsetof(struct mtk_hw_stats, xdp_stats.x) / \
43 				  sizeof(u64) }
44 
45 static const struct mtk_reg_map mtk_reg_map = {
46 	.tx_irq_mask		= 0x1a1c,
47 	.tx_irq_status		= 0x1a18,
48 	.pdma = {
49 		.rx_ptr		= 0x0900,
50 		.rx_cnt_cfg	= 0x0904,
51 		.pcrx_ptr	= 0x0908,
52 		.glo_cfg	= 0x0a04,
53 		.rst_idx	= 0x0a08,
54 		.delay_irq	= 0x0a0c,
55 		.irq_status	= 0x0a20,
56 		.irq_mask	= 0x0a28,
57 		.adma_rx_dbg0	= 0x0a38,
58 		.int_grp	= 0x0a50,
59 	},
60 	.qdma = {
61 		.qtx_cfg	= 0x1800,
62 		.qtx_sch	= 0x1804,
63 		.rx_ptr		= 0x1900,
64 		.rx_cnt_cfg	= 0x1904,
65 		.qcrx_ptr	= 0x1908,
66 		.glo_cfg	= 0x1a04,
67 		.rst_idx	= 0x1a08,
68 		.delay_irq	= 0x1a0c,
69 		.fc_th		= 0x1a10,
70 		.tx_sch_rate	= 0x1a14,
71 		.int_grp	= 0x1a20,
72 		.hred		= 0x1a44,
73 		.ctx_ptr	= 0x1b00,
74 		.dtx_ptr	= 0x1b04,
75 		.crx_ptr	= 0x1b10,
76 		.drx_ptr	= 0x1b14,
77 		.fq_head	= 0x1b20,
78 		.fq_tail	= 0x1b24,
79 		.fq_count	= 0x1b28,
80 		.fq_blen	= 0x1b2c,
81 	},
82 	.gdm1_cnt		= 0x2400,
83 	.gdma_to_ppe		= 0x4444,
84 	.ppe_base		= 0x0c00,
85 	.wdma_base = {
86 		[0]		= 0x2800,
87 		[1]		= 0x2c00,
88 	},
89 	.pse_iq_sta		= 0x0110,
90 	.pse_oq_sta		= 0x0118,
91 };
92 
93 static const struct mtk_reg_map mt7628_reg_map = {
94 	.tx_irq_mask		= 0x0a28,
95 	.tx_irq_status		= 0x0a20,
96 	.pdma = {
97 		.rx_ptr		= 0x0900,
98 		.rx_cnt_cfg	= 0x0904,
99 		.pcrx_ptr	= 0x0908,
100 		.glo_cfg	= 0x0a04,
101 		.rst_idx	= 0x0a08,
102 		.delay_irq	= 0x0a0c,
103 		.irq_status	= 0x0a20,
104 		.irq_mask	= 0x0a28,
105 		.int_grp	= 0x0a50,
106 	},
107 };
108 
109 static const struct mtk_reg_map mt7986_reg_map = {
110 	.tx_irq_mask		= 0x461c,
111 	.tx_irq_status		= 0x4618,
112 	.pdma = {
113 		.rx_ptr		= 0x6100,
114 		.rx_cnt_cfg	= 0x6104,
115 		.pcrx_ptr	= 0x6108,
116 		.glo_cfg	= 0x6204,
117 		.rst_idx	= 0x6208,
118 		.delay_irq	= 0x620c,
119 		.irq_status	= 0x6220,
120 		.irq_mask	= 0x6228,
121 		.adma_rx_dbg0	= 0x6238,
122 		.int_grp	= 0x6250,
123 	},
124 	.qdma = {
125 		.qtx_cfg	= 0x4400,
126 		.qtx_sch	= 0x4404,
127 		.rx_ptr		= 0x4500,
128 		.rx_cnt_cfg	= 0x4504,
129 		.qcrx_ptr	= 0x4508,
130 		.glo_cfg	= 0x4604,
131 		.rst_idx	= 0x4608,
132 		.delay_irq	= 0x460c,
133 		.fc_th		= 0x4610,
134 		.int_grp	= 0x4620,
135 		.hred		= 0x4644,
136 		.ctx_ptr	= 0x4700,
137 		.dtx_ptr	= 0x4704,
138 		.crx_ptr	= 0x4710,
139 		.drx_ptr	= 0x4714,
140 		.fq_head	= 0x4720,
141 		.fq_tail	= 0x4724,
142 		.fq_count	= 0x4728,
143 		.fq_blen	= 0x472c,
144 		.tx_sch_rate	= 0x4798,
145 	},
146 	.gdm1_cnt		= 0x1c00,
147 	.gdma_to_ppe		= 0x3333,
148 	.ppe_base		= 0x2000,
149 	.wdma_base = {
150 		[0]		= 0x4800,
151 		[1]		= 0x4c00,
152 	},
153 	.pse_iq_sta		= 0x0180,
154 	.pse_oq_sta		= 0x01a0,
155 };
156 
157 static const struct mtk_reg_map mt7988_reg_map = {
158 	.tx_irq_mask		= 0x461c,
159 	.tx_irq_status		= 0x4618,
160 	.pdma = {
161 		.rx_ptr		= 0x6900,
162 		.rx_cnt_cfg	= 0x6904,
163 		.pcrx_ptr	= 0x6908,
164 		.glo_cfg	= 0x6a04,
165 		.rst_idx	= 0x6a08,
166 		.delay_irq	= 0x6a0c,
167 		.irq_status	= 0x6a20,
168 		.irq_mask	= 0x6a28,
169 		.adma_rx_dbg0	= 0x6a38,
170 		.int_grp	= 0x6a50,
171 	},
172 	.qdma = {
173 		.qtx_cfg	= 0x4400,
174 		.qtx_sch	= 0x4404,
175 		.rx_ptr		= 0x4500,
176 		.rx_cnt_cfg	= 0x4504,
177 		.qcrx_ptr	= 0x4508,
178 		.glo_cfg	= 0x4604,
179 		.rst_idx	= 0x4608,
180 		.delay_irq	= 0x460c,
181 		.fc_th		= 0x4610,
182 		.int_grp	= 0x4620,
183 		.hred		= 0x4644,
184 		.ctx_ptr	= 0x4700,
185 		.dtx_ptr	= 0x4704,
186 		.crx_ptr	= 0x4710,
187 		.drx_ptr	= 0x4714,
188 		.fq_head	= 0x4720,
189 		.fq_tail	= 0x4724,
190 		.fq_count	= 0x4728,
191 		.fq_blen	= 0x472c,
192 		.tx_sch_rate	= 0x4798,
193 	},
194 	.gdm1_cnt		= 0x1c00,
195 	.gdma_to_ppe		= 0x3333,
196 	.ppe_base		= 0x2000,
197 	.wdma_base = {
198 		[0]		= 0x4800,
199 		[1]		= 0x4c00,
200 	},
201 	.pse_iq_sta		= 0x0180,
202 	.pse_oq_sta		= 0x01a0,
203 };
204 
205 /* strings used by ethtool */
206 static const struct mtk_ethtool_stats {
207 	char str[ETH_GSTRING_LEN];
208 	u32 offset;
209 } mtk_ethtool_stats[] = {
210 	MTK_ETHTOOL_STAT(tx_bytes),
211 	MTK_ETHTOOL_STAT(tx_packets),
212 	MTK_ETHTOOL_STAT(tx_skip),
213 	MTK_ETHTOOL_STAT(tx_collisions),
214 	MTK_ETHTOOL_STAT(rx_bytes),
215 	MTK_ETHTOOL_STAT(rx_packets),
216 	MTK_ETHTOOL_STAT(rx_overflow),
217 	MTK_ETHTOOL_STAT(rx_fcs_errors),
218 	MTK_ETHTOOL_STAT(rx_short_errors),
219 	MTK_ETHTOOL_STAT(rx_long_errors),
220 	MTK_ETHTOOL_STAT(rx_checksum_errors),
221 	MTK_ETHTOOL_STAT(rx_flow_control_packets),
222 	MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
223 	MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
224 	MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
225 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
226 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
227 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
228 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
229 };
230 
231 static const char * const mtk_clks_source_name[] = {
232 	"ethif",
233 	"sgmiitop",
234 	"esw",
235 	"gp0",
236 	"gp1",
237 	"gp2",
238 	"gp3",
239 	"xgp1",
240 	"xgp2",
241 	"xgp3",
242 	"crypto",
243 	"fe",
244 	"trgpll",
245 	"sgmii_tx250m",
246 	"sgmii_rx250m",
247 	"sgmii_cdr_ref",
248 	"sgmii_cdr_fb",
249 	"sgmii2_tx250m",
250 	"sgmii2_rx250m",
251 	"sgmii2_cdr_ref",
252 	"sgmii2_cdr_fb",
253 	"sgmii_ck",
254 	"eth2pll",
255 	"wocpu0",
256 	"wocpu1",
257 	"netsys0",
258 	"netsys1",
259 	"ethwarp_wocpu2",
260 	"ethwarp_wocpu1",
261 	"ethwarp_wocpu0",
262 	"top_usxgmii0_sel",
263 	"top_usxgmii1_sel",
264 	"top_sgm0_sel",
265 	"top_sgm1_sel",
266 	"top_xfi_phy0_xtal_sel",
267 	"top_xfi_phy1_xtal_sel",
268 	"top_eth_gmii_sel",
269 	"top_eth_refck_50m_sel",
270 	"top_eth_sys_200m_sel",
271 	"top_eth_sys_sel",
272 	"top_eth_xgmii_sel",
273 	"top_eth_mii_sel",
274 	"top_netsys_sel",
275 	"top_netsys_500m_sel",
276 	"top_netsys_pao_2x_sel",
277 	"top_netsys_sync_250m_sel",
278 	"top_netsys_ppefb_250m_sel",
279 	"top_netsys_warp_sel",
280 };
281 
282 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
283 {
284 	__raw_writel(val, eth->base + reg);
285 }
286 
287 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
288 {
289 	return __raw_readl(eth->base + reg);
290 }
291 
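/* Read-modify-write helper: clear the bits in @mask, set the bits in @set
 * and write the result back to register @reg.
 */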
292 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
293 {
294 	u32 val;
295 
296 	val = mtk_r32(eth, reg);
297 	val &= ~mask;
298 	val |= set;
299 	mtk_w32(eth, val, reg);
300 	return reg;
301 }
302 
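/* Poll the PHY indirect access controller until it becomes idle, giving up
 * after PHY_IAC_TIMEOUT.
 */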
303 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
304 {
305 	unsigned long t_start = jiffies;
306 
307 	while (1) {
308 		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
309 			return 0;
310 		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
311 			break;
312 		cond_resched();
313 	}
314 
315 	dev_err(eth->dev, "mdio: MDIO timeout\n");
316 	return -ETIMEDOUT;
317 }
318 
319 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
320 			       u32 write_data)
321 {
322 	int ret;
323 
324 	ret = mtk_mdio_busy_wait(eth);
325 	if (ret < 0)
326 		return ret;
327 
328 	mtk_w32(eth, PHY_IAC_ACCESS |
329 		PHY_IAC_START_C22 |
330 		PHY_IAC_CMD_WRITE |
331 		PHY_IAC_REG(phy_reg) |
332 		PHY_IAC_ADDR(phy_addr) |
333 		PHY_IAC_DATA(write_data),
334 		MTK_PHY_IAC);
335 
336 	ret = mtk_mdio_busy_wait(eth);
337 	if (ret < 0)
338 		return ret;
339 
340 	return 0;
341 }
342 
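/* Clause 45 accesses are two-phase: latch the register address with a
 * C45_ADDR cycle first, then issue the actual data write.
 */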
343 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
344 			       u32 devad, u32 phy_reg, u32 write_data)
345 {
346 	int ret;
347 
348 	ret = mtk_mdio_busy_wait(eth);
349 	if (ret < 0)
350 		return ret;
351 
352 	mtk_w32(eth, PHY_IAC_ACCESS |
353 		PHY_IAC_START_C45 |
354 		PHY_IAC_CMD_C45_ADDR |
355 		PHY_IAC_REG(devad) |
356 		PHY_IAC_ADDR(phy_addr) |
357 		PHY_IAC_DATA(phy_reg),
358 		MTK_PHY_IAC);
359 
360 	ret = mtk_mdio_busy_wait(eth);
361 	if (ret < 0)
362 		return ret;
363 
364 	mtk_w32(eth, PHY_IAC_ACCESS |
365 		PHY_IAC_START_C45 |
366 		PHY_IAC_CMD_WRITE |
367 		PHY_IAC_REG(devad) |
368 		PHY_IAC_ADDR(phy_addr) |
369 		PHY_IAC_DATA(write_data),
370 		MTK_PHY_IAC);
371 
372 	ret = mtk_mdio_busy_wait(eth);
373 	if (ret < 0)
374 		return ret;
375 
376 	return 0;
377 }
378 
379 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
380 {
381 	int ret;
382 
383 	ret = mtk_mdio_busy_wait(eth);
384 	if (ret < 0)
385 		return ret;
386 
387 	mtk_w32(eth, PHY_IAC_ACCESS |
388 		PHY_IAC_START_C22 |
389 		PHY_IAC_CMD_C22_READ |
390 		PHY_IAC_REG(phy_reg) |
391 		PHY_IAC_ADDR(phy_addr),
392 		MTK_PHY_IAC);
393 
394 	ret = mtk_mdio_busy_wait(eth);
395 	if (ret < 0)
396 		return ret;
397 
398 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
399 }
400 
401 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
402 			      u32 devad, u32 phy_reg)
403 {
404 	int ret;
405 
406 	ret = mtk_mdio_busy_wait(eth);
407 	if (ret < 0)
408 		return ret;
409 
410 	mtk_w32(eth, PHY_IAC_ACCESS |
411 		PHY_IAC_START_C45 |
412 		PHY_IAC_CMD_C45_ADDR |
413 		PHY_IAC_REG(devad) |
414 		PHY_IAC_ADDR(phy_addr) |
415 		PHY_IAC_DATA(phy_reg),
416 		MTK_PHY_IAC);
417 
418 	ret = mtk_mdio_busy_wait(eth);
419 	if (ret < 0)
420 		return ret;
421 
422 	mtk_w32(eth, PHY_IAC_ACCESS |
423 		PHY_IAC_START_C45 |
424 		PHY_IAC_CMD_C45_READ |
425 		PHY_IAC_REG(devad) |
426 		PHY_IAC_ADDR(phy_addr),
427 		MTK_PHY_IAC);
428 
429 	ret = mtk_mdio_busy_wait(eth);
430 	if (ret < 0)
431 		return ret;
432 
433 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
434 }
435 
436 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
437 			      int phy_reg, u16 val)
438 {
439 	struct mtk_eth *eth = bus->priv;
440 
441 	return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
442 }
443 
444 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
445 			      int devad, int phy_reg, u16 val)
446 {
447 	struct mtk_eth *eth = bus->priv;
448 
449 	return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
450 }
451 
452 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
453 {
454 	struct mtk_eth *eth = bus->priv;
455 
456 	return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
457 }
458 
459 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
460 			     int phy_reg)
461 {
462 	struct mtk_eth *eth = bus->priv;
463 
464 	return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
465 }
466 
467 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
468 				     phy_interface_t interface)
469 {
470 	u32 val;
471 
472 	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
473 		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
474 
475 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
476 			   ETHSYS_TRGMII_MT7621_MASK, val);
477 
478 	return 0;
479 }
480 
481 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
482 				   phy_interface_t interface)
483 {
484 	int ret;
485 
486 	if (interface == PHY_INTERFACE_MODE_TRGMII) {
487 		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
488 		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
489 		if (ret)
490 			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
491 		return;
492 	}
493 
494 	dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
495 }
496 
497 static void mtk_setup_bridge_switch(struct mtk_eth *eth)
498 {
499 	/* Force Port1 XGMAC Link Up */
500 	mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
501 		MTK_XGMAC_STS(MTK_GMAC1_ID));
502 
503 	/* Adjust GSW bridge IPG to 11 */
504 	mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
505 		(GSW_IPG_11 << GSWTX_IPG_SHIFT) |
506 		(GSW_IPG_11 << GSWRX_IPG_SHIFT),
507 		MTK_GSW_CFG);
508 }
509 
510 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
511 					      phy_interface_t interface)
512 {
513 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
514 					   phylink_config);
515 	struct mtk_eth *eth = mac->hw;
516 	unsigned int sid;
517 
518 	if (interface == PHY_INTERFACE_MODE_SGMII ||
519 	    phy_interface_mode_is_8023z(interface)) {
520 		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
521 		       0 : mac->id;
522 
523 		return eth->sgmii_pcs[sid];
524 	}
525 
526 	return NULL;
527 }
528 
529 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
530 			   const struct phylink_link_state *state)
531 {
532 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
533 					   phylink_config);
534 	struct mtk_eth *eth = mac->hw;
535 	int val, ge_mode, err = 0;
536 	u32 i;
537 
538 	/* MT76x8 has no hardware settings for the MAC */
539 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
540 	    mac->interface != state->interface) {
541 		/* Setup soc pin functions */
542 		switch (state->interface) {
543 		case PHY_INTERFACE_MODE_TRGMII:
544 		case PHY_INTERFACE_MODE_RGMII_TXID:
545 		case PHY_INTERFACE_MODE_RGMII_RXID:
546 		case PHY_INTERFACE_MODE_RGMII_ID:
547 		case PHY_INTERFACE_MODE_RGMII:
548 		case PHY_INTERFACE_MODE_MII:
549 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
550 				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
551 				if (err)
552 					goto init_err;
553 			}
554 			break;
555 		case PHY_INTERFACE_MODE_1000BASEX:
556 		case PHY_INTERFACE_MODE_2500BASEX:
557 		case PHY_INTERFACE_MODE_SGMII:
558 			err = mtk_gmac_sgmii_path_setup(eth, mac->id);
559 			if (err)
560 				goto init_err;
561 			break;
562 		case PHY_INTERFACE_MODE_GMII:
563 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
564 				err = mtk_gmac_gephy_path_setup(eth, mac->id);
565 				if (err)
566 					goto init_err;
567 			}
568 			break;
569 		case PHY_INTERFACE_MODE_INTERNAL:
570 			break;
571 		default:
572 			goto err_phy;
573 		}
574 
575 		/* Setup clock for 1st gmac */
576 		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
577 		    !phy_interface_mode_is_8023z(state->interface) &&
578 		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
579 			if (MTK_HAS_CAPS(mac->hw->soc->caps,
580 					 MTK_TRGMII_MT7621_CLK)) {
581 				if (mt7621_gmac0_rgmii_adjust(mac->hw,
582 							      state->interface))
583 					goto err_phy;
584 			} else {
585 				mtk_gmac0_rgmii_adjust(mac->hw,
586 						       state->interface);
587 
588 				/* mt7623_pad_clk_setup */
589 				for (i = 0; i < NUM_TRGMII_CTRL; i++)
590 					mtk_w32(mac->hw,
591 						TD_DM_DRVP(8) | TD_DM_DRVN(8),
592 						TRGMII_TD_ODT(i));
593 
594 				/* Assert/release MT7623 RXC reset */
595 				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
596 					TRGMII_RCK_CTRL);
597 				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
598 			}
599 		}
600 
601 		switch (state->interface) {
602 		case PHY_INTERFACE_MODE_MII:
603 		case PHY_INTERFACE_MODE_GMII:
604 			ge_mode = 1;
605 			break;
606 		default:
607 			ge_mode = 0;
608 			break;
609 		}
610 
611 		/* put the gmac into the right mode */
612 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
613 		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
614 		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
615 		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
616 
617 		mac->interface = state->interface;
618 	}
619 
620 	/* SGMII */
621 	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
622 	    phy_interface_mode_is_8023z(state->interface)) {
623 		/* The path from GMAC to SGMII will be enabled once the
624 		 * SGMIISYS setup is done.
625 		 */
626 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
627 
628 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
629 				   SYSCFG0_SGMII_MASK,
630 				   ~(u32)SYSCFG0_SGMII_MASK);
631 
632 		/* Save the syscfg0 value for mac_finish */
633 		mac->syscfg0 = val;
634 	} else if (phylink_autoneg_inband(mode)) {
635 		dev_err(eth->dev,
636 			"In-band mode not supported in non SGMII mode!\n");
637 		return;
638 	}
639 
640 	/* Setup gmac */
641 	if (mtk_is_netsys_v3_or_greater(eth) &&
642 	    mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
643 		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
644 		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
645 
646 		mtk_setup_bridge_switch(eth);
647 	}
648 
649 	return;
650 
651 err_phy:
652 	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
653 		mac->id, phy_modes(state->interface));
654 	return;
655 
656 init_err:
657 	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
658 		mac->id, phy_modes(state->interface), err);
659 }
660 
661 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
662 			  phy_interface_t interface)
663 {
664 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
665 					   phylink_config);
666 	struct mtk_eth *eth = mac->hw;
667 	u32 mcr_cur, mcr_new;
668 
669 	/* Enable SGMII */
670 	if (interface == PHY_INTERFACE_MODE_SGMII ||
671 	    phy_interface_mode_is_8023z(interface))
672 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
673 				   SYSCFG0_SGMII_MASK, mac->syscfg0);
674 
675 	/* Setup gmac */
676 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
677 	mcr_new = mcr_cur;
678 	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
679 		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
680 		   MAC_MCR_RX_FIFO_CLR_DIS;
681 
682 	/* Only update control register when needed! */
683 	if (mcr_new != mcr_cur)
684 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
685 
686 	return 0;
687 }
688 
689 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
690 			      phy_interface_t interface)
691 {
692 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
693 					   phylink_config);
694 	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
695 
696 	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
697 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
698 }
699 
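/* Program the QDMA TX scheduler for queue @idx: the 10 Mbps minimum rate is
 * always enabled, and the maximum rate shaper is set to match the negotiated
 * link speed (MT7621 uses different mantissa/exponent encodings).
 */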
700 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
701 				int speed)
702 {
703 	const struct mtk_soc_data *soc = eth->soc;
704 	u32 ofs, val;
705 
706 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
707 		return;
708 
709 	val = MTK_QTX_SCH_MIN_RATE_EN |
710 	      /* minimum: 10 Mbps */
711 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
712 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
713 	      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
714 	if (mtk_is_netsys_v1(eth))
715 		val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
716 
717 	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
718 		switch (speed) {
719 		case SPEED_10:
720 			val |= MTK_QTX_SCH_MAX_RATE_EN |
721 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
722 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
723 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
724 			break;
725 		case SPEED_100:
726 			val |= MTK_QTX_SCH_MAX_RATE_EN |
727 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
728 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
729 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
730 			break;
731 		case SPEED_1000:
732 			val |= MTK_QTX_SCH_MAX_RATE_EN |
733 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
734 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
735 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
736 			break;
737 		default:
738 			break;
739 		}
740 	} else {
741 		switch (speed) {
742 		case SPEED_10:
743 			val |= MTK_QTX_SCH_MAX_RATE_EN |
744 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
745 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
746 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
747 			break;
748 		case SPEED_100:
749 			val |= MTK_QTX_SCH_MAX_RATE_EN |
750 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
751 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
752 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
753 			break;
754 		case SPEED_1000:
755 			val |= MTK_QTX_SCH_MAX_RATE_EN |
756 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
757 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
758 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
759 			break;
760 		default:
761 			break;
762 		}
763 	}
764 
765 	ofs = MTK_QTX_OFFSET * idx;
766 	mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
767 }
768 
769 static void mtk_mac_link_up(struct phylink_config *config,
770 			    struct phy_device *phy,
771 			    unsigned int mode, phy_interface_t interface,
772 			    int speed, int duplex, bool tx_pause, bool rx_pause)
773 {
774 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
775 					   phylink_config);
776 	u32 mcr;
777 
778 	mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
779 	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
780 		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
781 		 MAC_MCR_FORCE_RX_FC);
782 
783 	/* Configure speed */
784 	mac->speed = speed;
785 	switch (speed) {
786 	case SPEED_2500:
787 	case SPEED_1000:
788 		mcr |= MAC_MCR_SPEED_1000;
789 		break;
790 	case SPEED_100:
791 		mcr |= MAC_MCR_SPEED_100;
792 		break;
793 	}
794 
795 	/* Configure duplex */
796 	if (duplex == DUPLEX_FULL)
797 		mcr |= MAC_MCR_FORCE_DPX;
798 
799 	/* Configure pause modes - phylink will avoid these for half duplex */
800 	if (tx_pause)
801 		mcr |= MAC_MCR_FORCE_TX_FC;
802 	if (rx_pause)
803 		mcr |= MAC_MCR_FORCE_RX_FC;
804 
805 	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
806 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
807 }
808 
809 static const struct phylink_mac_ops mtk_phylink_ops = {
810 	.mac_select_pcs = mtk_mac_select_pcs,
811 	.mac_config = mtk_mac_config,
812 	.mac_finish = mtk_mac_finish,
813 	.mac_link_down = mtk_mac_link_down,
814 	.mac_link_up = mtk_mac_link_up,
815 };
816 
817 static int mtk_mdio_init(struct mtk_eth *eth)
818 {
819 	unsigned int max_clk = 2500000, divider;
820 	struct device_node *mii_np;
821 	int ret;
822 	u32 val;
823 
824 	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
825 	if (!mii_np) {
826 		dev_err(eth->dev, "no %s child node found", "mdio-bus");
827 		return -ENODEV;
828 	}
829 
830 	if (!of_device_is_available(mii_np)) {
831 		ret = -ENODEV;
832 		goto err_put_node;
833 	}
834 
835 	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
836 	if (!eth->mii_bus) {
837 		ret = -ENOMEM;
838 		goto err_put_node;
839 	}
840 
841 	eth->mii_bus->name = "mdio";
842 	eth->mii_bus->read = mtk_mdio_read_c22;
843 	eth->mii_bus->write = mtk_mdio_write_c22;
844 	eth->mii_bus->read_c45 = mtk_mdio_read_c45;
845 	eth->mii_bus->write_c45 = mtk_mdio_write_c45;
846 	eth->mii_bus->priv = eth;
847 	eth->mii_bus->parent = eth->dev;
848 
849 	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
850 
851 	if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
852 		if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
853 			dev_err(eth->dev, "MDIO clock frequency out of range");
854 			ret = -EINVAL;
855 			goto err_put_node;
856 		}
857 		max_clk = val;
858 	}
859 	divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
860 
861 	/* Configure MDC Turbo Mode */
862 	if (mtk_is_netsys_v3_or_greater(eth))
863 		mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
864 
865 	/* Configure MDC Divider */
866 	val = FIELD_PREP(PPSC_MDC_CFG, divider);
867 	if (!mtk_is_netsys_v3_or_greater(eth))
868 		val |= PPSC_MDC_TURBO;
869 	mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
870 
871 	dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
872 
873 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
874 
875 err_put_node:
876 	of_node_put(mii_np);
877 	return ret;
878 }
879 
880 static void mtk_mdio_cleanup(struct mtk_eth *eth)
881 {
882 	if (!eth->mii_bus)
883 		return;
884 
885 	mdiobus_unregister(eth->mii_bus);
886 }
887 
888 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
889 {
890 	unsigned long flags;
891 	u32 val;
892 
893 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
894 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
895 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
896 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
897 }
898 
899 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
900 {
901 	unsigned long flags;
902 	u32 val;
903 
904 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
905 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
906 	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
907 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
908 }
909 
910 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
911 {
912 	unsigned long flags;
913 	u32 val;
914 
915 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
916 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
917 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
918 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
919 }
920 
921 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
922 {
923 	unsigned long flags;
924 	u32 val;
925 
926 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
927 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
928 	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
929 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
930 }
931 
932 static int mtk_set_mac_address(struct net_device *dev, void *p)
933 {
934 	int ret = eth_mac_addr(dev, p);
935 	struct mtk_mac *mac = netdev_priv(dev);
936 	struct mtk_eth *eth = mac->hw;
937 	const char *macaddr = dev->dev_addr;
938 
939 	if (ret)
940 		return ret;
941 
942 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
943 		return -EBUSY;
944 
945 	spin_lock_bh(&mac->hw->page_lock);
946 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
947 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
948 			MT7628_SDM_MAC_ADRH);
949 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
950 			(macaddr[4] << 8) | macaddr[5],
951 			MT7628_SDM_MAC_ADRL);
952 	} else {
953 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
954 			MTK_GDMA_MAC_ADRH(mac->id));
955 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
956 			(macaddr[4] << 8) | macaddr[5],
957 			MTK_GDMA_MAC_ADRL(mac->id));
958 	}
959 	spin_unlock_bh(&mac->hw->page_lock);
960 
961 	return 0;
962 }
963 
964 void mtk_stats_update_mac(struct mtk_mac *mac)
965 {
966 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
967 	struct mtk_eth *eth = mac->hw;
968 
969 	u64_stats_update_begin(&hw_stats->syncp);
970 
971 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
972 		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
973 		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
974 		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
975 		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
976 		hw_stats->rx_checksum_errors +=
977 			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
978 	} else {
979 		const struct mtk_reg_map *reg_map = eth->soc->reg_map;
980 		unsigned int offs = hw_stats->reg_offset;
981 		u64 stats;
982 
983 		hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
984 		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
985 		if (stats)
986 			hw_stats->rx_bytes += (stats << 32);
987 		hw_stats->rx_packets +=
988 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
989 		hw_stats->rx_overflow +=
990 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
991 		hw_stats->rx_fcs_errors +=
992 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
993 		hw_stats->rx_short_errors +=
994 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
995 		hw_stats->rx_long_errors +=
996 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
997 		hw_stats->rx_checksum_errors +=
998 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
999 		hw_stats->rx_flow_control_packets +=
1000 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
1001 
1002 		if (mtk_is_netsys_v3_or_greater(eth)) {
1003 			hw_stats->tx_skip +=
1004 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
1005 			hw_stats->tx_collisions +=
1006 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
1007 			hw_stats->tx_bytes +=
1008 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
1009 			stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
1010 			if (stats)
1011 				hw_stats->tx_bytes += (stats << 32);
1012 			hw_stats->tx_packets +=
1013 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
1014 		} else {
1015 			hw_stats->tx_skip +=
1016 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
1017 			hw_stats->tx_collisions +=
1018 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
1019 			hw_stats->tx_bytes +=
1020 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
1021 			stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
1022 			if (stats)
1023 				hw_stats->tx_bytes += (stats << 32);
1024 			hw_stats->tx_packets +=
1025 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
1026 		}
1027 	}
1028 
1029 	u64_stats_update_end(&hw_stats->syncp);
1030 }
1031 
1032 static void mtk_stats_update(struct mtk_eth *eth)
1033 {
1034 	int i;
1035 
1036 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1037 		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1038 			continue;
1039 		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1040 			mtk_stats_update_mac(eth->mac[i]);
1041 			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
1042 		}
1043 	}
1044 }
1045 
1046 static void mtk_get_stats64(struct net_device *dev,
1047 			    struct rtnl_link_stats64 *storage)
1048 {
1049 	struct mtk_mac *mac = netdev_priv(dev);
1050 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1051 	unsigned int start;
1052 
1053 	if (netif_running(dev) && netif_device_present(dev)) {
1054 		if (spin_trylock_bh(&hw_stats->stats_lock)) {
1055 			mtk_stats_update_mac(mac);
1056 			spin_unlock_bh(&hw_stats->stats_lock);
1057 		}
1058 	}
1059 
1060 	do {
1061 		start = u64_stats_fetch_begin(&hw_stats->syncp);
1062 		storage->rx_packets = hw_stats->rx_packets;
1063 		storage->tx_packets = hw_stats->tx_packets;
1064 		storage->rx_bytes = hw_stats->rx_bytes;
1065 		storage->tx_bytes = hw_stats->tx_bytes;
1066 		storage->collisions = hw_stats->tx_collisions;
1067 		storage->rx_length_errors = hw_stats->rx_short_errors +
1068 			hw_stats->rx_long_errors;
1069 		storage->rx_over_errors = hw_stats->rx_overflow;
1070 		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1071 		storage->rx_errors = hw_stats->rx_checksum_errors;
1072 		storage->tx_aborted_errors = hw_stats->tx_skip;
1073 	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
1074 
1075 	storage->tx_errors = dev->stats.tx_errors;
1076 	storage->rx_dropped = dev->stats.rx_dropped;
1077 	storage->tx_dropped = dev->stats.tx_dropped;
1078 }
1079 
1080 static inline int mtk_max_frag_size(int mtu)
1081 {
1082 	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1083 	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1084 		mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1085 
1086 	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1087 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1088 }
1089 
1090 static inline int mtk_max_buf_size(int frag_size)
1091 {
1092 	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1093 		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1094 
1095 	WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1096 
1097 	return buf_size;
1098 }
1099 
1100 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1101 			    struct mtk_rx_dma_v2 *dma_rxd)
1102 {
1103 	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1104 	if (!(rxd->rxd2 & RX_DMA_DONE))
1105 		return false;
1106 
1107 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1108 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1109 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1110 	if (mtk_is_netsys_v2_or_greater(eth)) {
1111 		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1112 		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1113 	}
1114 
1115 	return true;
1116 }
1117 
1118 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1119 {
1120 	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1121 	unsigned long data;
1122 
1123 	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1124 				get_order(size));
1125 
1126 	return (void *)data;
1127 }
1128 
1129 /* the QDMA core needs scratch memory to be set up */
1130 static int mtk_init_fq_dma(struct mtk_eth *eth)
1131 {
1132 	const struct mtk_soc_data *soc = eth->soc;
1133 	dma_addr_t phy_ring_tail;
1134 	int cnt = MTK_QDMA_RING_SIZE;
1135 	dma_addr_t dma_addr;
1136 	int i;
1137 
1138 	eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1139 					       cnt * soc->txrx.txd_size,
1140 					       &eth->phy_scratch_ring,
1141 					       GFP_KERNEL);
1142 	if (unlikely(!eth->scratch_ring))
1143 		return -ENOMEM;
1144 
1145 	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1146 	if (unlikely(!eth->scratch_head))
1147 		return -ENOMEM;
1148 
1149 	dma_addr = dma_map_single(eth->dma_dev,
1150 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
1151 				  DMA_FROM_DEVICE);
1152 	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1153 		return -ENOMEM;
1154 
1155 	phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
1156 
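	/* Chain the scratch descriptors: each txd2 points at the DMA address
	 * of the next descriptor, each txd1 at its slice of the scratch
	 * buffer.
	 */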
1157 	for (i = 0; i < cnt; i++) {
1158 		struct mtk_tx_dma_v2 *txd;
1159 
1160 		txd = eth->scratch_ring + i * soc->txrx.txd_size;
1161 		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1162 		if (i < cnt - 1)
1163 			txd->txd2 = eth->phy_scratch_ring +
1164 				    (i + 1) * soc->txrx.txd_size;
1165 
1166 		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1167 		txd->txd4 = 0;
1168 		if (mtk_is_netsys_v2_or_greater(eth)) {
1169 			txd->txd5 = 0;
1170 			txd->txd6 = 0;
1171 			txd->txd7 = 0;
1172 			txd->txd8 = 0;
1173 		}
1174 	}
1175 
1176 	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1177 	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1178 	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1179 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1180 
1181 	return 0;
1182 }
1183 
1184 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1185 {
1186 	return ring->dma + (desc - ring->phys);
1187 }
1188 
1189 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1190 					     void *txd, u32 txd_size)
1191 {
1192 	int idx = (txd - ring->dma) / txd_size;
1193 
1194 	return &ring->buf[idx];
1195 }
1196 
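/* Translate a QDMA descriptor pointer to the matching entry in the shadow
 * PDMA ring used on SoCs without QDMA.
 */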
1197 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1198 				       struct mtk_tx_dma *dma)
1199 {
1200 	return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1201 }
1202 
1203 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1204 {
1205 	return (dma - ring->dma) / txd_size;
1206 }
1207 
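/* Undo the DMA mapping(s) recorded in @tx_buf and free the attached skb or
 * xdp_frame, using the NAPI variants when called from NAPI context.
 */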
1208 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1209 			 struct xdp_frame_bulk *bq, bool napi)
1210 {
1211 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1212 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1213 			dma_unmap_single(eth->dma_dev,
1214 					 dma_unmap_addr(tx_buf, dma_addr0),
1215 					 dma_unmap_len(tx_buf, dma_len0),
1216 					 DMA_TO_DEVICE);
1217 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1218 			dma_unmap_page(eth->dma_dev,
1219 				       dma_unmap_addr(tx_buf, dma_addr0),
1220 				       dma_unmap_len(tx_buf, dma_len0),
1221 				       DMA_TO_DEVICE);
1222 		}
1223 	} else {
1224 		if (dma_unmap_len(tx_buf, dma_len0)) {
1225 			dma_unmap_page(eth->dma_dev,
1226 				       dma_unmap_addr(tx_buf, dma_addr0),
1227 				       dma_unmap_len(tx_buf, dma_len0),
1228 				       DMA_TO_DEVICE);
1229 		}
1230 
1231 		if (dma_unmap_len(tx_buf, dma_len1)) {
1232 			dma_unmap_page(eth->dma_dev,
1233 				       dma_unmap_addr(tx_buf, dma_addr1),
1234 				       dma_unmap_len(tx_buf, dma_len1),
1235 				       DMA_TO_DEVICE);
1236 		}
1237 	}
1238 
1239 	if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1240 		if (tx_buf->type == MTK_TYPE_SKB) {
1241 			struct sk_buff *skb = tx_buf->data;
1242 
1243 			if (napi)
1244 				napi_consume_skb(skb, napi);
1245 			else
1246 				dev_kfree_skb_any(skb);
1247 		} else {
1248 			struct xdp_frame *xdpf = tx_buf->data;
1249 
1250 			if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1251 				xdp_return_frame_rx_napi(xdpf);
1252 			else if (bq)
1253 				xdp_return_frame_bulk(xdpf, bq);
1254 			else
1255 				xdp_return_frame(xdpf);
1256 		}
1257 	}
1258 	tx_buf->flags = 0;
1259 	tx_buf->data = NULL;
1260 }
1261 
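/* Record the DMA mapping in @tx_buf for later unmapping. On PDMA two buffers
 * share one descriptor: an even @idx uses txd1/PLEN0, an odd @idx uses
 * txd3/PLEN1.
 */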
1262 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1263 			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1264 			 size_t size, int idx)
1265 {
1266 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1267 		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1268 		dma_unmap_len_set(tx_buf, dma_len0, size);
1269 	} else {
1270 		if (idx & 1) {
1271 			txd->txd3 = mapped_addr;
1272 			txd->txd2 |= TX_DMA_PLEN1(size);
1273 			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1274 			dma_unmap_len_set(tx_buf, dma_len1, size);
1275 		} else {
1276 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1277 			txd->txd1 = mapped_addr;
1278 			txd->txd2 = TX_DMA_PLEN0(size);
1279 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1280 			dma_unmap_len_set(tx_buf, dma_len0, size);
1281 		}
1282 	}
1283 }
1284 
1285 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1286 				   struct mtk_tx_dma_desc_info *info)
1287 {
1288 	struct mtk_mac *mac = netdev_priv(dev);
1289 	struct mtk_eth *eth = mac->hw;
1290 	struct mtk_tx_dma *desc = txd;
1291 	u32 data;
1292 
1293 	WRITE_ONCE(desc->txd1, info->addr);
1294 
1295 	data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1296 	       FIELD_PREP(TX_DMA_PQID, info->qid);
1297 	if (info->last)
1298 		data |= TX_DMA_LS0;
1299 	WRITE_ONCE(desc->txd3, data);
1300 
1301 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1302 	if (info->first) {
1303 		if (info->gso)
1304 			data |= TX_DMA_TSO;
1305 		/* tx checksum offload */
1306 		if (info->csum)
1307 			data |= TX_DMA_CHKSUM;
1308 		/* vlan header offload */
1309 		if (info->vlan)
1310 			data |= TX_DMA_INS_VLAN | info->vlan_tci;
1311 	}
1312 	WRITE_ONCE(desc->txd4, data);
1313 }
1314 
1315 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1316 				   struct mtk_tx_dma_desc_info *info)
1317 {
1318 	struct mtk_mac *mac = netdev_priv(dev);
1319 	struct mtk_tx_dma_v2 *desc = txd;
1320 	struct mtk_eth *eth = mac->hw;
1321 	u32 data;
1322 
1323 	WRITE_ONCE(desc->txd1, info->addr);
1324 
1325 	data = TX_DMA_PLEN0(info->size);
1326 	if (info->last)
1327 		data |= TX_DMA_LS0;
1328 	WRITE_ONCE(desc->txd3, data);
1329 
1330 	/* set forward port */
1331 	switch (mac->id) {
1332 	case MTK_GMAC1_ID:
1333 		data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
1334 		break;
1335 	case MTK_GMAC2_ID:
1336 		data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
1337 		break;
1338 	case MTK_GMAC3_ID:
1339 		data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
1340 		break;
1341 	}
1342 
1343 	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1344 	WRITE_ONCE(desc->txd4, data);
1345 
1346 	data = 0;
1347 	if (info->first) {
1348 		if (info->gso)
1349 			data |= TX_DMA_TSO_V2;
1350 		/* tx checksum offload */
1351 		if (info->csum)
1352 			data |= TX_DMA_CHKSUM_V2;
1353 		if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
1354 			data |= TX_DMA_SPTAG_V3;
1355 	}
1356 	WRITE_ONCE(desc->txd5, data);
1357 
1358 	data = 0;
1359 	if (info->first && info->vlan)
1360 		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1361 	WRITE_ONCE(desc->txd6, data);
1362 
1363 	WRITE_ONCE(desc->txd7, 0);
1364 	WRITE_ONCE(desc->txd8, 0);
1365 }
1366 
1367 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1368 				struct mtk_tx_dma_desc_info *info)
1369 {
1370 	struct mtk_mac *mac = netdev_priv(dev);
1371 	struct mtk_eth *eth = mac->hw;
1372 
1373 	if (mtk_is_netsys_v2_or_greater(eth))
1374 		mtk_tx_set_dma_desc_v2(dev, txd, info);
1375 	else
1376 		mtk_tx_set_dma_desc_v1(dev, txd, info);
1377 }
1378 
1379 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1380 		      int tx_num, struct mtk_tx_ring *ring, bool gso)
1381 {
1382 	struct mtk_tx_dma_desc_info txd_info = {
1383 		.size = skb_headlen(skb),
1384 		.gso = gso,
1385 		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
1386 		.vlan = skb_vlan_tag_present(skb),
1387 		.qid = skb_get_queue_mapping(skb),
1388 		.vlan_tci = skb_vlan_tag_get(skb),
1389 		.first = true,
1390 		.last = !skb_is_nonlinear(skb),
1391 	};
1392 	struct netdev_queue *txq;
1393 	struct mtk_mac *mac = netdev_priv(dev);
1394 	struct mtk_eth *eth = mac->hw;
1395 	const struct mtk_soc_data *soc = eth->soc;
1396 	struct mtk_tx_dma *itxd, *txd;
1397 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1398 	struct mtk_tx_buf *itx_buf, *tx_buf;
1399 	int i, n_desc = 1;
1400 	int queue = skb_get_queue_mapping(skb);
1401 	int k = 0;
1402 
1403 	txq = netdev_get_tx_queue(dev, queue);
1404 	itxd = ring->next_free;
1405 	itxd_pdma = qdma_to_pdma(ring, itxd);
1406 	if (itxd == ring->last_free)
1407 		return -ENOMEM;
1408 
1409 	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1410 	memset(itx_buf, 0, sizeof(*itx_buf));
1411 
1412 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1413 				       DMA_TO_DEVICE);
1414 	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1415 		return -ENOMEM;
1416 
1417 	mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1418 
1419 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1420 	itx_buf->mac_id = mac->id;
1421 	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1422 		     k++);
1423 
1424 	/* TX SG offload */
1425 	txd = itxd;
1426 	txd_pdma = qdma_to_pdma(ring, txd);
1427 
1428 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1429 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1430 		unsigned int offset = 0;
1431 		int frag_size = skb_frag_size(frag);
1432 
1433 		while (frag_size) {
1434 			bool new_desc = true;
1435 
1436 			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1437 			    (i & 0x1)) {
1438 				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1439 				txd_pdma = qdma_to_pdma(ring, txd);
1440 				if (txd == ring->last_free)
1441 					goto err_dma;
1442 
1443 				n_desc++;
1444 			} else {
1445 				new_desc = false;
1446 			}
1447 
1448 			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1449 			txd_info.size = min_t(unsigned int, frag_size,
1450 					      soc->txrx.dma_max_len);
1451 			txd_info.qid = queue;
1452 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1453 					!(frag_size - txd_info.size);
1454 			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1455 							 offset, txd_info.size,
1456 							 DMA_TO_DEVICE);
1457 			if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1458 				goto err_dma;
1459 
1460 			mtk_tx_set_dma_desc(dev, txd, &txd_info);
1461 
1462 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1463 						    soc->txrx.txd_size);
1464 			if (new_desc)
1465 				memset(tx_buf, 0, sizeof(*tx_buf));
1466 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1467 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1468 			tx_buf->mac_id = mac->id;
1469 
1470 			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1471 				     txd_info.size, k++);
1472 
1473 			frag_size -= txd_info.size;
1474 			offset += txd_info.size;
1475 		}
1476 	}
1477 
1478 	/* store skb for cleanup */
1479 	itx_buf->type = MTK_TYPE_SKB;
1480 	itx_buf->data = skb;
1481 
1482 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1483 		if (k & 0x1)
1484 			txd_pdma->txd2 |= TX_DMA_LS0;
1485 		else
1486 			txd_pdma->txd2 |= TX_DMA_LS1;
1487 	}
1488 
1489 	netdev_tx_sent_queue(txq, skb->len);
1490 	skb_tx_timestamp(skb);
1491 
1492 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1493 	atomic_sub(n_desc, &ring->free_count);
1494 
1495 	/* make sure that all changes to the dma ring are flushed before we
1496 	 * continue
1497 	 */
1498 	wmb();
1499 
1500 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1501 		if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1502 			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1503 	} else {
1504 		int next_idx;
1505 
1506 		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
1507 					 ring->dma_size);
1508 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1509 	}
1510 
1511 	return 0;
1512 
1513 err_dma:
1514 	do {
1515 		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1516 
1517 		/* unmap dma */
1518 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1519 
1520 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1521 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1522 			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1523 
1524 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1525 		itxd_pdma = qdma_to_pdma(ring, itxd);
1526 	} while (itxd != txd);
1527 
1528 	return -ENOMEM;
1529 }
1530 
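/* Worst-case number of TX descriptors needed for @skb: one for the linear
 * head plus, for GSO, one per dma_max_len chunk of every fragment, otherwise
 * one per fragment.
 */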
1531 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1532 {
1533 	int i, nfrags = 1;
1534 	skb_frag_t *frag;
1535 
1536 	if (skb_is_gso(skb)) {
1537 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1538 			frag = &skb_shinfo(skb)->frags[i];
1539 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1540 					       eth->soc->txrx.dma_max_len);
1541 		}
1542 	} else {
1543 		nfrags += skb_shinfo(skb)->nr_frags;
1544 	}
1545 
1546 	return nfrags;
1547 }
1548 
1549 static int mtk_queue_stopped(struct mtk_eth *eth)
1550 {
1551 	int i;
1552 
1553 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1554 		if (!eth->netdev[i])
1555 			continue;
1556 		if (netif_queue_stopped(eth->netdev[i]))
1557 			return 1;
1558 	}
1559 
1560 	return 0;
1561 }
1562 
1563 static void mtk_wake_queue(struct mtk_eth *eth)
1564 {
1565 	int i;
1566 
1567 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1568 		if (!eth->netdev[i])
1569 			continue;
1570 		netif_tx_wake_all_queues(eth->netdev[i]);
1571 	}
1572 }
1573 
1574 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1575 {
1576 	struct mtk_mac *mac = netdev_priv(dev);
1577 	struct mtk_eth *eth = mac->hw;
1578 	struct mtk_tx_ring *ring = &eth->tx_ring;
1579 	struct net_device_stats *stats = &dev->stats;
1580 	bool gso = false;
1581 	int tx_num;
1582 
1583 	/* normally we can rely on the stack not calling this more than once,
1584 	 * however we have 2 queues running on the same ring so we need to lock
1585 	 * the ring access
1586 	 */
1587 	spin_lock(&eth->page_lock);
1588 
1589 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1590 		goto drop;
1591 
1592 	tx_num = mtk_cal_txd_req(eth, skb);
1593 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1594 		netif_tx_stop_all_queues(dev);
1595 		netif_err(eth, tx_queued, dev,
1596 			  "Tx Ring full when queue awake!\n");
1597 		spin_unlock(&eth->page_lock);
1598 		return NETDEV_TX_BUSY;
1599 	}
1600 
1601 	/* TSO: fill MSS info in tcp checksum field */
1602 	if (skb_is_gso(skb)) {
1603 		if (skb_cow_head(skb, 0)) {
1604 			netif_warn(eth, tx_err, dev,
1605 				   "GSO expand head fail.\n");
1606 			goto drop;
1607 		}
1608 
1609 		if (skb_shinfo(skb)->gso_type &
1610 				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1611 			gso = true;
1612 			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1613 		}
1614 	}
1615 
1616 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1617 		goto drop;
1618 
1619 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1620 		netif_tx_stop_all_queues(dev);
1621 
1622 	spin_unlock(&eth->page_lock);
1623 
1624 	return NETDEV_TX_OK;
1625 
1626 drop:
1627 	spin_unlock(&eth->page_lock);
1628 	stats->tx_dropped++;
1629 	dev_kfree_skb_any(skb);
1630 	return NETDEV_TX_OK;
1631 }
1632 
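/* Without hardware LRO all traffic lands on ring 0. With LRO a packet may
 * arrive on any ring, so return the first ring with a completed descriptor
 * pending, or NULL if there is none.
 */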
1633 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1634 {
1635 	int i;
1636 	struct mtk_rx_ring *ring;
1637 	int idx;
1638 
1639 	if (!eth->hwlro)
1640 		return &eth->rx_ring[0];
1641 
1642 	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1643 		struct mtk_rx_dma *rxd;
1644 
1645 		ring = &eth->rx_ring[i];
1646 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1647 		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1648 		if (rxd->rxd2 & RX_DMA_DONE) {
1649 			ring->calc_idx_update = true;
1650 			return ring;
1651 		}
1652 	}
1653 
1654 	return NULL;
1655 }
1656 
1657 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1658 {
1659 	struct mtk_rx_ring *ring;
1660 	int i;
1661 
1662 	if (!eth->hwlro) {
1663 		ring = &eth->rx_ring[0];
1664 		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1665 	} else {
1666 		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1667 			ring = &eth->rx_ring[i];
1668 			if (ring->calc_idx_update) {
1669 				ring->calc_idx_update = false;
1670 				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1671 			}
1672 		}
1673 	}
1674 }
1675 
1676 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1677 {
1678 	return mtk_is_netsys_v2_or_greater(eth);
1679 }
1680 
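/* Create a page_pool backed RX buffer pool and register it as the XDP memory
 * model of @xdp_q. The pool maps pages bidirectionally when an XDP program is
 * attached so that XDP_TX can reuse the existing mapping.
 */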
1681 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1682 					      struct xdp_rxq_info *xdp_q,
1683 					      int id, int size)
1684 {
1685 	struct page_pool_params pp_params = {
1686 		.order = 0,
1687 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1688 		.pool_size = size,
1689 		.nid = NUMA_NO_NODE,
1690 		.dev = eth->dma_dev,
1691 		.offset = MTK_PP_HEADROOM,
1692 		.max_len = MTK_PP_MAX_BUF_SIZE,
1693 	};
1694 	struct page_pool *pp;
1695 	int err;
1696 
1697 	pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1698 							  : DMA_FROM_DEVICE;
1699 	pp = page_pool_create(&pp_params);
1700 	if (IS_ERR(pp))
1701 		return pp;
1702 
1703 	err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
1704 				 eth->rx_napi.napi_id, PAGE_SIZE);
1705 	if (err < 0)
1706 		goto err_free_pp;
1707 
1708 	err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1709 	if (err)
1710 		goto err_unregister_rxq;
1711 
1712 	return pp;
1713 
1714 err_unregister_rxq:
1715 	xdp_rxq_info_unreg(xdp_q);
1716 err_free_pp:
1717 	page_pool_destroy(pp);
1718 
1719 	return ERR_PTR(err);
1720 }
1721 
1722 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1723 				    gfp_t gfp_mask)
1724 {
1725 	struct page *page;
1726 
1727 	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1728 	if (!page)
1729 		return NULL;
1730 
1731 	*dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1732 	return page_address(page);
1733 }
1734 
1735 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1736 {
1737 	if (ring->page_pool)
1738 		page_pool_put_full_page(ring->page_pool,
1739 					virt_to_head_page(data), napi);
1740 	else
1741 		skb_free_frag(data);
1742 }
1743 
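/* Map one xdp_frame (or one of its fragments) for transmission. Frames from
 * ndo_xdp_xmit (@dma_map) need a fresh DMA mapping; XDP_TX buffers already
 * live in page_pool pages and only need a dma_sync.
 */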
1744 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1745 			     struct mtk_tx_dma_desc_info *txd_info,
1746 			     struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1747 			     void *data, u16 headroom, int index, bool dma_map)
1748 {
1749 	struct mtk_tx_ring *ring = &eth->tx_ring;
1750 	struct mtk_mac *mac = netdev_priv(dev);
1751 	struct mtk_tx_dma *txd_pdma;
1752 
1753 	if (dma_map) {  /* ndo_xdp_xmit */
1754 		txd_info->addr = dma_map_single(eth->dma_dev, data,
1755 						txd_info->size, DMA_TO_DEVICE);
1756 		if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1757 			return -ENOMEM;
1758 
1759 		tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1760 	} else {
1761 		struct page *page = virt_to_head_page(data);
1762 
1763 		txd_info->addr = page_pool_get_dma_addr(page) +
1764 				 sizeof(struct xdp_frame) + headroom;
1765 		dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1766 					   txd_info->size, DMA_BIDIRECTIONAL);
1767 	}
1768 	mtk_tx_set_dma_desc(dev, txd, txd_info);
1769 
1770 	tx_buf->mac_id = mac->id;
1771 	tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1772 	tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1773 
1774 	txd_pdma = qdma_to_pdma(ring, txd);
1775 	setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1776 		     index);
1777 
1778 	return 0;
1779 }
1780 
1781 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1782 				struct net_device *dev, bool dma_map)
1783 {
1784 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1785 	const struct mtk_soc_data *soc = eth->soc;
1786 	struct mtk_tx_ring *ring = &eth->tx_ring;
1787 	struct mtk_mac *mac = netdev_priv(dev);
1788 	struct mtk_tx_dma_desc_info txd_info = {
1789 		.size	= xdpf->len,
1790 		.first	= true,
1791 		.last	= !xdp_frame_has_frags(xdpf),
1792 		.qid	= mac->id,
1793 	};
1794 	int err, index = 0, n_desc = 1, nr_frags;
1795 	struct mtk_tx_buf *htx_buf, *tx_buf;
1796 	struct mtk_tx_dma *htxd, *txd;
1797 	void *data = xdpf->data;
1798 
1799 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1800 		return -EBUSY;
1801 
1802 	nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1803 	if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1804 		return -EBUSY;
1805 
1806 	spin_lock(&eth->page_lock);
1807 
1808 	txd = ring->next_free;
1809 	if (txd == ring->last_free) {
1810 		spin_unlock(&eth->page_lock);
1811 		return -ENOMEM;
1812 	}
1813 	htxd = txd;
1814 
1815 	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
1816 	memset(tx_buf, 0, sizeof(*tx_buf));
1817 	htx_buf = tx_buf;
1818 
1819 	for (;;) {
1820 		err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1821 					data, xdpf->headroom, index, dma_map);
1822 		if (err < 0)
1823 			goto unmap;
1824 
1825 		if (txd_info.last)
1826 			break;
1827 
1828 		if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1829 			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1830 			if (txd == ring->last_free)
1831 				goto unmap;
1832 
1833 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1834 						    soc->txrx.txd_size);
1835 			memset(tx_buf, 0, sizeof(*tx_buf));
1836 			n_desc++;
1837 		}
1838 
1839 		memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1840 		txd_info.size = skb_frag_size(&sinfo->frags[index]);
1841 		txd_info.last = index + 1 == nr_frags;
1842 		txd_info.qid = mac->id;
1843 		data = skb_frag_address(&sinfo->frags[index]);
1844 
1845 		index++;
1846 	}
1847 	/* store xdpf for cleanup */
1848 	htx_buf->data = xdpf;
1849 
1850 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1851 		struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1852 
1853 		if (index & 1)
1854 			txd_pdma->txd2 |= TX_DMA_LS0;
1855 		else
1856 			txd_pdma->txd2 |= TX_DMA_LS1;
1857 	}
1858 
1859 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1860 	atomic_sub(n_desc, &ring->free_count);
1861 
1862 	/* make sure that all changes to the dma ring are flushed before we
1863 	 * continue
1864 	 */
1865 	wmb();
1866 
1867 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1868 		mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1869 	} else {
1870 		int idx;
1871 
1872 		idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
1873 		mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1874 			MT7628_TX_CTX_IDX0);
1875 	}
1876 
1877 	spin_unlock(&eth->page_lock);
1878 
1879 	return 0;
1880 
1881 unmap:
1882 	while (htxd != txd) {
1883 		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
1884 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1885 
1886 		htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1887 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1888 			struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1889 
1890 			txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1891 		}
1892 
1893 		htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1894 	}
1895 
1896 	spin_unlock(&eth->page_lock);
1897 
1898 	return err;
1899 }
1900 
1901 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1902 			struct xdp_frame **frames, u32 flags)
1903 {
1904 	struct mtk_mac *mac = netdev_priv(dev);
1905 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1906 	struct mtk_eth *eth = mac->hw;
1907 	int i, nxmit = 0;
1908 
1909 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1910 		return -EINVAL;
1911 
1912 	for (i = 0; i < num_frame; i++) {
1913 		if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1914 			break;
1915 		nxmit++;
1916 	}
1917 
1918 	u64_stats_update_begin(&hw_stats->syncp);
1919 	hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1920 	hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1921 	u64_stats_update_end(&hw_stats->syncp);
1922 
1923 	return nxmit;
1924 }
1925 
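/* Run the attached XDP program (if any) on a received buffer and bump the
 * matching per-action counter. The returned verdict tells the caller whether
 * an skb still has to be built (XDP_PASS) or the buffer has already been
 * consumed or recycled.
 */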
1926 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1927 		       struct xdp_buff *xdp, struct net_device *dev)
1928 {
1929 	struct mtk_mac *mac = netdev_priv(dev);
1930 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1931 	u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1932 	struct bpf_prog *prog;
1933 	u32 act = XDP_PASS;
1934 
1935 	rcu_read_lock();
1936 
1937 	prog = rcu_dereference(eth->prog);
1938 	if (!prog)
1939 		goto out;
1940 
1941 	act = bpf_prog_run_xdp(prog, xdp);
1942 	switch (act) {
1943 	case XDP_PASS:
1944 		count = &hw_stats->xdp_stats.rx_xdp_pass;
1945 		goto update_stats;
1946 	case XDP_REDIRECT:
1947 		if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1948 			act = XDP_DROP;
1949 			break;
1950 		}
1951 
1952 		count = &hw_stats->xdp_stats.rx_xdp_redirect;
1953 		goto update_stats;
1954 	case XDP_TX: {
1955 		struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1956 
1957 		if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1958 			count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1959 			act = XDP_DROP;
1960 			break;
1961 		}
1962 
1963 		count = &hw_stats->xdp_stats.rx_xdp_tx;
1964 		goto update_stats;
1965 	}
1966 	default:
1967 		bpf_warn_invalid_xdp_action(dev, prog, act);
1968 		fallthrough;
1969 	case XDP_ABORTED:
1970 		trace_xdp_exception(dev, prog, act);
1971 		fallthrough;
1972 	case XDP_DROP:
1973 		break;
1974 	}
1975 
1976 	page_pool_put_full_page(ring->page_pool,
1977 				virt_to_head_page(xdp->data), true);
1978 
1979 update_stats:
1980 	u64_stats_update_begin(&hw_stats->syncp);
1981 	*count = *count + 1;
1982 	u64_stats_update_end(&hw_stats->syncp);
1983 out:
1984 	rcu_read_unlock();
1985 
1986 	return act;
1987 }
1988 
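/* NAPI RX poll loop: for every completed descriptor, refill the ring slot
 * with a fresh buffer, run XDP when a page pool backs the ring, and
 * otherwise build an skb and feed it to GRO. The hardware CPU index is only
 * advanced once at the end, after a write barrier.
 */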
1989 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1990 		       struct mtk_eth *eth)
1991 {
1992 	struct dim_sample dim_sample = {};
1993 	struct mtk_rx_ring *ring;
1994 	bool xdp_flush = false;
1995 	int idx;
1996 	struct sk_buff *skb;
1997 	u8 *data, *new_data;
1998 	struct mtk_rx_dma_v2 *rxd, trxd;
1999 	int done = 0, bytes = 0;
2000 
2001 	while (done < budget) {
2002 		unsigned int pktlen, *rxdcsum;
2003 		struct net_device *netdev;
2004 		dma_addr_t dma_addr;
2005 		u32 hash, reason;
2006 		int mac = 0;
2007 
2008 		ring = mtk_get_rx_ring(eth);
2009 		if (unlikely(!ring))
2010 			goto rx_done;
2011 
2012 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
2013 		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
2014 		data = ring->data[idx];
2015 
2016 		if (!mtk_rx_get_desc(eth, &trxd, rxd))
2017 			break;
2018 
2019 		/* find out which mac the packet comes from. values start at 1 */
2020 		if (mtk_is_netsys_v2_or_greater(eth)) {
2021 			u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
2022 
2023 			switch (val) {
2024 			case PSE_GDM1_PORT:
2025 			case PSE_GDM2_PORT:
2026 				mac = val - 1;
2027 				break;
2028 			case PSE_GDM3_PORT:
2029 				mac = MTK_GMAC3_ID;
2030 				break;
2031 			default:
2032 				break;
2033 			}
2034 		} else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
2035 			   !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
2036 			mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
2037 		}
2038 
2039 		if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
2040 			     !eth->netdev[mac]))
2041 			goto release_desc;
2042 
2043 		netdev = eth->netdev[mac];
2044 
2045 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
2046 			goto release_desc;
2047 
2048 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
2049 
2050 		/* alloc new buffer */
2051 		if (ring->page_pool) {
2052 			struct page *page = virt_to_head_page(data);
2053 			struct xdp_buff xdp;
2054 			u32 ret;
2055 
2056 			new_data = mtk_page_pool_get_buff(ring->page_pool,
2057 							  &dma_addr,
2058 							  GFP_ATOMIC);
2059 			if (unlikely(!new_data)) {
2060 				netdev->stats.rx_dropped++;
2061 				goto release_desc;
2062 			}
2063 
2064 			dma_sync_single_for_cpu(eth->dma_dev,
2065 				page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
2066 				pktlen, page_pool_get_dma_dir(ring->page_pool));
2067 
2068 			xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
2069 			xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
2070 					 false);
2071 			xdp_buff_clear_frags_flag(&xdp);
2072 
2073 			ret = mtk_xdp_run(eth, ring, &xdp, netdev);
2074 			if (ret == XDP_REDIRECT)
2075 				xdp_flush = true;
2076 
2077 			if (ret != XDP_PASS)
2078 				goto skip_rx;
2079 
2080 			skb = build_skb(data, PAGE_SIZE);
2081 			if (unlikely(!skb)) {
2082 				page_pool_put_full_page(ring->page_pool,
2083 							page, true);
2084 				netdev->stats.rx_dropped++;
2085 				goto skip_rx;
2086 			}
2087 
2088 			skb_reserve(skb, xdp.data - xdp.data_hard_start);
2089 			skb_put(skb, xdp.data_end - xdp.data);
2090 			skb_mark_for_recycle(skb);
2091 		} else {
2092 			if (ring->frag_size <= PAGE_SIZE)
2093 				new_data = napi_alloc_frag(ring->frag_size);
2094 			else
2095 				new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
2096 
2097 			if (unlikely(!new_data)) {
2098 				netdev->stats.rx_dropped++;
2099 				goto release_desc;
2100 			}
2101 
2102 			dma_addr = dma_map_single(eth->dma_dev,
2103 				new_data + NET_SKB_PAD + eth->ip_align,
2104 				ring->buf_size, DMA_FROM_DEVICE);
2105 			if (unlikely(dma_mapping_error(eth->dma_dev,
2106 						       dma_addr))) {
2107 				skb_free_frag(new_data);
2108 				netdev->stats.rx_dropped++;
2109 				goto release_desc;
2110 			}
2111 
2112 			dma_unmap_single(eth->dma_dev, trxd.rxd1,
2113 					 ring->buf_size, DMA_FROM_DEVICE);
2114 
2115 			skb = build_skb(data, ring->frag_size);
2116 			if (unlikely(!skb)) {
2117 				netdev->stats.rx_dropped++;
2118 				skb_free_frag(data);
2119 				goto skip_rx;
2120 			}
2121 
2122 			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2123 			skb_put(skb, pktlen);
2124 		}
2125 
2126 		skb->dev = netdev;
2127 		bytes += skb->len;
2128 
2129 		if (mtk_is_netsys_v2_or_greater(eth)) {
2130 			reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2131 			hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2132 			if (hash != MTK_RXD5_FOE_ENTRY)
2133 				skb_set_hash(skb, jhash_1word(hash, 0),
2134 					     PKT_HASH_TYPE_L4);
2135 			rxdcsum = &trxd.rxd3;
2136 		} else {
2137 			reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2138 			hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2139 			if (hash != MTK_RXD4_FOE_ENTRY)
2140 				skb_set_hash(skb, jhash_1word(hash, 0),
2141 					     PKT_HASH_TYPE_L4);
2142 			rxdcsum = &trxd.rxd4;
2143 		}
2144 
2145 		if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
2146 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2147 		else
2148 			skb_checksum_none_assert(skb);
2149 		skb->protocol = eth_type_trans(skb, netdev);
2150 
2151 		/* When using VLAN untagging in combination with DSA, the
2152 		 * hardware treats the MTK special tag as a VLAN and untags it.
2153 		 */
2154 		if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
2155 		    netdev_uses_dsa(netdev)) {
2156 			unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
2157 
2158 			if (port < ARRAY_SIZE(eth->dsa_meta) &&
2159 			    eth->dsa_meta[port])
2160 				skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2161 		}
2162 
2163 		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2164 			mtk_ppe_check_skb(eth->ppe[0], skb, hash);
2165 
2166 		skb_record_rx_queue(skb, 0);
2167 		napi_gro_receive(napi, skb);
2168 
2169 skip_rx:
2170 		ring->data[idx] = new_data;
2171 		rxd->rxd1 = (unsigned int)dma_addr;
2172 release_desc:
2173 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2174 			rxd->rxd2 = RX_DMA_LSO;
2175 		else
2176 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2177 
2178 		ring->calc_idx = idx;
2179 		done++;
2180 	}
2181 
2182 rx_done:
2183 	if (done) {
2184 		/* make sure that all changes to the dma ring are flushed before
2185 		 * we continue
2186 		 */
2187 		wmb();
2188 		mtk_update_rx_cpu_idx(eth);
2189 	}
2190 
2191 	eth->rx_packets += done;
2192 	eth->rx_bytes += bytes;
2193 	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2194 			  &dim_sample);
2195 	net_dim(&eth->rx_dim, dim_sample);
2196 
2197 	if (xdp_flush)
2198 		xdp_do_flush_map();
2199 
2200 	return done;
2201 }
2202 
2203 struct mtk_poll_state {
2204 	struct netdev_queue *txq;
2205 	unsigned int total;
2206 	unsigned int done;
2207 	unsigned int bytes;
2208 };
2209 
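/* Account one completed TX buffer. BQL completions are batched per TX
 * queue: counts accumulate while consecutive buffers map to the same queue
 * and are flushed through netdev_tx_completed_queue() whenever the queue
 * changes; the final flush happens in mtk_poll_tx().
 */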
2210 static void
2211 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2212 		 struct sk_buff *skb)
2213 {
2214 	struct netdev_queue *txq;
2215 	struct net_device *dev;
2216 	unsigned int bytes = skb->len;
2217 
2218 	state->total++;
2219 	eth->tx_packets++;
2220 	eth->tx_bytes += bytes;
2221 
2222 	dev = eth->netdev[mac];
2223 	if (!dev)
2224 		return;
2225 
2226 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2227 	if (state->txq == txq) {
2228 		state->done++;
2229 		state->bytes += bytes;
2230 		return;
2231 	}
2232 
2233 	if (state->txq)
2234 		netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2235 
2236 	state->txq = txq;
2237 	state->done = 1;
2238 	state->bytes = bytes;
2239 }
2240 
2241 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2242 			    struct mtk_poll_state *state)
2243 {
2244 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2245 	struct mtk_tx_ring *ring = &eth->tx_ring;
2246 	struct mtk_tx_buf *tx_buf;
2247 	struct xdp_frame_bulk bq;
2248 	struct mtk_tx_dma *desc;
2249 	u32 cpu, dma;
2250 
2251 	cpu = ring->last_free_ptr;
2252 	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2253 
2254 	desc = mtk_qdma_phys_to_virt(ring, cpu);
2255 	xdp_frame_bulk_init(&bq);
2256 
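	/* Reclaim descriptors between the last CPU release pointer and the
	 * hardware's current DMA pointer, stopping early when the budget is
	 * exhausted or a descriptor is still owned by the hardware.
	 */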
2257 	while ((cpu != dma) && budget) {
2258 		u32 next_cpu = desc->txd2;
2259 
2260 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2261 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2262 			break;
2263 
2264 		tx_buf = mtk_desc_to_tx_buf(ring, desc,
2265 					    eth->soc->txrx.txd_size);
2266 		if (!tx_buf->data)
2267 			break;
2268 
2269 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2270 			if (tx_buf->type == MTK_TYPE_SKB)
2271 				mtk_poll_tx_done(eth, state, tx_buf->mac_id,
2272 						 tx_buf->data);
2273 
2274 			budget--;
2275 		}
2276 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2277 
2278 		ring->last_free = desc;
2279 		atomic_inc(&ring->free_count);
2280 
2281 		cpu = next_cpu;
2282 	}
2283 	xdp_flush_frame_bulk(&bq);
2284 
2285 	ring->last_free_ptr = cpu;
2286 	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2287 
2288 	return budget;
2289 }
2290 
2291 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2292 			    struct mtk_poll_state *state)
2293 {
2294 	struct mtk_tx_ring *ring = &eth->tx_ring;
2295 	struct mtk_tx_buf *tx_buf;
2296 	struct xdp_frame_bulk bq;
2297 	struct mtk_tx_dma *desc;
2298 	u32 cpu, dma;
2299 
2300 	cpu = ring->cpu_idx;
2301 	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2302 	xdp_frame_bulk_init(&bq);
2303 
2304 	while ((cpu != dma) && budget) {
2305 		tx_buf = &ring->buf[cpu];
2306 		if (!tx_buf->data)
2307 			break;
2308 
2309 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2310 			if (tx_buf->type == MTK_TYPE_SKB)
2311 				mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2312 			budget--;
2313 		}
2314 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2315 
2316 		desc = ring->dma + cpu * eth->soc->txrx.txd_size;
2317 		ring->last_free = desc;
2318 		atomic_inc(&ring->free_count);
2319 
2320 		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2321 	}
2322 	xdp_flush_frame_bulk(&bq);
2323 
2324 	ring->cpu_idx = cpu;
2325 
2326 	return budget;
2327 }
2328 
2329 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2330 {
2331 	struct mtk_tx_ring *ring = &eth->tx_ring;
2332 	struct dim_sample dim_sample = {};
2333 	struct mtk_poll_state state = {};
2334 
2335 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2336 		budget = mtk_poll_tx_qdma(eth, budget, &state);
2337 	else
2338 		budget = mtk_poll_tx_pdma(eth, budget, &state);
2339 
2340 	if (state.txq)
2341 		netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2342 
2343 	dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2344 			  &dim_sample);
2345 	net_dim(&eth->tx_dim, dim_sample);
2346 
2347 	if (mtk_queue_stopped(eth) &&
2348 	    (atomic_read(&ring->free_count) > ring->thresh))
2349 		mtk_wake_queue(eth);
2350 
2351 	return state.total;
2352 }
2353 
2354 static void mtk_handle_status_irq(struct mtk_eth *eth)
2355 {
2356 	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2357 
2358 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2359 		mtk_stats_update(eth);
2360 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2361 			MTK_INT_STATUS2);
2362 	}
2363 }
2364 
2365 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2366 {
2367 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2368 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2369 	int tx_done = 0;
2370 
2371 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2372 		mtk_handle_status_irq(eth);
2373 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2374 	tx_done = mtk_poll_tx(eth, budget);
2375 
2376 	if (unlikely(netif_msg_intr(eth))) {
2377 		dev_info(eth->dev,
2378 			 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2379 			 mtk_r32(eth, reg_map->tx_irq_status),
2380 			 mtk_r32(eth, reg_map->tx_irq_mask));
2381 	}
2382 
2383 	if (tx_done == budget)
2384 		return budget;
2385 
2386 	if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2387 		return budget;
2388 
2389 	if (napi_complete_done(napi, tx_done))
2390 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2391 
2392 	return tx_done;
2393 }
2394 
2395 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2396 {
2397 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2398 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2399 	int rx_done_total = 0;
2400 
2401 	mtk_handle_status_irq(eth);
2402 
2403 	do {
2404 		int rx_done;
2405 
2406 		mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
2407 			reg_map->pdma.irq_status);
2408 		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2409 		rx_done_total += rx_done;
2410 
2411 		if (unlikely(netif_msg_intr(eth))) {
2412 			dev_info(eth->dev,
2413 				 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2414 				 mtk_r32(eth, reg_map->pdma.irq_status),
2415 				 mtk_r32(eth, reg_map->pdma.irq_mask));
2416 		}
2417 
2418 		if (rx_done_total == budget)
2419 			return budget;
2420 
2421 	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
2422 		 eth->soc->txrx.rx_irq_done_mask);
2423 
2424 	if (napi_complete_done(napi, rx_done_total))
2425 		mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2426 
2427 	return rx_done_total;
2428 }
2429 
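/* Allocate and initialise the TX descriptor ring: QDMA descriptors are
 * linked through their txd2 next pointers, and PDMA-only SoCs additionally
 * get a shadow ring in ring->dma_pdma. The ring base and pointer registers
 * are programmed at the end.
 */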
2430 static int mtk_tx_alloc(struct mtk_eth *eth)
2431 {
2432 	const struct mtk_soc_data *soc = eth->soc;
2433 	struct mtk_tx_ring *ring = &eth->tx_ring;
2434 	int i, sz = soc->txrx.txd_size;
2435 	struct mtk_tx_dma_v2 *txd;
2436 	int ring_size;
2437 	u32 ofs, val;
2438 
2439 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2440 		ring_size = MTK_QDMA_RING_SIZE;
2441 	else
2442 		ring_size = MTK_DMA_SIZE;
2443 
2444 	ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2445 			       GFP_KERNEL);
2446 	if (!ring->buf)
2447 		goto no_tx_mem;
2448 
2449 	ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2450 				       &ring->phys, GFP_KERNEL);
2451 	if (!ring->dma)
2452 		goto no_tx_mem;
2453 
2454 	for (i = 0; i < ring_size; i++) {
2455 		int next = (i + 1) % ring_size;
2456 		u32 next_ptr = ring->phys + next * sz;
2457 
2458 		txd = ring->dma + i * sz;
2459 		txd->txd2 = next_ptr;
2460 		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2461 		txd->txd4 = 0;
2462 		if (mtk_is_netsys_v2_or_greater(eth)) {
2463 			txd->txd5 = 0;
2464 			txd->txd6 = 0;
2465 			txd->txd7 = 0;
2466 			txd->txd8 = 0;
2467 		}
2468 	}
2469 
2470 	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
2471 	 * only as the framework. The real HW descriptors are the PDMA
2472 	 * descriptors in ring->dma_pdma.
2473 	 */
2474 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2475 		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2476 						    &ring->phys_pdma, GFP_KERNEL);
2477 		if (!ring->dma_pdma)
2478 			goto no_tx_mem;
2479 
2480 		for (i = 0; i < ring_size; i++) {
2481 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2482 			ring->dma_pdma[i].txd4 = 0;
2483 		}
2484 	}
2485 
2486 	ring->dma_size = ring_size;
2487 	atomic_set(&ring->free_count, ring_size - 2);
2488 	ring->next_free = ring->dma;
2489 	ring->last_free = (void *)txd;
2490 	ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2491 	ring->thresh = MAX_SKB_FRAGS;
2492 
2493 	/* make sure that all changes to the dma ring are flushed before we
2494 	 * continue
2495 	 */
2496 	wmb();
2497 
2498 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2499 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2500 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2501 		mtk_w32(eth,
2502 			ring->phys + ((ring_size - 1) * sz),
2503 			soc->reg_map->qdma.crx_ptr);
2504 		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2505 
2506 		for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2507 			val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2508 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2509 
2510 			val = MTK_QTX_SCH_MIN_RATE_EN |
2511 			      /* minimum: 10 Mbps */
2512 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2513 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2514 			      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2515 			if (mtk_is_netsys_v1(eth))
2516 				val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2517 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2518 			ofs += MTK_QTX_OFFSET;
2519 		}
2520 		val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2521 		mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2522 		if (mtk_is_netsys_v2_or_greater(eth))
2523 			mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2524 	} else {
2525 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2526 		mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2527 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2528 		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2529 	}
2530 
2531 	return 0;
2532 
2533 no_tx_mem:
2534 	return -ENOMEM;
2535 }
2536 
2537 static void mtk_tx_clean(struct mtk_eth *eth)
2538 {
2539 	const struct mtk_soc_data *soc = eth->soc;
2540 	struct mtk_tx_ring *ring = &eth->tx_ring;
2541 	int i;
2542 
2543 	if (ring->buf) {
2544 		for (i = 0; i < ring->dma_size; i++)
2545 			mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2546 		kfree(ring->buf);
2547 		ring->buf = NULL;
2548 	}
2549 
2550 	if (ring->dma) {
2551 		dma_free_coherent(eth->dma_dev,
2552 				  ring->dma_size * soc->txrx.txd_size,
2553 				  ring->dma, ring->phys);
2554 		ring->dma = NULL;
2555 	}
2556 
2557 	if (ring->dma_pdma) {
2558 		dma_free_coherent(eth->dma_dev,
2559 				  ring->dma_size * soc->txrx.txd_size,
2560 				  ring->dma_pdma, ring->phys_pdma);
2561 		ring->dma_pdma = NULL;
2562 	}
2563 }
2564 
2565 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2566 {
2567 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2568 	struct mtk_rx_ring *ring;
2569 	int rx_data_len, rx_dma_size;
2570 	int i;
2571 
2572 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2573 		if (ring_no)
2574 			return -EINVAL;
2575 		ring = &eth->rx_ring_qdma;
2576 	} else {
2577 		ring = &eth->rx_ring[ring_no];
2578 	}
2579 
2580 	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2581 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2582 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2583 	} else {
2584 		rx_data_len = ETH_DATA_LEN;
2585 		rx_dma_size = MTK_DMA_SIZE;
2586 	}
2587 
2588 	ring->frag_size = mtk_max_frag_size(rx_data_len);
2589 	ring->buf_size = mtk_max_buf_size(ring->frag_size);
2590 	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2591 			     GFP_KERNEL);
2592 	if (!ring->data)
2593 		return -ENOMEM;
2594 
2595 	if (mtk_page_pool_enabled(eth)) {
2596 		struct page_pool *pp;
2597 
2598 		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2599 					  rx_dma_size);
2600 		if (IS_ERR(pp))
2601 			return PTR_ERR(pp);
2602 
2603 		ring->page_pool = pp;
2604 	}
2605 
2606 	ring->dma = dma_alloc_coherent(eth->dma_dev,
2607 				       rx_dma_size * eth->soc->txrx.rxd_size,
2608 				       &ring->phys, GFP_KERNEL);
2609 	if (!ring->dma)
2610 		return -ENOMEM;
2611 
2612 	for (i = 0; i < rx_dma_size; i++) {
2613 		struct mtk_rx_dma_v2 *rxd;
2614 		dma_addr_t dma_addr;
2615 		void *data;
2616 
2617 		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2618 		if (ring->page_pool) {
2619 			data = mtk_page_pool_get_buff(ring->page_pool,
2620 						      &dma_addr, GFP_KERNEL);
2621 			if (!data)
2622 				return -ENOMEM;
2623 		} else {
2624 			if (ring->frag_size <= PAGE_SIZE)
2625 				data = netdev_alloc_frag(ring->frag_size);
2626 			else
2627 				data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2628 
2629 			if (!data)
2630 				return -ENOMEM;
2631 
2632 			dma_addr = dma_map_single(eth->dma_dev,
2633 				data + NET_SKB_PAD + eth->ip_align,
2634 				ring->buf_size, DMA_FROM_DEVICE);
2635 			if (unlikely(dma_mapping_error(eth->dma_dev,
2636 						       dma_addr))) {
2637 				skb_free_frag(data);
2638 				return -ENOMEM;
2639 			}
2640 		}
2641 		rxd->rxd1 = (unsigned int)dma_addr;
2642 		ring->data[i] = data;
2643 
2644 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2645 			rxd->rxd2 = RX_DMA_LSO;
2646 		else
2647 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2648 
2649 		rxd->rxd3 = 0;
2650 		rxd->rxd4 = 0;
2651 		if (mtk_is_netsys_v2_or_greater(eth)) {
2652 			rxd->rxd5 = 0;
2653 			rxd->rxd6 = 0;
2654 			rxd->rxd7 = 0;
2655 			rxd->rxd8 = 0;
2656 		}
2657 	}
2658 
2659 	ring->dma_size = rx_dma_size;
2660 	ring->calc_idx_update = false;
2661 	ring->calc_idx = rx_dma_size - 1;
2662 	if (rx_flag == MTK_RX_FLAGS_QDMA)
2663 		ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2664 				    ring_no * MTK_QRX_OFFSET;
2665 	else
2666 		ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2667 				    ring_no * MTK_QRX_OFFSET;
2668 	/* make sure that all changes to the dma ring are flushed before we
2669 	 * continue
2670 	 */
2671 	wmb();
2672 
2673 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2674 		mtk_w32(eth, ring->phys,
2675 			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2676 		mtk_w32(eth, rx_dma_size,
2677 			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2678 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2679 			reg_map->qdma.rst_idx);
2680 	} else {
2681 		mtk_w32(eth, ring->phys,
2682 			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2683 		mtk_w32(eth, rx_dma_size,
2684 			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2685 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2686 			reg_map->pdma.rst_idx);
2687 	}
2688 	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2689 
2690 	return 0;
2691 }
2692 
2693 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
2694 {
2695 	int i;
2696 
2697 	if (ring->data && ring->dma) {
2698 		for (i = 0; i < ring->dma_size; i++) {
2699 			struct mtk_rx_dma *rxd;
2700 
2701 			if (!ring->data[i])
2702 				continue;
2703 
2704 			rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2705 			if (!rxd->rxd1)
2706 				continue;
2707 
2708 			dma_unmap_single(eth->dma_dev, rxd->rxd1,
2709 					 ring->buf_size, DMA_FROM_DEVICE);
2710 			mtk_rx_put_buff(ring, ring->data[i], false);
2711 		}
2712 		kfree(ring->data);
2713 		ring->data = NULL;
2714 	}
2715 
2716 	if (ring->dma) {
2717 		dma_free_coherent(eth->dma_dev,
2718 				  ring->dma_size * eth->soc->txrx.rxd_size,
2719 				  ring->dma, ring->phys);
2720 		ring->dma = NULL;
2721 	}
2722 
2723 	if (ring->page_pool) {
2724 		if (xdp_rxq_info_is_reg(&ring->xdp_q))
2725 			xdp_rxq_info_unreg(&ring->xdp_q);
2726 		page_pool_destroy(ring->page_pool);
2727 		ring->page_pool = NULL;
2728 	}
2729 }
2730 
2731 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2732 {
2733 	int i;
2734 	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2735 	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2736 
2737 	/* set LRO rings to auto-learn modes */
2738 	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2739 
2740 	/* validate LRO ring */
2741 	ring_ctrl_dw2 |= MTK_RING_VLD;
2742 
2743 	/* set AGE timer (unit: 20us) */
2744 	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2745 	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2746 
2747 	/* set max AGG timer (unit: 20us) */
2748 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2749 
2750 	/* set max LRO AGG count */
2751 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2752 	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2753 
2754 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2755 		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2756 		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2757 		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2758 	}
2759 
2760 	/* IPv4 checksum update enable */
2761 	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2762 
2763 	/* switch priority comparison to packet count mode */
2764 	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2765 
2766 	/* bandwidth threshold setting */
2767 	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2768 
2769 	/* auto-learn score delta setting */
2770 	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2771 
2772 	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2773 	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2774 		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2775 
2776 	/* set HW LRO mode & the max aggregation count for rx packets */
2777 	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2778 
2779 	/* the minimum room that must remain in the RXD SDL0 field for lro aggregation */
2780 	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2781 
2782 	/* enable HW LRO */
2783 	lro_ctrl_dw0 |= MTK_LRO_EN;
2784 
2785 	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2786 	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2787 
2788 	return 0;
2789 }
2790 
2791 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2792 {
2793 	int i;
2794 	u32 val;
2795 
2796 	/* relinquish lro rings, flush aggregated packets */
2797 	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2798 
2799 	/* wait for the relinquishments to complete */
2800 	for (i = 0; i < 10; i++) {
2801 		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2802 		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2803 			msleep(20);
2804 			continue;
2805 		}
2806 		break;
2807 	}
2808 
2809 	/* invalidate lro rings */
2810 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2811 		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2812 
2813 	/* disable HW LRO */
2814 	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2815 }
2816 
2817 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2818 {
2819 	u32 reg_val;
2820 
2821 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2822 
2823 	/* invalidate the IP setting */
2824 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2825 
2826 	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2827 
2828 	/* validate the IP setting */
2829 	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2830 }
2831 
2832 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2833 {
2834 	u32 reg_val;
2835 
2836 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2837 
2838 	/* invalidate the IP setting */
2839 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2840 
2841 	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2842 }
2843 
2844 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2845 {
2846 	int cnt = 0;
2847 	int i;
2848 
2849 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2850 		if (mac->hwlro_ip[i])
2851 			cnt++;
2852 	}
2853 
2854 	return cnt;
2855 }
2856 
2857 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2858 				struct ethtool_rxnfc *cmd)
2859 {
2860 	struct ethtool_rx_flow_spec *fsp =
2861 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2862 	struct mtk_mac *mac = netdev_priv(dev);
2863 	struct mtk_eth *eth = mac->hw;
2864 	int hwlro_idx;
2865 
2866 	if ((fsp->flow_type != TCP_V4_FLOW) ||
2867 	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2868 	    (fsp->location > 1))
2869 		return -EINVAL;
2870 
2871 	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2872 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2873 
2874 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2875 
2876 	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2877 
2878 	return 0;
2879 }
2880 
2881 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2882 				struct ethtool_rxnfc *cmd)
2883 {
2884 	struct ethtool_rx_flow_spec *fsp =
2885 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2886 	struct mtk_mac *mac = netdev_priv(dev);
2887 	struct mtk_eth *eth = mac->hw;
2888 	int hwlro_idx;
2889 
2890 	if (fsp->location > 1)
2891 		return -EINVAL;
2892 
2893 	mac->hwlro_ip[fsp->location] = 0;
2894 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2895 
2896 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2897 
2898 	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2899 
2900 	return 0;
2901 }
2902 
2903 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2904 {
2905 	struct mtk_mac *mac = netdev_priv(dev);
2906 	struct mtk_eth *eth = mac->hw;
2907 	int i, hwlro_idx;
2908 
2909 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2910 		mac->hwlro_ip[i] = 0;
2911 		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2912 
2913 		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2914 	}
2915 
2916 	mac->hwlro_ip_cnt = 0;
2917 }
2918 
2919 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2920 				    struct ethtool_rxnfc *cmd)
2921 {
2922 	struct mtk_mac *mac = netdev_priv(dev);
2923 	struct ethtool_rx_flow_spec *fsp =
2924 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2925 
2926 	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2927 		return -EINVAL;
2928 
2929 	/* only the tcp dst ipv4 address is meaningful; the other fields are ignored */
2930 	fsp->flow_type = TCP_V4_FLOW;
2931 	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2932 	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2933 
2934 	fsp->h_u.tcp_ip4_spec.ip4src = 0;
2935 	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2936 	fsp->h_u.tcp_ip4_spec.psrc = 0;
2937 	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2938 	fsp->h_u.tcp_ip4_spec.pdst = 0;
2939 	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2940 	fsp->h_u.tcp_ip4_spec.tos = 0;
2941 	fsp->m_u.tcp_ip4_spec.tos = 0xff;
2942 
2943 	return 0;
2944 }
2945 
2946 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2947 				  struct ethtool_rxnfc *cmd,
2948 				  u32 *rule_locs)
2949 {
2950 	struct mtk_mac *mac = netdev_priv(dev);
2951 	int cnt = 0;
2952 	int i;
2953 
2954 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2955 		if (mac->hwlro_ip[i]) {
2956 			rule_locs[cnt] = i;
2957 			cnt++;
2958 		}
2959 	}
2960 
2961 	cmd->rule_cnt = cnt;
2962 
2963 	return 0;
2964 }
2965 
2966 static netdev_features_t mtk_fix_features(struct net_device *dev,
2967 					  netdev_features_t features)
2968 {
2969 	if (!(features & NETIF_F_LRO)) {
2970 		struct mtk_mac *mac = netdev_priv(dev);
2971 		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2972 
2973 		if (ip_cnt) {
2974 			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2975 
2976 			features |= NETIF_F_LRO;
2977 		}
2978 	}
2979 
2980 	return features;
2981 }
2982 
2983 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2984 {
2985 	netdev_features_t diff = dev->features ^ features;
2986 
2987 	if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
2988 		mtk_hwlro_netdev_disable(dev);
2989 
2990 	return 0;
2991 }
2992 
2993 /* wait for DMA to finish whatever it is doing before we start using it again */
2994 static int mtk_dma_busy_wait(struct mtk_eth *eth)
2995 {
2996 	unsigned int reg;
2997 	int ret;
2998 	u32 val;
2999 
3000 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3001 		reg = eth->soc->reg_map->qdma.glo_cfg;
3002 	else
3003 		reg = eth->soc->reg_map->pdma.glo_cfg;
3004 
3005 	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
3006 					!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
3007 					5, MTK_DMA_BUSY_TIMEOUT_US);
3008 	if (ret)
3009 		dev_err(eth->dev, "DMA init timeout\n");
3010 
3011 	return ret;
3012 }
3013 
3014 static int mtk_dma_init(struct mtk_eth *eth)
3015 {
3016 	int err;
3017 	u32 i;
3018 
3019 	if (mtk_dma_busy_wait(eth))
3020 		return -EBUSY;
3021 
3022 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3023 		/* QDMA needs scratch memory for internal reordering of the
3024 		 * descriptors
3025 		 */
3026 		err = mtk_init_fq_dma(eth);
3027 		if (err)
3028 			return err;
3029 	}
3030 
3031 	err = mtk_tx_alloc(eth);
3032 	if (err)
3033 		return err;
3034 
3035 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3036 		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
3037 		if (err)
3038 			return err;
3039 	}
3040 
3041 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
3042 	if (err)
3043 		return err;
3044 
3045 	if (eth->hwlro) {
3046 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
3047 			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
3048 			if (err)
3049 				return err;
3050 		}
3051 		err = mtk_hwlro_rx_init(eth);
3052 		if (err)
3053 			return err;
3054 	}
3055 
3056 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3057 		/* Enable random early drop and set drop threshold
3058 		 * automatically
3059 		 */
3060 		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
3061 			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3062 		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
3063 	}
3064 
3065 	return 0;
3066 }
3067 
3068 static void mtk_dma_free(struct mtk_eth *eth)
3069 {
3070 	const struct mtk_soc_data *soc = eth->soc;
3071 	int i;
3072 
3073 	for (i = 0; i < MTK_MAX_DEVS; i++)
3074 		if (eth->netdev[i])
3075 			netdev_reset_queue(eth->netdev[i]);
3076 	if (eth->scratch_ring) {
3077 		dma_free_coherent(eth->dma_dev,
3078 				  MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
3079 				  eth->scratch_ring, eth->phy_scratch_ring);
3080 		eth->scratch_ring = NULL;
3081 		eth->phy_scratch_ring = 0;
3082 	}
3083 	mtk_tx_clean(eth);
3084 	mtk_rx_clean(eth, &eth->rx_ring[0]);
3085 	mtk_rx_clean(eth, &eth->rx_ring_qdma);
3086 
3087 	if (eth->hwlro) {
3088 		mtk_hwlro_rx_uninit(eth);
3089 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3090 			mtk_rx_clean(eth, &eth->rx_ring[i]);
3091 	}
3092 
3093 	kfree(eth->scratch_head);
3094 }
3095 
3096 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3097 {
3098 	u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3099 
3100 	return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3101 	       (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3102 	       (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3103 }
3104 
3105 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3106 {
3107 	struct mtk_mac *mac = netdev_priv(dev);
3108 	struct mtk_eth *eth = mac->hw;
3109 
3110 	if (test_bit(MTK_RESETTING, &eth->state))
3111 		return;
3112 
3113 	if (!mtk_hw_reset_check(eth))
3114 		return;
3115 
3116 	eth->netdev[mac->id]->stats.tx_errors++;
3117 	netif_err(eth, tx_err, dev, "transmit timed out\n");
3118 
3119 	schedule_work(&eth->pending_work);
3120 }
3121 
3122 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3123 {
3124 	struct mtk_eth *eth = _eth;
3125 
3126 	eth->rx_events++;
3127 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
3128 		__napi_schedule(&eth->rx_napi);
3129 		mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3130 	}
3131 
3132 	return IRQ_HANDLED;
3133 }
3134 
3135 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3136 {
3137 	struct mtk_eth *eth = _eth;
3138 
3139 	eth->tx_events++;
3140 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
3141 		__napi_schedule(&eth->tx_napi);
3142 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3143 	}
3144 
3145 	return IRQ_HANDLED;
3146 }
3147 
3148 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3149 {
3150 	struct mtk_eth *eth = _eth;
3151 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3152 
3153 	if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3154 	    eth->soc->txrx.rx_irq_done_mask) {
3155 		if (mtk_r32(eth, reg_map->pdma.irq_status) &
3156 		    eth->soc->txrx.rx_irq_done_mask)
3157 			mtk_handle_irq_rx(irq, _eth);
3158 	}
3159 	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3160 		if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3161 			mtk_handle_irq_tx(irq, _eth);
3162 	}
3163 
3164 	return IRQ_HANDLED;
3165 }
3166 
3167 #ifdef CONFIG_NET_POLL_CONTROLLER
3168 static void mtk_poll_controller(struct net_device *dev)
3169 {
3170 	struct mtk_mac *mac = netdev_priv(dev);
3171 	struct mtk_eth *eth = mac->hw;
3172 
3173 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3174 	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3175 	mtk_handle_irq_rx(eth->irq[2], dev);
3176 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3177 	mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
3178 }
3179 #endif
3180 
3181 static int mtk_start_dma(struct mtk_eth *eth)
3182 {
3183 	u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3184 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3185 	int err;
3186 
3187 	err = mtk_dma_init(eth);
3188 	if (err) {
3189 		mtk_dma_free(eth);
3190 		return err;
3191 	}
3192 
3193 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3194 		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3195 		val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3196 		       MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3197 		       MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3198 
3199 		if (mtk_is_netsys_v2_or_greater(eth))
3200 			val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3201 			       MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3202 			       MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
3203 		else
3204 			val |= MTK_RX_BT_32DWORDS;
3205 		mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3206 
3207 		mtk_w32(eth,
3208 			MTK_RX_DMA_EN | rx_2b_offset |
3209 			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3210 			reg_map->pdma.glo_cfg);
3211 	} else {
3212 		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3213 			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3214 			reg_map->pdma.glo_cfg);
3215 	}
3216 
3217 	return 0;
3218 }
3219 
3220 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
3221 {
3222 	int i;
3223 
3224 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3225 		return;
3226 
3227 	for (i = 0; i < MTK_MAX_DEVS; i++) {
3228 		u32 val;
3229 
3230 		if (!eth->netdev[i])
3231 			continue;
3232 
3233 		val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
3234 
3235 		/* by default, set up the forwarding port to send frames to PDMA */
3236 		val &= ~0xffff;
3237 
3238 		/* Enable RX checksum */
3239 		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3240 
3241 		val |= config;
3242 
3243 		if (netdev_uses_dsa(eth->netdev[i]))
3244 			val |= MTK_GDMA_SPECIAL_TAG;
3245 
3246 		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
3247 	}
3248 	/* Reset and enable PSE */
3249 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
3250 	mtk_w32(eth, 0, MTK_RST_GL);
3251 }
3252 
3253 
3254 static bool mtk_uses_dsa(struct net_device *dev)
3255 {
3256 #if IS_ENABLED(CONFIG_NET_DSA)
3257 	return netdev_uses_dsa(dev) &&
3258 	       dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3259 #else
3260 	return false;
3261 #endif
3262 }
3263 
3264 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3265 {
3266 	struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3267 	struct mtk_eth *eth = mac->hw;
3268 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3269 	struct ethtool_link_ksettings s;
3270 	struct net_device *ldev;
3271 	struct list_head *iter;
3272 	struct dsa_port *dp;
3273 
3274 	if (event != NETDEV_CHANGE)
3275 		return NOTIFY_DONE;
3276 
3277 	netdev_for_each_lower_dev(dev, ldev, iter) {
3278 		if (netdev_priv(ldev) == mac)
3279 			goto found;
3280 	}
3281 
3282 	return NOTIFY_DONE;
3283 
3284 found:
3285 	if (!dsa_slave_dev_check(dev))
3286 		return NOTIFY_DONE;
3287 
3288 	if (__ethtool_get_link_ksettings(dev, &s))
3289 		return NOTIFY_DONE;
3290 
3291 	if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3292 		return NOTIFY_DONE;
3293 
3294 	dp = dsa_port_from_netdev(dev);
3295 	if (dp->index >= MTK_QDMA_NUM_QUEUES)
3296 		return NOTIFY_DONE;
3297 
3298 	if (mac->speed > 0 && mac->speed <= s.base.speed)
3299 		s.base.speed = 0;
3300 
3301 	mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3302 
3303 	return NOTIFY_DONE;
3304 }
3305 
3306 static int mtk_open(struct net_device *dev)
3307 {
3308 	struct mtk_mac *mac = netdev_priv(dev);
3309 	struct mtk_eth *eth = mac->hw;
3310 	int i, err;
3311 
3312 	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3313 	if (err) {
3314 		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3315 			   err);
3316 		return err;
3317 	}
3318 
3319 	/* we run 2 netdevs on the same dma ring so we only bring it up once */
3320 	if (!refcount_read(&eth->dma_refcnt)) {
3321 		const struct mtk_soc_data *soc = eth->soc;
3322 		u32 gdm_config;
3323 		int i;
3324 
3325 		err = mtk_start_dma(eth);
3326 		if (err) {
3327 			phylink_disconnect_phy(mac->phylink);
3328 			return err;
3329 		}
3330 
3331 		for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3332 			mtk_ppe_start(eth->ppe[i]);
3333 
3334 		gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
3335 						  : MTK_GDMA_TO_PDMA;
3336 		mtk_gdm_config(eth, gdm_config);
3337 
3338 		napi_enable(&eth->tx_napi);
3339 		napi_enable(&eth->rx_napi);
3340 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3341 		mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
3342 		refcount_set(&eth->dma_refcnt, 1);
3343 	}
3344 	else
3345 		refcount_inc(&eth->dma_refcnt);
3346 
3347 	phylink_start(mac->phylink);
3348 	netif_tx_start_all_queues(dev);
3349 
3350 	if (mtk_is_netsys_v2_or_greater(eth))
3351 		return 0;
3352 
3353 	if (mtk_uses_dsa(dev) && !eth->prog) {
3354 		for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3355 			struct metadata_dst *md_dst = eth->dsa_meta[i];
3356 
3357 			if (md_dst)
3358 				continue;
3359 
3360 			md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3361 						    GFP_KERNEL);
3362 			if (!md_dst)
3363 				return -ENOMEM;
3364 
3365 			md_dst->u.port_info.port_id = i;
3366 			eth->dsa_meta[i] = md_dst;
3367 		}
3368 	} else {
3369 		/* Hardware DSA untagging and VLAN RX offloading need to be
3370 		 * disabled if at least one MAC does not use DSA.
3371 		 */
3372 		u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3373 
3374 		val &= ~MTK_CDMP_STAG_EN;
3375 		mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3376 
3377 		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3378 	}
3379 
3380 	return 0;
3381 }
3382 
3383 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3384 {
3385 	u32 val;
3386 	int i;
3387 
3388 	/* stop the dma engine */
3389 	spin_lock_bh(&eth->page_lock);
3390 	val = mtk_r32(eth, glo_cfg);
3391 	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3392 		glo_cfg);
3393 	spin_unlock_bh(&eth->page_lock);
3394 
3395 	/* wait for dma stop */
3396 	for (i = 0; i < 10; i++) {
3397 		val = mtk_r32(eth, glo_cfg);
3398 		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3399 			msleep(20);
3400 			continue;
3401 		}
3402 		break;
3403 	}
3404 }
3405 
3406 static int mtk_stop(struct net_device *dev)
3407 {
3408 	struct mtk_mac *mac = netdev_priv(dev);
3409 	struct mtk_eth *eth = mac->hw;
3410 	int i;
3411 
3412 	phylink_stop(mac->phylink);
3413 
3414 	netif_tx_disable(dev);
3415 
3416 	phylink_disconnect_phy(mac->phylink);
3417 
3418 	/* only shutdown DMA if this is the last user */
3419 	if (!refcount_dec_and_test(&eth->dma_refcnt))
3420 		return 0;
3421 
3422 	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
3423 
3424 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3425 	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3426 	napi_disable(&eth->tx_napi);
3427 	napi_disable(&eth->rx_napi);
3428 
3429 	cancel_work_sync(&eth->rx_dim.work);
3430 	cancel_work_sync(&eth->tx_dim.work);
3431 
3432 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3433 		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3434 	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3435 
3436 	mtk_dma_free(eth);
3437 
3438 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3439 		mtk_ppe_stop(eth->ppe[i]);
3440 
3441 	return 0;
3442 }
3443 
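/* Install or remove an XDP program. Switching between "program attached"
 * and "no program" requires the DMA rings to be rebuilt, so the device is
 * briefly stopped and re-opened when the interface is running.
 */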
3444 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3445 			 struct netlink_ext_ack *extack)
3446 {
3447 	struct mtk_mac *mac = netdev_priv(dev);
3448 	struct mtk_eth *eth = mac->hw;
3449 	struct bpf_prog *old_prog;
3450 	bool need_update;
3451 
3452 	if (eth->hwlro) {
3453 		NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3454 		return -EOPNOTSUPP;
3455 	}
3456 
3457 	if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3458 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3459 		return -EOPNOTSUPP;
3460 	}
3461 
3462 	need_update = !!eth->prog != !!prog;
3463 	if (netif_running(dev) && need_update)
3464 		mtk_stop(dev);
3465 
3466 	old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3467 	if (old_prog)
3468 		bpf_prog_put(old_prog);
3469 
3470 	if (netif_running(dev) && need_update)
3471 		return mtk_open(dev);
3472 
3473 	return 0;
3474 }
3475 
3476 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3477 {
3478 	switch (xdp->command) {
3479 	case XDP_SETUP_PROG:
3480 		return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3481 	default:
3482 		return -EINVAL;
3483 	}
3484 }
3485 
3486 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3487 {
3488 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3489 			   reset_bits,
3490 			   reset_bits);
3491 
3492 	usleep_range(1000, 1100);
3493 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3494 			   reset_bits,
3495 			   ~reset_bits);
3496 	mdelay(10);
3497 }
3498 
3499 static void mtk_clk_disable(struct mtk_eth *eth)
3500 {
3501 	int clk;
3502 
3503 	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3504 		clk_disable_unprepare(eth->clks[clk]);
3505 }
3506 
3507 static int mtk_clk_enable(struct mtk_eth *eth)
3508 {
3509 	int clk, ret;
3510 
3511 	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3512 		ret = clk_prepare_enable(eth->clks[clk]);
3513 		if (ret)
3514 			goto err_disable_clks;
3515 	}
3516 
3517 	return 0;
3518 
3519 err_disable_clks:
3520 	while (--clk >= 0)
3521 		clk_disable_unprepare(eth->clks[clk]);
3522 
3523 	return ret;
3524 }
3525 
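/* Apply the RX interrupt-moderation profile chosen by Net DIM: the
 * suggested delay in microseconds is converted to the hardware's 20us ticks
 * and written to the PDMA (and, when present, QDMA) delay-interrupt
 * register together with the packet-count threshold.
 */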
3526 static void mtk_dim_rx(struct work_struct *work)
3527 {
3528 	struct dim *dim = container_of(work, struct dim, work);
3529 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3530 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3531 	struct dim_cq_moder cur_profile;
3532 	u32 val, cur;
3533 
3534 	cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3535 						dim->profile_ix);
3536 	spin_lock_bh(&eth->dim_lock);
3537 
3538 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3539 	val &= MTK_PDMA_DELAY_TX_MASK;
3540 	val |= MTK_PDMA_DELAY_RX_EN;
3541 
3542 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3543 	val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3544 
3545 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3546 	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3547 
3548 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3549 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3550 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3551 
3552 	spin_unlock_bh(&eth->dim_lock);
3553 
3554 	dim->state = DIM_START_MEASURE;
3555 }
3556 
3557 static void mtk_dim_tx(struct work_struct *work)
3558 {
3559 	struct dim *dim = container_of(work, struct dim, work);
3560 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3561 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3562 	struct dim_cq_moder cur_profile;
3563 	u32 val, cur;
3564 
3565 	cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3566 						dim->profile_ix);
3567 	spin_lock_bh(&eth->dim_lock);
3568 
3569 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3570 	val &= MTK_PDMA_DELAY_RX_MASK;
3571 	val |= MTK_PDMA_DELAY_TX_EN;
3572 
3573 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3574 	val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3575 
3576 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3577 	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3578 
3579 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3580 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3581 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3582 
3583 	spin_unlock_bh(&eth->dim_lock);
3584 
3585 	dim->state = DIM_START_MEASURE;
3586 }
3587 
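/* Program the per-MAC maximum RX frame length by rounding the requested
 * value up to the nearest bucket supported by the MCR register
 * (1518/1536/1552/2048 bytes).
 */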
3588 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3589 {
3590 	struct mtk_eth *eth = mac->hw;
3591 	u32 mcr_cur, mcr_new;
3592 
3593 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3594 		return;
3595 
3596 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3597 	mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3598 
3599 	if (val <= 1518)
3600 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3601 	else if (val <= 1536)
3602 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3603 	else if (val <= 1552)
3604 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3605 	else
3606 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3607 
3608 	if (mcr_new != mcr_cur)
3609 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3610 }
3611 
3612 static void mtk_hw_reset(struct mtk_eth *eth)
3613 {
3614 	u32 val;
3615 
3616 	if (mtk_is_netsys_v2_or_greater(eth)) {
3617 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3618 		val = RSTCTRL_PPE0_V2;
3619 	} else {
3620 		val = RSTCTRL_PPE0;
3621 	}
3622 
3623 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3624 		val |= RSTCTRL_PPE1;
3625 
3626 	ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3627 
3628 	if (mtk_is_netsys_v2_or_greater(eth))
3629 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3630 			     0x3ffffff);
3631 }
3632 
3633 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3634 {
3635 	u32 val;
3636 
3637 	regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3638 	return val;
3639 }
3640 
3641 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3642 {
3643 	u32 rst_mask, val;
3644 
3645 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3646 			   RSTCTRL_FE);
3647 	if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3648 				      val & RSTCTRL_FE, 1, 1000)) {
3649 		dev_err(eth->dev, "warm reset failed\n");
3650 		mtk_hw_reset(eth);
3651 		return;
3652 	}
3653 
3654 	if (mtk_is_netsys_v2_or_greater(eth))
3655 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3656 	else
3657 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3658 
3659 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3660 		rst_mask |= RSTCTRL_PPE1;
3661 
3662 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3663 
3664 	udelay(1);
3665 	val = mtk_hw_reset_read(eth);
3666 	if (!(val & rst_mask))
3667 		dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3668 			val, rst_mask);
3669 
3670 	rst_mask |= RSTCTRL_FE;
3671 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3672 
3673 	udelay(1);
3674 	val = mtk_hw_reset_read(eth);
3675 	if (val & rst_mask)
3676 		dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
3677 			val, rst_mask);
3678 }
3679 
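/* Heuristic stall detection for the WDMA, QDMA and ADMA paths. A hang is
 * only reported after the same condition has been seen on three consecutive
 * polls (the *_hang_count > 2 checks), so transient backpressure does not
 * trigger a reset.
 */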
3680 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3681 {
3682 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3683 	bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
3684 	bool oq_hang, cdm1_busy, adma_busy;
3685 	bool wtx_busy, cdm_full, oq_free;
3686 	u32 wdidx, val, gdm1_fc, gdm2_fc;
3687 	bool qfsm_hang, qfwd_hang;
3688 	bool ret = false;
3689 
3690 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3691 		return false;
3692 
3693 	/* WDMA sanity checks */
3694 	wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
3695 
3696 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
3697 	wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
3698 
3699 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
3700 	cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
3701 
3702 	oq_free  = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
3703 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
3704 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
3705 
3706 	if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
3707 		if (++eth->reset.wdma_hang_count > 2) {
3708 			eth->reset.wdma_hang_count = 0;
3709 			ret = true;
3710 		}
3711 		goto out;
3712 	}
3713 
3714 	/* QDMA sanity checks */
3715 	qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
3716 	qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
3717 
3718 	gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
3719 	gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
3720 	gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
3721 	gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
3722 	gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
3723 	gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
3724 
3725 	if (qfsm_hang && qfwd_hang &&
3726 	    ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
3727 	     (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
3728 		if (++eth->reset.qdma_hang_count > 2) {
3729 			eth->reset.qdma_hang_count = 0;
3730 			ret = true;
3731 		}
3732 		goto out;
3733 	}
3734 
3735 	/* ADMA sanity checks */
3736 	oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
3737 	cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
3738 	adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
3739 		    !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
3740 
3741 	if (oq_hang && cdm1_busy && adma_busy) {
3742 		if (++eth->reset.adma_hang_count > 2) {
3743 			eth->reset.adma_hang_count = 0;
3744 			ret = true;
3745 		}
3746 		goto out;
3747 	}
3748 
3749 	eth->reset.wdma_hang_count = 0;
3750 	eth->reset.qdma_hang_count = 0;
3751 	eth->reset.adma_hang_count = 0;
3752 out:
3753 	eth->reset.wdidx = wdidx;
3754 
3755 	return ret;
3756 }
3757 
3758 static void mtk_hw_reset_monitor_work(struct work_struct *work)
3759 {
3760 	struct delayed_work *del_work = to_delayed_work(work);
3761 	struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
3762 					   reset.monitor_work);
3763 
3764 	if (test_bit(MTK_RESETTING, &eth->state))
3765 		goto out;
3766 
3767 	/* DMA stuck checks */
3768 	if (mtk_hw_check_dma_hang(eth))
3769 		schedule_work(&eth->pending_work);
3770 
3771 out:
3772 	schedule_delayed_work(&eth->reset.monitor_work,
3773 			      MTK_DMA_MONITOR_TIMEOUT);
3774 }
3775 
3776 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
3777 {
3778 	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3779 		       ETHSYS_DMA_AG_MAP_PPE;
3780 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3781 	int i, val, ret;
3782 
3783 	if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
3784 		return 0;
3785 
3786 	if (!reset) {
3787 		pm_runtime_enable(eth->dev);
3788 		pm_runtime_get_sync(eth->dev);
3789 
3790 		ret = mtk_clk_enable(eth);
3791 		if (ret)
3792 			goto err_disable_pm;
3793 	}
3794 
3795 	if (eth->ethsys)
3796 		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3797 				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3798 
3799 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3800 		ret = device_reset(eth->dev);
3801 		if (ret) {
3802 			dev_err(eth->dev, "MAC reset failed!\n");
3803 			goto err_disable_pm;
3804 		}
3805 
3806 		/* set interrupt delays based on current Net DIM sample */
3807 		mtk_dim_rx(&eth->rx_dim.work);
3808 		mtk_dim_tx(&eth->tx_dim.work);
3809 
3810 		/* disable delay and normal interrupt */
3811 		mtk_tx_irq_disable(eth, ~0);
3812 		mtk_rx_irq_disable(eth, ~0);
3813 
3814 		return 0;
3815 	}
3816 
3817 	msleep(100);
3818 
3819 	if (reset)
3820 		mtk_hw_warm_reset(eth);
3821 	else
3822 		mtk_hw_reset(eth);
3823 
3824 	if (mtk_is_netsys_v2_or_greater(eth)) {
3825 		/* Set FE to PDMAv2 if necessary */
3826 		val = mtk_r32(eth, MTK_FE_GLO_MISC);
3827 		mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
3828 	}
3829 
3830 	if (eth->pctl) {
3831 		/* Set GE2 driving and slew rate */
3832 		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3833 
3834 		/* set GE2 TDSEL */
3835 		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3836 
3837 		/* set GE2 TUNE */
3838 		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3839 	}
3840 
3841 	/* Set link-down as the default for each GMAC. Each MAC's own MCR is
3842 	 * set up with the appropriate value when mtk_mac_config() is
3843 	 * invoked.
3844 	 */
3845 	for (i = 0; i < MTK_MAX_DEVS; i++) {
3846 		struct net_device *dev = eth->netdev[i];
3847 
3848 		if (!dev)
3849 			continue;
3850 
3851 		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3852 		mtk_set_mcr_max_rx(netdev_priv(dev),
3853 				   dev->mtu + MTK_RX_ETH_HLEN);
3854 	}
3855 
3856 	/* Instruct the CDM to parse the MTK special tag on packets coming
3857 	 * from the CPU; this also works for untagged packets.
3858 	 */
3859 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3860 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3861 	if (mtk_is_netsys_v1(eth)) {
3862 		val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3863 		mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3864 
3865 		mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3866 	}
3867 
3868 	/* set interrupt delays based on current Net DIM sample */
3869 	mtk_dim_rx(&eth->rx_dim.work);
3870 	mtk_dim_tx(&eth->tx_dim.work);
3871 
3872 	/* disable delay and normal interrupt */
3873 	mtk_tx_irq_disable(eth, ~0);
3874 	mtk_rx_irq_disable(eth, ~0);
3875 
3876 	/* FE int grouping */
3877 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3878 	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
3879 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3880 	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
3881 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3882 
3883 	if (mtk_is_netsys_v3_or_greater(eth)) {
3884 		/* PSE should not drop port1, port8 and port9 packets */
3885 		mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
3886 
3887 		/* GDM and CDM Threshold */
3888 		mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
3889 		mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
3890 
3891 		/* Disable GDM1 RX CRC stripping */
3892 		mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
3893 
3894 		/* The PSE GDM3 MIB counters have incorrect hardware default
3895 		 * values, so read-clear them beforehand to keep ethtool from
3896 		 * reporting wrong MIB values.
3897 		 */
3898 		for (i = 0; i < 0x80; i += 0x4)
3899 			mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
3900 	} else if (!mtk_is_netsys_v1(eth)) {
3901 		/* PSE should not drop port8 and port9 packets from WDMA Tx */
3902 		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
3903 
3904 		/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
3905 		mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
3906 
3907 		/* PSE Free Queue Flow Control  */
3908 		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3909 
3910 		/* PSE config input queue threshold */
3911 		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3912 		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3913 		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3914 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3915 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3916 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3917 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
3918 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
3919 
3920 		/* PSE config output queue threshold */
3921 		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3922 		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3923 		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3924 		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3925 		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3926 		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3927 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3928 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
3929 
3930 		/* GDM and CDM Threshold */
3931 		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3932 		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3933 		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3934 		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3935 		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3936 		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
3937 	}
3938 
3939 	return 0;
3940 
3941 err_disable_pm:
3942 	if (!reset) {
3943 		pm_runtime_put_sync(eth->dev);
3944 		pm_runtime_disable(eth->dev);
3945 	}
3946 
3947 	return ret;
3948 }
3949 
3950 static int mtk_hw_deinit(struct mtk_eth *eth)
3951 {
3952 	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3953 		return 0;
3954 
3955 	mtk_clk_disable(eth);
3956 
3957 	pm_runtime_put_sync(eth->dev);
3958 	pm_runtime_disable(eth->dev);
3959 
3960 	return 0;
3961 }
3962 
3963 static void mtk_uninit(struct net_device *dev)
3964 {
3965 	struct mtk_mac *mac = netdev_priv(dev);
3966 	struct mtk_eth *eth = mac->hw;
3967 
3968 	phylink_disconnect_phy(mac->phylink);
3969 	mtk_tx_irq_disable(eth, ~0);
3970 	mtk_rx_irq_disable(eth, ~0);
3971 }
3972 
3973 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
3974 {
3975 	int length = new_mtu + MTK_RX_ETH_HLEN;
3976 	struct mtk_mac *mac = netdev_priv(dev);
3977 	struct mtk_eth *eth = mac->hw;
3978 
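	/* With an XDP program attached, frames must fit into a single
	 * page_pool buffer, so reject MTUs larger than MTK_PP_MAX_BUF_SIZE.
	 */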
3979 	if (rcu_access_pointer(eth->prog) &&
3980 	    length > MTK_PP_MAX_BUF_SIZE) {
3981 		netdev_err(dev, "Invalid MTU for XDP mode\n");
3982 		return -EINVAL;
3983 	}
3984 
3985 	mtk_set_mcr_max_rx(mac, length);
3986 	dev->mtu = new_mtu;
3987 
3988 	return 0;
3989 }
3990 
3991 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3992 {
3993 	struct mtk_mac *mac = netdev_priv(dev);
3994 
3995 	switch (cmd) {
3996 	case SIOCGMIIPHY:
3997 	case SIOCGMIIREG:
3998 	case SIOCSMIIREG:
3999 		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4000 	default:
4001 		break;
4002 	}
4003 
4004 	return -EOPNOTSUPP;
4005 }
4006 
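/* Quiesce the frame engine before a reset: force the FE ports and both
 * GMACs link-down, prepare the PPE instances for reset and mask all NETSYS
 * interrupts so no new traffic is accepted while the reset runs.
 */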
4007 static void mtk_prepare_for_reset(struct mtk_eth *eth)
4008 {
4009 	u32 val;
4010 	int i;
4011 
4012 	/* disable FE P3 and P4 */
4013 	val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3;
4014 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4015 		val |= MTK_FE_LINK_DOWN_P4;
4016 	mtk_w32(eth, val, MTK_FE_GLO_CFG);
4017 
4018 	/* adjust PPE configurations to prepare for reset */
4019 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
4020 		mtk_ppe_prepare_reset(eth->ppe[i]);
4021 
4022 	/* disable NETSYS interrupts */
4023 	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
4024 
4025 	/* force link down GMAC */
4026 	for (i = 0; i < 2; i++) {
4027 		val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
4028 		mtk_w32(eth, val, MTK_MAC_MCR(i));
4029 	}
4030 }
4031 
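/* Deferred full frame engine reset: stop every running netdev, warm-reset
 * the hardware via mtk_hw_init(eth, true) and bring the interfaces back up.
 * Runs under RTNL with MTK_RESETTING set so other paths can back off.
 */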
4032 static void mtk_pending_work(struct work_struct *work)
4033 {
4034 	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
4035 	unsigned long restart = 0;
4036 	u32 val;
4037 	int i;
4038 
4039 	rtnl_lock();
4040 	set_bit(MTK_RESETTING, &eth->state);
4041 
4042 	mtk_prepare_for_reset(eth);
4043 	mtk_wed_fe_reset();
4044 	/* Run the reset preliminary configuration again to avoid any possible
4045 	 * race during the FE reset, since it may run with the RTNL lock released.
4046 	 */
4047 	mtk_prepare_for_reset(eth);
4048 
4049 	/* stop all devices to make sure that dma is properly shut down */
4050 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4051 		if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
4052 			continue;
4053 
4054 		mtk_stop(eth->netdev[i]);
4055 		__set_bit(i, &restart);
4056 	}
4057 
4058 	usleep_range(15000, 16000);
4059 
4060 	if (eth->dev->pins)
4061 		pinctrl_select_state(eth->dev->pins->p,
4062 				     eth->dev->pins->default_state);
4063 	mtk_hw_init(eth, true);
4064 
4065 	/* restart DMA and enable IRQs */
4066 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4067 		if (!eth->netdev[i] || !test_bit(i, &restart))
4068 			continue;
4069 
4070 		if (mtk_open(eth->netdev[i])) {
4071 			netif_alert(eth, ifup, eth->netdev[i],
4072 				    "Driver up/down cycle failed\n");
4073 			dev_close(eth->netdev[i]);
4074 		}
4075 	}
4076 
4077 	/* enable FE P3 and P4 */
4078 	val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3;
4079 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4080 		val &= ~MTK_FE_LINK_DOWN_P4;
4081 	mtk_w32(eth, val, MTK_FE_GLO_CFG);
4082 
4083 	clear_bit(MTK_RESETTING, &eth->state);
4084 
4085 	mtk_wed_fe_reset_complete();
4086 
4087 	rtnl_unlock();
4088 }
4089 
4090 static int mtk_free_dev(struct mtk_eth *eth)
4091 {
4092 	int i;
4093 
4094 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4095 		if (!eth->netdev[i])
4096 			continue;
4097 		free_netdev(eth->netdev[i]);
4098 	}
4099 
4100 	for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4101 		if (!eth->dsa_meta[i])
4102 			break;
4103 		metadata_dst_free(eth->dsa_meta[i]);
4104 	}
4105 
4106 	return 0;
4107 }
4108 
4109 static int mtk_unreg_dev(struct mtk_eth *eth)
4110 {
4111 	int i;
4112 
4113 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4114 		struct mtk_mac *mac;

4115 		if (!eth->netdev[i])
4116 			continue;
4117 		mac = netdev_priv(eth->netdev[i]);
4118 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4119 			unregister_netdevice_notifier(&mac->device_notifier);
4120 		unregister_netdev(eth->netdev[i]);
4121 	}
4122 
4123 	return 0;
4124 }
4125 
4126 static void mtk_sgmii_destroy(struct mtk_eth *eth)
4127 {
4128 	int i;
4129 
4130 	for (i = 0; i < MTK_MAX_DEVS; i++)
4131 		mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
4132 }
4133 
4134 static int mtk_cleanup(struct mtk_eth *eth)
4135 {
4136 	mtk_sgmii_destroy(eth);
4137 	mtk_unreg_dev(eth);
4138 	mtk_free_dev(eth);
4139 	cancel_work_sync(&eth->pending_work);
4140 	cancel_delayed_work_sync(&eth->reset.monitor_work);
4141 
4142 	return 0;
4143 }
4144 
4145 static int mtk_get_link_ksettings(struct net_device *ndev,
4146 				  struct ethtool_link_ksettings *cmd)
4147 {
4148 	struct mtk_mac *mac = netdev_priv(ndev);
4149 
4150 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4151 		return -EBUSY;
4152 
4153 	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4154 }
4155 
4156 static int mtk_set_link_ksettings(struct net_device *ndev,
4157 				  const struct ethtool_link_ksettings *cmd)
4158 {
4159 	struct mtk_mac *mac = netdev_priv(ndev);
4160 
4161 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4162 		return -EBUSY;
4163 
4164 	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4165 }
4166 
4167 static void mtk_get_drvinfo(struct net_device *dev,
4168 			    struct ethtool_drvinfo *info)
4169 {
4170 	struct mtk_mac *mac = netdev_priv(dev);
4171 
4172 	strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4173 	strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4174 	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4175 }
4176 
4177 static u32 mtk_get_msglevel(struct net_device *dev)
4178 {
4179 	struct mtk_mac *mac = netdev_priv(dev);
4180 
4181 	return mac->hw->msg_enable;
4182 }
4183 
4184 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4185 {
4186 	struct mtk_mac *mac = netdev_priv(dev);
4187 
4188 	mac->hw->msg_enable = value;
4189 }
4190 
4191 static int mtk_nway_reset(struct net_device *dev)
4192 {
4193 	struct mtk_mac *mac = netdev_priv(dev);
4194 
4195 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4196 		return -EBUSY;
4197 
4198 	if (!mac->phylink)
4199 		return -ENOTSUPP;
4200 
4201 	return phylink_ethtool_nway_reset(mac->phylink);
4202 }
4203 
4204 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4205 {
4206 	int i;
4207 
4208 	switch (stringset) {
4209 	case ETH_SS_STATS: {
4210 		struct mtk_mac *mac = netdev_priv(dev);
4211 
4212 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
4213 			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
4214 			data += ETH_GSTRING_LEN;
4215 		}
4216 		if (mtk_page_pool_enabled(mac->hw))
4217 			page_pool_ethtool_stats_get_strings(data);
4218 		break;
4219 	}
4220 	default:
4221 		break;
4222 	}
4223 }
4224 
4225 static int mtk_get_sset_count(struct net_device *dev, int sset)
4226 {
4227 	switch (sset) {
4228 	case ETH_SS_STATS: {
4229 		int count = ARRAY_SIZE(mtk_ethtool_stats);
4230 		struct mtk_mac *mac = netdev_priv(dev);
4231 
4232 		if (mtk_page_pool_enabled(mac->hw))
4233 			count += page_pool_ethtool_stats_get_count();
4234 		return count;
4235 	}
4236 	default:
4237 		return -EOPNOTSUPP;
4238 	}
4239 }
4240 
4241 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4242 {
4243 	struct page_pool_stats stats = {};
4244 	int i;
4245 
4246 	for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4247 		struct mtk_rx_ring *ring = &eth->rx_ring[i];
4248 
4249 		if (!ring->page_pool)
4250 			continue;
4251 
4252 		page_pool_get_stats(ring->page_pool, &stats);
4253 	}
4254 	page_pool_ethtool_stats_get(data, &stats);
4255 }
4256 
4257 static void mtk_get_ethtool_stats(struct net_device *dev,
4258 				  struct ethtool_stats *stats, u64 *data)
4259 {
4260 	struct mtk_mac *mac = netdev_priv(dev);
4261 	struct mtk_hw_stats *hwstats = mac->hw_stats;
4262 	u64 *data_src, *data_dst;
4263 	unsigned int start;
4264 	int i;
4265 
4266 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4267 		return;
4268 
4269 	if (netif_running(dev) && netif_device_present(dev)) {
4270 		if (spin_trylock_bh(&hwstats->stats_lock)) {
4271 			mtk_stats_update_mac(mac);
4272 			spin_unlock_bh(&hwstats->stats_lock);
4273 		}
4274 	}
4275 
4276 	data_src = (u64 *)hwstats;
4277 
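	/* Copy the counters inside a u64_stats fetch/retry loop so a
	 * concurrent writer on 32-bit systems cannot hand us torn 64-bit
	 * values; the copy is simply redone if an update raced with us.
	 */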
4278 	do {
4279 		data_dst = data;
4280 		start = u64_stats_fetch_begin(&hwstats->syncp);
4281 
4282 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4283 			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4284 		if (mtk_page_pool_enabled(mac->hw))
4285 			mtk_ethtool_pp_stats(mac->hw, data_dst);
4286 	} while (u64_stats_fetch_retry(&hwstats->syncp, start));
4287 }
4288 
4289 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4290 			 u32 *rule_locs)
4291 {
4292 	int ret = -EOPNOTSUPP;
4293 
4294 	switch (cmd->cmd) {
4295 	case ETHTOOL_GRXRINGS:
4296 		if (dev->hw_features & NETIF_F_LRO) {
4297 			cmd->data = MTK_MAX_RX_RING_NUM;
4298 			ret = 0;
4299 		}
4300 		break;
4301 	case ETHTOOL_GRXCLSRLCNT:
4302 		if (dev->hw_features & NETIF_F_LRO) {
4303 			struct mtk_mac *mac = netdev_priv(dev);
4304 
4305 			cmd->rule_cnt = mac->hwlro_ip_cnt;
4306 			ret = 0;
4307 		}
4308 		break;
4309 	case ETHTOOL_GRXCLSRULE:
4310 		if (dev->hw_features & NETIF_F_LRO)
4311 			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4312 		break;
4313 	case ETHTOOL_GRXCLSRLALL:
4314 		if (dev->hw_features & NETIF_F_LRO)
4315 			ret = mtk_hwlro_get_fdir_all(dev, cmd,
4316 						     rule_locs);
4317 		break;
4318 	default:
4319 		break;
4320 	}
4321 
4322 	return ret;
4323 }
4324 
4325 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4326 {
4327 	int ret = -EOPNOTSUPP;
4328 
4329 	switch (cmd->cmd) {
4330 	case ETHTOOL_SRXCLSRLINS:
4331 		if (dev->hw_features & NETIF_F_LRO)
4332 			ret = mtk_hwlro_add_ipaddr(dev, cmd);
4333 		break;
4334 	case ETHTOOL_SRXCLSRLDEL:
4335 		if (dev->hw_features & NETIF_F_LRO)
4336 			ret = mtk_hwlro_del_ipaddr(dev, cmd);
4337 		break;
4338 	default:
4339 		break;
4340 	}
4341 
4342 	return ret;
4343 }
4344 
4345 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4346 			    struct net_device *sb_dev)
4347 {
4348 	struct mtk_mac *mac = netdev_priv(dev);
4349 	unsigned int queue = 0;
4350 
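	/* DSA conduit traffic carries the user port's queue mapping; offset
	 * it by 3 so switch ports land on dedicated TX queues, presumably
	 * leaving the first queues to the GMACs themselves. Non-DSA traffic
	 * simply uses the queue matching the MAC id.
	 */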
4351 	if (netdev_uses_dsa(dev))
4352 		queue = skb_get_queue_mapping(skb) + 3;
4353 	else
4354 		queue = mac->id;
4355 
4356 	if (queue >= dev->num_tx_queues)
4357 		queue = 0;
4358 
4359 	return queue;
4360 }
4361 
4362 static const struct ethtool_ops mtk_ethtool_ops = {
4363 	.get_link_ksettings	= mtk_get_link_ksettings,
4364 	.set_link_ksettings	= mtk_set_link_ksettings,
4365 	.get_drvinfo		= mtk_get_drvinfo,
4366 	.get_msglevel		= mtk_get_msglevel,
4367 	.set_msglevel		= mtk_set_msglevel,
4368 	.nway_reset		= mtk_nway_reset,
4369 	.get_link		= ethtool_op_get_link,
4370 	.get_strings		= mtk_get_strings,
4371 	.get_sset_count		= mtk_get_sset_count,
4372 	.get_ethtool_stats	= mtk_get_ethtool_stats,
4373 	.get_rxnfc		= mtk_get_rxnfc,
4374 	.set_rxnfc              = mtk_set_rxnfc,
4375 };
4376 
4377 static const struct net_device_ops mtk_netdev_ops = {
4378 	.ndo_uninit		= mtk_uninit,
4379 	.ndo_open		= mtk_open,
4380 	.ndo_stop		= mtk_stop,
4381 	.ndo_start_xmit		= mtk_start_xmit,
4382 	.ndo_set_mac_address	= mtk_set_mac_address,
4383 	.ndo_validate_addr	= eth_validate_addr,
4384 	.ndo_eth_ioctl		= mtk_do_ioctl,
4385 	.ndo_change_mtu		= mtk_change_mtu,
4386 	.ndo_tx_timeout		= mtk_tx_timeout,
4387 	.ndo_get_stats64        = mtk_get_stats64,
4388 	.ndo_fix_features	= mtk_fix_features,
4389 	.ndo_set_features	= mtk_set_features,
4390 #ifdef CONFIG_NET_POLL_CONTROLLER
4391 	.ndo_poll_controller	= mtk_poll_controller,
4392 #endif
4393 	.ndo_setup_tc		= mtk_eth_setup_tc,
4394 	.ndo_bpf		= mtk_xdp,
4395 	.ndo_xdp_xmit		= mtk_xdp_xmit,
4396 	.ndo_select_queue	= mtk_select_queue,
4397 };
4398 
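/* Instantiate one GMAC from its device tree node: allocate the netdev,
 * parse the "reg" and "phy-mode" properties, create the per-MAC phylink
 * instance and advertise only the interface modes this SoC supports.
 */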
4399 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4400 {
4401 	const __be32 *_id = of_get_property(np, "reg", NULL);
4402 	phy_interface_t phy_mode;
4403 	struct phylink *phylink;
4404 	struct mtk_mac *mac;
4405 	int id, err;
4406 	int txqs = 1;
4407 	u32 val;
4408 
4409 	if (!_id) {
4410 		dev_err(eth->dev, "missing mac id\n");
4411 		return -EINVAL;
4412 	}
4413 
4414 	id = be32_to_cpup(_id);
4415 	if (id >= MTK_MAX_DEVS) {
4416 		dev_err(eth->dev, "%d is not a valid mac id\n", id);
4417 		return -EINVAL;
4418 	}
4419 
4420 	if (eth->netdev[id]) {
4421 		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4422 		return -EINVAL;
4423 	}
4424 
4425 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4426 		txqs = MTK_QDMA_NUM_QUEUES;
4427 
4428 	eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4429 	if (!eth->netdev[id]) {
4430 		dev_err(eth->dev, "alloc_etherdev failed\n");
4431 		return -ENOMEM;
4432 	}
4433 	mac = netdev_priv(eth->netdev[id]);
4434 	eth->mac[id] = mac;
4435 	mac->id = id;
4436 	mac->hw = eth;
4437 	mac->of_node = np;
4438 
4439 	err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
4440 	if (err == -EPROBE_DEFER)
4441 		return err;
4442 
4443 	if (err) {
4444 		/* If the mac address is invalid, use random mac address */
4445 		eth_hw_addr_random(eth->netdev[id]);
4446 		dev_err(eth->dev, "generated random MAC address %pM\n",
4447 			eth->netdev[id]->dev_addr);
4448 	}
4449 
4450 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4451 	mac->hwlro_ip_cnt = 0;
4452 
4453 	mac->hw_stats = devm_kzalloc(eth->dev,
4454 				     sizeof(*mac->hw_stats),
4455 				     GFP_KERNEL);
4456 	if (!mac->hw_stats) {
4457 		dev_err(eth->dev, "failed to allocate counter memory\n");
4458 		err = -ENOMEM;
4459 		goto free_netdev;
4460 	}
4461 	spin_lock_init(&mac->hw_stats->stats_lock);
4462 	u64_stats_init(&mac->hw_stats->syncp);
4463 
4464 	if (mtk_is_netsys_v3_or_greater(eth))
4465 		mac->hw_stats->reg_offset = id * 0x80;
4466 	else
4467 		mac->hw_stats->reg_offset = id * 0x40;
4468 
4469 	/* phylink create */
4470 	err = of_get_phy_mode(np, &phy_mode);
4471 	if (err) {
4472 		dev_err(eth->dev, "incorrect phy-mode\n");
4473 		goto free_netdev;
4474 	}
4475 
4476 	/* mac config is not set */
4477 	mac->interface = PHY_INTERFACE_MODE_NA;
4478 	mac->speed = SPEED_UNKNOWN;
4479 
4480 	mac->phylink_config.dev = &eth->netdev[id]->dev;
4481 	mac->phylink_config.type = PHYLINK_NETDEV;
4482 	mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4483 		MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4484 
4485 	/* MT7623 gmac0 is now missing its speed-specific PLL configuration
4486 	 * in its .mac_config method (since state->speed is not valid there).
4487 	 * Disable support for MII, GMII and RGMII.
4488 	 */
4489 	if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
4490 		__set_bit(PHY_INTERFACE_MODE_MII,
4491 			  mac->phylink_config.supported_interfaces);
4492 		__set_bit(PHY_INTERFACE_MODE_GMII,
4493 			  mac->phylink_config.supported_interfaces);
4494 
4495 		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4496 			phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4497 	}
4498 
4499 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4500 		__set_bit(PHY_INTERFACE_MODE_TRGMII,
4501 			  mac->phylink_config.supported_interfaces);
4502 
4503 	/* TRGMII is not permitted on MT7621 if using DDR2 */
4504 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
4505 	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
4506 		regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
4507 		if (val & SYSCFG_DRAM_TYPE_DDR2)
4508 			__clear_bit(PHY_INTERFACE_MODE_TRGMII,
4509 				    mac->phylink_config.supported_interfaces);
4510 	}
4511 
4512 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4513 		__set_bit(PHY_INTERFACE_MODE_SGMII,
4514 			  mac->phylink_config.supported_interfaces);
4515 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
4516 			  mac->phylink_config.supported_interfaces);
4517 		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
4518 			  mac->phylink_config.supported_interfaces);
4519 	}
4520 
4521 	if (mtk_is_netsys_v3_or_greater(mac->hw) &&
4522 	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
4523 	    id == MTK_GMAC1_ID) {
4524 		mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
4525 						       MAC_SYM_PAUSE |
4526 						       MAC_10000FD;
4527 		phy_interface_zero(mac->phylink_config.supported_interfaces);
4528 		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
4529 			  mac->phylink_config.supported_interfaces);
4530 	}
4531 
4532 	phylink = phylink_create(&mac->phylink_config,
4533 				 of_fwnode_handle(mac->of_node),
4534 				 phy_mode, &mtk_phylink_ops);
4535 	if (IS_ERR(phylink)) {
4536 		err = PTR_ERR(phylink);
4537 		goto free_netdev;
4538 	}
4539 
4540 	mac->phylink = phylink;
4541 
4542 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4543 	eth->netdev[id]->watchdog_timeo = 5 * HZ;
4544 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4545 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
4546 
4547 	eth->netdev[id]->hw_features = eth->soc->hw_features;
4548 	if (eth->hwlro)
4549 		eth->netdev[id]->hw_features |= NETIF_F_LRO;
4550 
4551 	eth->netdev[id]->vlan_features = eth->soc->hw_features &
4552 		~NETIF_F_HW_VLAN_CTAG_TX;
4553 	eth->netdev[id]->features |= eth->soc->hw_features;
4554 	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4555 
4556 	eth->netdev[id]->irq = eth->irq[0];
4557 	eth->netdev[id]->dev.of_node = np;
4558 
4559 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4560 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4561 	else
4562 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4563 
4564 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4565 		mac->device_notifier.notifier_call = mtk_device_event;
4566 		register_netdevice_notifier(&mac->device_notifier);
4567 	}
4568 
4569 	if (mtk_page_pool_enabled(eth))
4570 		eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
4571 						NETDEV_XDP_ACT_REDIRECT |
4572 						NETDEV_XDP_ACT_NDO_XMIT |
4573 						NETDEV_XDP_ACT_NDO_XMIT_SG;
4574 
4575 	return 0;
4576 
4577 free_netdev:
4578 	free_netdev(eth->netdev[id]);
4579 	return err;
4580 }
4581 
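/* Switch the struct device used for DMA mapping (e.g. when WED takes over
 * the datapath). Running netdevs are closed first so their rings are torn
 * down, then reopened so the rings are reallocated against the new device.
 */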
4582 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4583 {
4584 	struct net_device *dev, *tmp;
4585 	LIST_HEAD(dev_list);
4586 	int i;
4587 
4588 	rtnl_lock();
4589 
4590 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4591 		dev = eth->netdev[i];
4592 
4593 		if (!dev || !(dev->flags & IFF_UP))
4594 			continue;
4595 
4596 		list_add_tail(&dev->close_list, &dev_list);
4597 	}
4598 
4599 	dev_close_many(&dev_list, false);
4600 
4601 	eth->dma_dev = dma_dev;
4602 
4603 	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4604 		list_del_init(&dev->close_list);
4605 		dev_open(dev, NULL);
4606 	}
4607 
4608 	rtnl_unlock();
4609 }
4610 
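/* Create a LynxI PCS instance for each "mediatek,sgmiisys" syscon phandle;
 * the optional "mediatek,pnswap" property requests swapping the polarity of
 * the SerDes P/N pair.
 */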
4611 static int mtk_sgmii_init(struct mtk_eth *eth)
4612 {
4613 	struct device_node *np;
4614 	struct regmap *regmap;
4615 	u32 flags;
4616 	int i;
4617 
4618 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4619 		np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
4620 		if (!np)
4621 			break;
4622 
4623 		regmap = syscon_node_to_regmap(np);
4624 		flags = 0;
4625 		if (of_property_read_bool(np, "mediatek,pnswap"))
4626 			flags |= MTK_SGMII_FLAG_PN_SWAP;
4627 
4628 		of_node_put(np);
4629 
4630 		if (IS_ERR(regmap))
4631 			return PTR_ERR(regmap);
4632 
4633 		eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
4634 							 eth->soc->ana_rgc3,
4635 							 flags);
4636 	}
4637 
4638 	return 0;
4639 }
4640 
4641 static int mtk_probe(struct platform_device *pdev)
4642 {
4643 	struct resource *res = NULL;
4644 	struct device_node *mac_np;
4645 	struct mtk_eth *eth;
4646 	int err, i;
4647 
4648 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4649 	if (!eth)
4650 		return -ENOMEM;
4651 
4652 	eth->soc = of_device_get_match_data(&pdev->dev);
4653 
4654 	eth->dev = &pdev->dev;
4655 	eth->dma_dev = &pdev->dev;
4656 	eth->base = devm_platform_ioremap_resource(pdev, 0);
4657 	if (IS_ERR(eth->base))
4658 		return PTR_ERR(eth->base);
4659 
4660 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4661 		eth->ip_align = NET_IP_ALIGN;
4662 
4663 	spin_lock_init(&eth->page_lock);
4664 	spin_lock_init(&eth->tx_irq_lock);
4665 	spin_lock_init(&eth->rx_irq_lock);
4666 	spin_lock_init(&eth->dim_lock);
4667 
4668 	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4669 	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
4670 	INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
4671 
4672 	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4673 	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
4674 
4675 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4676 		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4677 							      "mediatek,ethsys");
4678 		if (IS_ERR(eth->ethsys)) {
4679 			dev_err(&pdev->dev, "no ethsys regmap found\n");
4680 			return PTR_ERR(eth->ethsys);
4681 		}
4682 	}
4683 
4684 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4685 		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4686 							     "mediatek,infracfg");
4687 		if (IS_ERR(eth->infra)) {
4688 			dev_err(&pdev->dev, "no infracfg regmap found\n");
4689 			return PTR_ERR(eth->infra);
4690 		}
4691 	}
4692 
4693 	if (of_dma_is_coherent(pdev->dev.of_node)) {
4694 		struct regmap *cci;
4695 
4696 		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4697 						      "cci-control-port");
4698 		/* enable CPU/bus coherency */
4699 		if (!IS_ERR(cci))
4700 			regmap_write(cci, 0, 3);
4701 	}
4702 
4703 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
4704 		err = mtk_sgmii_init(eth);
4705 
4706 		if (err)
4707 			return err;
4708 	}
4709 
4710 	if (eth->soc->required_pctl) {
4711 		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4712 							    "mediatek,pctl");
4713 		if (IS_ERR(eth->pctl)) {
4714 			dev_err(&pdev->dev, "no pctl regmap found\n");
4715 			err = PTR_ERR(eth->pctl);
4716 			goto err_destroy_sgmii;
4717 		}
4718 	}
4719 
4720 	if (mtk_is_netsys_v2_or_greater(eth)) {
4721 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4722 		if (!res) {
4723 			err = -EINVAL;
4724 			goto err_destroy_sgmii;
4725 		}
4726 	}
4727 
4728 	if (eth->soc->offload_version) {
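	/* Register any Wireless Ethernet Dispatch (WED) instances described
	 * by "mediatek,wed" phandles so Wi-Fi offload can attach to the
	 * WDMA blocks.
	 */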
4729 		for (i = 0;; i++) {
4730 			struct device_node *np;
4731 			phys_addr_t wdma_phy;
4732 			u32 wdma_base;
4733 
4734 			if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4735 				break;
4736 
4737 			np = of_parse_phandle(pdev->dev.of_node,
4738 					      "mediatek,wed", i);
4739 			if (!np)
4740 				break;
4741 
4742 			wdma_base = eth->soc->reg_map->wdma_base[i];
4743 			wdma_phy = res ? res->start + wdma_base : 0;
4744 			mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4745 				       wdma_phy, i);
4746 		}
4747 	}
4748 
4749 	for (i = 0; i < 3; i++) {
4750 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4751 			eth->irq[i] = eth->irq[0];
4752 		else
4753 			eth->irq[i] = platform_get_irq(pdev, i);
4754 		if (eth->irq[i] < 0) {
4755 			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4756 			err = -ENXIO;
4757 			goto err_wed_exit;
4758 		}
4759 	}
4760 	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4761 		eth->clks[i] = devm_clk_get(eth->dev,
4762 					    mtk_clks_source_name[i]);
4763 		if (IS_ERR(eth->clks[i])) {
4764 			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4765 				err = -EPROBE_DEFER;
4766 				goto err_wed_exit;
4767 			}
4768 			if (eth->soc->required_clks & BIT(i)) {
4769 				dev_err(&pdev->dev, "clock %s not found\n",
4770 					mtk_clks_source_name[i]);
4771 				err = -EINVAL;
4772 				goto err_wed_exit;
4773 			}
4774 			eth->clks[i] = NULL;
4775 		}
4776 	}
4777 
4778 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4779 	INIT_WORK(&eth->pending_work, mtk_pending_work);
4780 
4781 	err = mtk_hw_init(eth, false);
4782 	if (err)
4783 		goto err_wed_exit;
4784 
4785 	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4786 
4787 	for_each_child_of_node(pdev->dev.of_node, mac_np) {
4788 		if (!of_device_is_compatible(mac_np,
4789 					     "mediatek,eth-mac"))
4790 			continue;
4791 
4792 		if (!of_device_is_available(mac_np))
4793 			continue;
4794 
4795 		err = mtk_add_mac(eth, mac_np);
4796 		if (err) {
4797 			of_node_put(mac_np);
4798 			goto err_deinit_hw;
4799 		}
4800 	}
4801 
4802 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4803 		err = devm_request_irq(eth->dev, eth->irq[0],
4804 				       mtk_handle_irq, 0,
4805 				       dev_name(eth->dev), eth);
4806 	} else {
4807 		err = devm_request_irq(eth->dev, eth->irq[1],
4808 				       mtk_handle_irq_tx, 0,
4809 				       dev_name(eth->dev), eth);
4810 		if (err)
4811 			goto err_free_dev;
4812 
4813 		err = devm_request_irq(eth->dev, eth->irq[2],
4814 				       mtk_handle_irq_rx, 0,
4815 				       dev_name(eth->dev), eth);
4816 	}
4817 	if (err)
4818 		goto err_free_dev;
4819 
4820 	/* No MT7628/88 support yet */
4821 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4822 		err = mtk_mdio_init(eth);
4823 		if (err)
4824 			goto err_free_dev;
4825 	}
4826 
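	/* Bring up the PPE instance(s) used for hardware flow offload;
	 * NETSYS v2 and later provide two PPEs, earlier SoCs only one.
	 */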
4827 	if (eth->soc->offload_version) {
4828 		u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
4829 
4830 		num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
4831 		for (i = 0; i < num_ppe; i++) {
4832 			u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
4833 
4834 			eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
4835 
4836 			if (!eth->ppe[i]) {
4837 				err = -ENOMEM;
4838 				goto err_deinit_ppe;
4839 			}
4840 		}
4841 
4842 		err = mtk_eth_offload_init(eth);
4843 		if (err)
4844 			goto err_deinit_ppe;
4845 	}
4846 
4847 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4848 		if (!eth->netdev[i])
4849 			continue;
4850 
4851 		err = register_netdev(eth->netdev[i]);
4852 		if (err) {
4853 			dev_err(eth->dev, "error bringing up device\n");
4854 			goto err_deinit_ppe;
4855 		}

4856 		netif_info(eth, probe, eth->netdev[i],
4857 			   "mediatek frame engine at 0x%08lx, irq %d\n",
4858 			   eth->netdev[i]->base_addr, eth->irq[0]);
4859 	}
4860 
4861 	/* we run 2 devices on the same DMA ring so we need a dummy device
4862 	 * for NAPI to work
4863 	 */
4864 	init_dummy_netdev(&eth->dummy_dev);
4865 	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
4866 	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
4867 
4868 	platform_set_drvdata(pdev, eth);
4869 	schedule_delayed_work(&eth->reset.monitor_work,
4870 			      MTK_DMA_MONITOR_TIMEOUT);
4871 
4872 	return 0;
4873 
4874 err_deinit_ppe:
4875 	mtk_ppe_deinit(eth);
4876 	mtk_mdio_cleanup(eth);
4877 err_free_dev:
4878 	mtk_free_dev(eth);
4879 err_deinit_hw:
4880 	mtk_hw_deinit(eth);
4881 err_wed_exit:
4882 	mtk_wed_exit();
4883 err_destroy_sgmii:
4884 	mtk_sgmii_destroy(eth);
4885 
4886 	return err;
4887 }
4888 
4889 static int mtk_remove(struct platform_device *pdev)
4890 {
4891 	struct mtk_eth *eth = platform_get_drvdata(pdev);
4892 	struct mtk_mac *mac;
4893 	int i;
4894 
4895 	/* stop all devices to make sure that dma is properly shut down */
4896 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4897 		if (!eth->netdev[i])
4898 			continue;
4899 		mtk_stop(eth->netdev[i]);
4900 		mac = netdev_priv(eth->netdev[i]);
4901 		phylink_disconnect_phy(mac->phylink);
4902 	}
4903 
4904 	mtk_wed_exit();
4905 	mtk_hw_deinit(eth);
4906 
4907 	netif_napi_del(&eth->tx_napi);
4908 	netif_napi_del(&eth->rx_napi);
4909 	mtk_cleanup(eth);
4910 	mtk_mdio_cleanup(eth);
4911 
4912 	return 0;
4913 }
4914 
4915 static const struct mtk_soc_data mt2701_data = {
4916 	.reg_map = &mtk_reg_map,
4917 	.caps = MT7623_CAPS | MTK_HWLRO,
4918 	.hw_features = MTK_HW_FEATURES,
4919 	.required_clks = MT7623_CLKS_BITMAP,
4920 	.required_pctl = true,
4921 	.version = 1,
4922 	.txrx = {
4923 		.txd_size = sizeof(struct mtk_tx_dma),
4924 		.rxd_size = sizeof(struct mtk_rx_dma),
4925 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4926 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4927 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4928 		.dma_len_offset = 16,
4929 	},
4930 };
4931 
4932 static const struct mtk_soc_data mt7621_data = {
4933 	.reg_map = &mtk_reg_map,
4934 	.caps = MT7621_CAPS,
4935 	.hw_features = MTK_HW_FEATURES,
4936 	.required_clks = MT7621_CLKS_BITMAP,
4937 	.required_pctl = false,
4938 	.version = 1,
4939 	.offload_version = 1,
4940 	.hash_offset = 2,
4941 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
4942 	.txrx = {
4943 		.txd_size = sizeof(struct mtk_tx_dma),
4944 		.rxd_size = sizeof(struct mtk_rx_dma),
4945 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4946 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4947 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4948 		.dma_len_offset = 16,
4949 	},
4950 };
4951 
4952 static const struct mtk_soc_data mt7622_data = {
4953 	.reg_map = &mtk_reg_map,
4954 	.ana_rgc3 = 0x2028,
4955 	.caps = MT7622_CAPS | MTK_HWLRO,
4956 	.hw_features = MTK_HW_FEATURES,
4957 	.required_clks = MT7622_CLKS_BITMAP,
4958 	.required_pctl = false,
4959 	.version = 1,
4960 	.offload_version = 2,
4961 	.hash_offset = 2,
4962 	.has_accounting = true,
4963 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
4964 	.txrx = {
4965 		.txd_size = sizeof(struct mtk_tx_dma),
4966 		.rxd_size = sizeof(struct mtk_rx_dma),
4967 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4968 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4969 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4970 		.dma_len_offset = 16,
4971 	},
4972 };
4973 
4974 static const struct mtk_soc_data mt7623_data = {
4975 	.reg_map = &mtk_reg_map,
4976 	.caps = MT7623_CAPS | MTK_HWLRO,
4977 	.hw_features = MTK_HW_FEATURES,
4978 	.required_clks = MT7623_CLKS_BITMAP,
4979 	.required_pctl = true,
4980 	.version = 1,
4981 	.offload_version = 1,
4982 	.hash_offset = 2,
4983 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
4984 	.disable_pll_modes = true,
4985 	.txrx = {
4986 		.txd_size = sizeof(struct mtk_tx_dma),
4987 		.rxd_size = sizeof(struct mtk_rx_dma),
4988 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4989 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4990 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4991 		.dma_len_offset = 16,
4992 	},
4993 };
4994 
4995 static const struct mtk_soc_data mt7629_data = {
4996 	.reg_map = &mtk_reg_map,
4997 	.ana_rgc3 = 0x128,
4998 	.caps = MT7629_CAPS | MTK_HWLRO,
4999 	.hw_features = MTK_HW_FEATURES,
5000 	.required_clks = MT7629_CLKS_BITMAP,
5001 	.required_pctl = false,
5002 	.has_accounting = true,
5003 	.version = 1,
5004 	.txrx = {
5005 		.txd_size = sizeof(struct mtk_tx_dma),
5006 		.rxd_size = sizeof(struct mtk_rx_dma),
5007 		.rx_irq_done_mask = MTK_RX_DONE_INT,
5008 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
5009 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5010 		.dma_len_offset = 16,
5011 	},
5012 };
5013 
5014 static const struct mtk_soc_data mt7981_data = {
5015 	.reg_map = &mt7986_reg_map,
5016 	.ana_rgc3 = 0x128,
5017 	.caps = MT7981_CAPS,
5018 	.hw_features = MTK_HW_FEATURES,
5019 	.required_clks = MT7981_CLKS_BITMAP,
5020 	.required_pctl = false,
5021 	.version = 2,
5022 	.offload_version = 2,
5023 	.hash_offset = 4,
5024 	.has_accounting = true,
5025 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5026 	.txrx = {
5027 		.txd_size = sizeof(struct mtk_tx_dma_v2),
5028 		.rxd_size = sizeof(struct mtk_rx_dma_v2),
5029 		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
5030 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
5031 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5032 		.dma_len_offset = 8,
5033 	},
5034 };
5035 
5036 static const struct mtk_soc_data mt7986_data = {
5037 	.reg_map = &mt7986_reg_map,
5038 	.ana_rgc3 = 0x128,
5039 	.caps = MT7986_CAPS,
5040 	.hw_features = MTK_HW_FEATURES,
5041 	.required_clks = MT7986_CLKS_BITMAP,
5042 	.required_pctl = false,
5043 	.version = 2,
5044 	.offload_version = 2,
5045 	.hash_offset = 4,
5046 	.has_accounting = true,
5047 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5048 	.txrx = {
5049 		.txd_size = sizeof(struct mtk_tx_dma_v2),
5050 		.rxd_size = sizeof(struct mtk_rx_dma_v2),
5051 		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
5052 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
5053 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5054 		.dma_len_offset = 8,
5055 	},
5056 };
5057 
5058 static const struct mtk_soc_data mt7988_data = {
5059 	.reg_map = &mt7988_reg_map,
5060 	.ana_rgc3 = 0x128,
5061 	.caps = MT7988_CAPS,
5062 	.hw_features = MTK_HW_FEATURES,
5063 	.required_clks = MT7988_CLKS_BITMAP,
5064 	.required_pctl = false,
5065 	.version = 3,
5066 	.offload_version = 2,
5067 	.hash_offset = 4,
5068 	.has_accounting = true,
5069 	.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
5070 	.txrx = {
5071 		.txd_size = sizeof(struct mtk_tx_dma_v2),
5072 		.rxd_size = sizeof(struct mtk_rx_dma_v2),
5073 		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
5074 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
5075 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5076 		.dma_len_offset = 8,
5077 	},
5078 };
5079 
5080 static const struct mtk_soc_data rt5350_data = {
5081 	.reg_map = &mt7628_reg_map,
5082 	.caps = MT7628_CAPS,
5083 	.hw_features = MTK_HW_FEATURES_MT7628,
5084 	.required_clks = MT7628_CLKS_BITMAP,
5085 	.required_pctl = false,
5086 	.version = 1,
5087 	.txrx = {
5088 		.txd_size = sizeof(struct mtk_tx_dma),
5089 		.rxd_size = sizeof(struct mtk_rx_dma),
5090 		.rx_irq_done_mask = MTK_RX_DONE_INT,
5091 		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
5092 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5093 		.dma_len_offset = 16,
5094 	},
5095 };
5096 
5097 const struct of_device_id of_mtk_match[] = {
5098 	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5099 	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
5100 	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
5101 	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5102 	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5103 	{ .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5104 	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5105 	{ .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5106 	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
5107 	{},
5108 };
5109 MODULE_DEVICE_TABLE(of, of_mtk_match);
5110 
5111 static struct platform_driver mtk_driver = {
5112 	.probe = mtk_probe,
5113 	.remove = mtk_remove,
5114 	.driver = {
5115 		.name = "mtk_soc_eth",
5116 		.of_match_table = of_mtk_match,
5117 	},
5118 };
5119 
5120 module_platform_driver(mtk_driver);
5121 
5122 MODULE_LICENSE("GPL");
5123 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
5124 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
5125