1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5  *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6  *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7  */
8 
9 #include <linux/of_device.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/regmap.h>
15 #include <linux/clk.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/if_vlan.h>
18 #include <linux/reset.h>
19 #include <linux/tcp.h>
20 #include <linux/interrupt.h>
21 #include <linux/pinctrl/devinfo.h>
22 #include <linux/phylink.h>
23 #include <linux/jhash.h>
24 #include <linux/bitfield.h>
25 #include <net/dsa.h>
26 #include <net/dst_metadata.h>
27 
28 #include "mtk_eth_soc.h"
29 #include "mtk_wed.h"
30 
31 static int mtk_msg_level = -1;
32 module_param_named(msg_level, mtk_msg_level, int, 0);
33 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
34 
35 #define MTK_ETHTOOL_STAT(x) { #x, \
36 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
37 
38 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
39 				  offsetof(struct mtk_hw_stats, xdp_stats.x) / \
40 				  sizeof(u64) }
41 
42 static const struct mtk_reg_map mtk_reg_map = {
43 	.tx_irq_mask		= 0x1a1c,
44 	.tx_irq_status		= 0x1a18,
45 	.pdma = {
46 		.rx_ptr		= 0x0900,
47 		.rx_cnt_cfg	= 0x0904,
48 		.pcrx_ptr	= 0x0908,
49 		.glo_cfg	= 0x0a04,
50 		.rst_idx	= 0x0a08,
51 		.delay_irq	= 0x0a0c,
52 		.irq_status	= 0x0a20,
53 		.irq_mask	= 0x0a28,
54 		.int_grp	= 0x0a50,
55 	},
56 	.qdma = {
57 		.qtx_cfg	= 0x1800,
58 		.qtx_sch	= 0x1804,
59 		.rx_ptr		= 0x1900,
60 		.rx_cnt_cfg	= 0x1904,
61 		.qcrx_ptr	= 0x1908,
62 		.glo_cfg	= 0x1a04,
63 		.rst_idx	= 0x1a08,
64 		.delay_irq	= 0x1a0c,
65 		.fc_th		= 0x1a10,
66 		.tx_sch_rate	= 0x1a14,
67 		.int_grp	= 0x1a20,
68 		.hred		= 0x1a44,
69 		.ctx_ptr	= 0x1b00,
70 		.dtx_ptr	= 0x1b04,
71 		.crx_ptr	= 0x1b10,
72 		.drx_ptr	= 0x1b14,
73 		.fq_head	= 0x1b20,
74 		.fq_tail	= 0x1b24,
75 		.fq_count	= 0x1b28,
76 		.fq_blen	= 0x1b2c,
77 	},
78 	.gdm1_cnt		= 0x2400,
79 	.gdma_to_ppe		= 0x4444,
80 	.ppe_base		= 0x0c00,
81 	.wdma_base = {
82 		[0]		= 0x2800,
83 		[1]		= 0x2c00,
84 	},
85 };
86 
87 static const struct mtk_reg_map mt7628_reg_map = {
88 	.tx_irq_mask		= 0x0a28,
89 	.tx_irq_status		= 0x0a20,
90 	.pdma = {
91 		.rx_ptr		= 0x0900,
92 		.rx_cnt_cfg	= 0x0904,
93 		.pcrx_ptr	= 0x0908,
94 		.glo_cfg	= 0x0a04,
95 		.rst_idx	= 0x0a08,
96 		.delay_irq	= 0x0a0c,
97 		.irq_status	= 0x0a20,
98 		.irq_mask	= 0x0a28,
99 		.int_grp	= 0x0a50,
100 	},
101 };
102 
103 static const struct mtk_reg_map mt7986_reg_map = {
104 	.tx_irq_mask		= 0x461c,
105 	.tx_irq_status		= 0x4618,
106 	.pdma = {
107 		.rx_ptr		= 0x6100,
108 		.rx_cnt_cfg	= 0x6104,
109 		.pcrx_ptr	= 0x6108,
110 		.glo_cfg	= 0x6204,
111 		.rst_idx	= 0x6208,
112 		.delay_irq	= 0x620c,
113 		.irq_status	= 0x6220,
114 		.irq_mask	= 0x6228,
115 		.int_grp	= 0x6250,
116 	},
117 	.qdma = {
118 		.qtx_cfg	= 0x4400,
119 		.qtx_sch	= 0x4404,
120 		.rx_ptr		= 0x4500,
121 		.rx_cnt_cfg	= 0x4504,
122 		.qcrx_ptr	= 0x4508,
123 		.glo_cfg	= 0x4604,
124 		.rst_idx	= 0x4608,
125 		.delay_irq	= 0x460c,
126 		.fc_th		= 0x4610,
127 		.int_grp	= 0x4620,
128 		.hred		= 0x4644,
129 		.ctx_ptr	= 0x4700,
130 		.dtx_ptr	= 0x4704,
131 		.crx_ptr	= 0x4710,
132 		.drx_ptr	= 0x4714,
133 		.fq_head	= 0x4720,
134 		.fq_tail	= 0x4724,
135 		.fq_count	= 0x4728,
136 		.fq_blen	= 0x472c,
137 		.tx_sch_rate	= 0x4798,
138 	},
139 	.gdm1_cnt		= 0x1c00,
140 	.gdma_to_ppe		= 0x3333,
141 	.ppe_base		= 0x2000,
142 	.wdma_base = {
143 		[0]		= 0x4800,
144 		[1]		= 0x4c00,
145 	},
146 };
147 
148 /* strings used by ethtool */
149 static const struct mtk_ethtool_stats {
150 	char str[ETH_GSTRING_LEN];
151 	u32 offset;
152 } mtk_ethtool_stats[] = {
153 	MTK_ETHTOOL_STAT(tx_bytes),
154 	MTK_ETHTOOL_STAT(tx_packets),
155 	MTK_ETHTOOL_STAT(tx_skip),
156 	MTK_ETHTOOL_STAT(tx_collisions),
157 	MTK_ETHTOOL_STAT(rx_bytes),
158 	MTK_ETHTOOL_STAT(rx_packets),
159 	MTK_ETHTOOL_STAT(rx_overflow),
160 	MTK_ETHTOOL_STAT(rx_fcs_errors),
161 	MTK_ETHTOOL_STAT(rx_short_errors),
162 	MTK_ETHTOOL_STAT(rx_long_errors),
163 	MTK_ETHTOOL_STAT(rx_checksum_errors),
164 	MTK_ETHTOOL_STAT(rx_flow_control_packets),
165 	MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
166 	MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
167 	MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
168 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
169 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
170 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
171 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
172 };
173 
174 static const char * const mtk_clks_source_name[] = {
175 	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
176 	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
177 	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
178 	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
179 };
180 
181 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
182 {
183 	__raw_writel(val, eth->base + reg);
184 }
185 
186 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
187 {
188 	return __raw_readl(eth->base + reg);
189 }
190 
191 static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
192 {
193 	u32 val;
194 
195 	val = mtk_r32(eth, reg);
196 	val &= ~mask;
197 	val |= set;
198 	mtk_w32(eth, val, reg);
199 	return reg;
200 }
201 
202 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
203 {
204 	unsigned long t_start = jiffies;
205 
206 	while (1) {
207 		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
208 			return 0;
209 		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
210 			break;
211 		cond_resched();
212 	}
213 
214 	dev_err(eth->dev, "mdio: MDIO timeout\n");
215 	return -ETIMEDOUT;
216 }
217 
218 static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
219 			   u32 write_data)
220 {
221 	int ret;
222 
223 	ret = mtk_mdio_busy_wait(eth);
224 	if (ret < 0)
225 		return ret;
226 
227 	if (phy_reg & MII_ADDR_C45) {
228 		mtk_w32(eth, PHY_IAC_ACCESS |
229 			     PHY_IAC_START_C45 |
230 			     PHY_IAC_CMD_C45_ADDR |
231 			     PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
232 			     PHY_IAC_ADDR(phy_addr) |
233 			     PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
234 			MTK_PHY_IAC);
235 
236 		ret = mtk_mdio_busy_wait(eth);
237 		if (ret < 0)
238 			return ret;
239 
240 		mtk_w32(eth, PHY_IAC_ACCESS |
241 			     PHY_IAC_START_C45 |
242 			     PHY_IAC_CMD_WRITE |
243 			     PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
244 			     PHY_IAC_ADDR(phy_addr) |
245 			     PHY_IAC_DATA(write_data),
246 			MTK_PHY_IAC);
247 	} else {
248 		mtk_w32(eth, PHY_IAC_ACCESS |
249 			     PHY_IAC_START_C22 |
250 			     PHY_IAC_CMD_WRITE |
251 			     PHY_IAC_REG(phy_reg) |
252 			     PHY_IAC_ADDR(phy_addr) |
253 			     PHY_IAC_DATA(write_data),
254 			MTK_PHY_IAC);
255 	}
256 
257 	ret = mtk_mdio_busy_wait(eth);
258 	if (ret < 0)
259 		return ret;
260 
261 	return 0;
262 }
263 
264 static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
265 {
266 	int ret;
267 
268 	ret = mtk_mdio_busy_wait(eth);
269 	if (ret < 0)
270 		return ret;
271 
272 	if (phy_reg & MII_ADDR_C45) {
273 		mtk_w32(eth, PHY_IAC_ACCESS |
274 			     PHY_IAC_START_C45 |
275 			     PHY_IAC_CMD_C45_ADDR |
276 			     PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
277 			     PHY_IAC_ADDR(phy_addr) |
278 			     PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
279 			MTK_PHY_IAC);
280 
281 		ret = mtk_mdio_busy_wait(eth);
282 		if (ret < 0)
283 			return ret;
284 
285 		mtk_w32(eth, PHY_IAC_ACCESS |
286 			     PHY_IAC_START_C45 |
287 			     PHY_IAC_CMD_C45_READ |
288 			     PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
289 			     PHY_IAC_ADDR(phy_addr),
290 			MTK_PHY_IAC);
291 	} else {
292 		mtk_w32(eth, PHY_IAC_ACCESS |
293 			     PHY_IAC_START_C22 |
294 			     PHY_IAC_CMD_C22_READ |
295 			     PHY_IAC_REG(phy_reg) |
296 			     PHY_IAC_ADDR(phy_addr),
297 			MTK_PHY_IAC);
298 	}
299 
300 	ret = mtk_mdio_busy_wait(eth);
301 	if (ret < 0)
302 		return ret;
303 
304 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
305 }
306 
307 static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
308 			  int phy_reg, u16 val)
309 {
310 	struct mtk_eth *eth = bus->priv;
311 
312 	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
313 }
314 
315 static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
316 {
317 	struct mtk_eth *eth = bus->priv;
318 
319 	return _mtk_mdio_read(eth, phy_addr, phy_reg);
320 }
321 
322 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
323 				     phy_interface_t interface)
324 {
325 	u32 val;
326 
327 	/* Check DDR memory type.
328 	 * Currently TRGMII mode with DDR2 memory is not supported.
329 	 */
330 	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
331 	if (interface == PHY_INTERFACE_MODE_TRGMII &&
332 	    val & SYSCFG_DRAM_TYPE_DDR2) {
333 		dev_err(eth->dev,
334 			"TRGMII mode with DDR2 memory is not supported!\n");
335 		return -EOPNOTSUPP;
336 	}
337 
338 	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
339 		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
340 
341 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
342 			   ETHSYS_TRGMII_MT7621_MASK, val);
343 
344 	return 0;
345 }
346 
347 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
348 				   phy_interface_t interface, int speed)
349 {
350 	u32 val;
351 	int ret;
352 
353 	if (interface == PHY_INTERFACE_MODE_TRGMII) {
354 		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
355 		val = 500000000;
356 		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
357 		if (ret)
358 			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
359 		return;
360 	}
361 
362 	val = (speed == SPEED_1000) ?
363 		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
364 	mtk_w32(eth, val, INTF_MODE);
365 
366 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
367 			   ETHSYS_TRGMII_CLK_SEL362_5,
368 			   ETHSYS_TRGMII_CLK_SEL362_5);
369 
370 	val = (speed == SPEED_1000) ? 250000000 : 500000000;
371 	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
372 	if (ret)
373 		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
374 
375 	val = (speed == SPEED_1000) ?
376 		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
377 	mtk_w32(eth, val, TRGMII_RCK_CTRL);
378 
379 	val = (speed == SPEED_1000) ?
380 		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
381 	mtk_w32(eth, val, TRGMII_TCK_CTRL);
382 }
383 
384 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
385 					      phy_interface_t interface)
386 {
387 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
388 					   phylink_config);
389 	struct mtk_eth *eth = mac->hw;
390 	unsigned int sid;
391 
392 	if (interface == PHY_INTERFACE_MODE_SGMII ||
393 	    phy_interface_mode_is_8023z(interface)) {
394 		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
395 		       0 : mac->id;
396 
397 		return mtk_sgmii_select_pcs(eth->sgmii, sid);
398 	}
399 
400 	return NULL;
401 }
402 
403 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
404 			   const struct phylink_link_state *state)
405 {
406 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
407 					   phylink_config);
408 	struct mtk_eth *eth = mac->hw;
409 	int val, ge_mode, err = 0;
410 	u32 i;
411 
412 	/* MT76x8 has no hardware settings between for the MAC */
413 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
414 	    mac->interface != state->interface) {
415 		/* Setup soc pin functions */
416 		switch (state->interface) {
417 		case PHY_INTERFACE_MODE_TRGMII:
418 			if (mac->id)
419 				goto err_phy;
420 			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
421 					  MTK_GMAC1_TRGMII))
422 				goto err_phy;
423 			fallthrough;
424 		case PHY_INTERFACE_MODE_RGMII_TXID:
425 		case PHY_INTERFACE_MODE_RGMII_RXID:
426 		case PHY_INTERFACE_MODE_RGMII_ID:
427 		case PHY_INTERFACE_MODE_RGMII:
428 		case PHY_INTERFACE_MODE_MII:
429 		case PHY_INTERFACE_MODE_REVMII:
430 		case PHY_INTERFACE_MODE_RMII:
431 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
432 				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
433 				if (err)
434 					goto init_err;
435 			}
436 			break;
437 		case PHY_INTERFACE_MODE_1000BASEX:
438 		case PHY_INTERFACE_MODE_2500BASEX:
439 		case PHY_INTERFACE_MODE_SGMII:
440 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
441 				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
442 				if (err)
443 					goto init_err;
444 			}
445 			break;
446 		case PHY_INTERFACE_MODE_GMII:
447 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
448 				err = mtk_gmac_gephy_path_setup(eth, mac->id);
449 				if (err)
450 					goto init_err;
451 			}
452 			break;
453 		default:
454 			goto err_phy;
455 		}
456 
457 		/* Setup clock for 1st gmac */
458 		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
459 		    !phy_interface_mode_is_8023z(state->interface) &&
460 		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
461 			if (MTK_HAS_CAPS(mac->hw->soc->caps,
462 					 MTK_TRGMII_MT7621_CLK)) {
463 				if (mt7621_gmac0_rgmii_adjust(mac->hw,
464 							      state->interface))
465 					goto err_phy;
466 			} else {
467 				/* FIXME: this is incorrect. Not only does it
468 				 * use state->speed (which is not guaranteed
469 				 * to be correct) but it also makes use of it
470 				 * in a code path that will only be reachable
471 				 * when the PHY interface mode changes, not
472 				 * when the speed changes. Consequently, RGMII
473 				 * is probably broken.
474 				 */
475 				mtk_gmac0_rgmii_adjust(mac->hw,
476 						       state->interface,
477 						       state->speed);
478 
479 				/* mt7623_pad_clk_setup */
480 				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
481 					mtk_w32(mac->hw,
482 						TD_DM_DRVP(8) | TD_DM_DRVN(8),
483 						TRGMII_TD_ODT(i));
484 
485 				/* Assert/release MT7623 RXC reset */
486 				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
487 					TRGMII_RCK_CTRL);
488 				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
489 			}
490 		}
491 
492 		ge_mode = 0;
493 		switch (state->interface) {
494 		case PHY_INTERFACE_MODE_MII:
495 		case PHY_INTERFACE_MODE_GMII:
496 			ge_mode = 1;
497 			break;
498 		case PHY_INTERFACE_MODE_REVMII:
499 			ge_mode = 2;
500 			break;
501 		case PHY_INTERFACE_MODE_RMII:
502 			if (mac->id)
503 				goto err_phy;
504 			ge_mode = 3;
505 			break;
506 		default:
507 			break;
508 		}
509 
510 		/* put the gmac into the right mode */
511 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
512 		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
513 		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
514 		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
515 
516 		mac->interface = state->interface;
517 	}
518 
519 	/* SGMII */
520 	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
521 	    phy_interface_mode_is_8023z(state->interface)) {
522 		/* The path GMAC to SGMII will be enabled once the SGMIISYS is
523 		 * being setup done.
524 		 */
525 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
526 
527 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
528 				   SYSCFG0_SGMII_MASK,
529 				   ~(u32)SYSCFG0_SGMII_MASK);
530 
531 		/* Save the syscfg0 value for mac_finish */
532 		mac->syscfg0 = val;
533 	} else if (phylink_autoneg_inband(mode)) {
534 		dev_err(eth->dev,
535 			"In-band mode not supported in non SGMII mode!\n");
536 		return;
537 	}
538 
539 	return;
540 
541 err_phy:
542 	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
543 		mac->id, phy_modes(state->interface));
544 	return;
545 
546 init_err:
547 	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
548 		mac->id, phy_modes(state->interface), err);
549 }
550 
551 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
552 			  phy_interface_t interface)
553 {
554 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
555 					   phylink_config);
556 	struct mtk_eth *eth = mac->hw;
557 	u32 mcr_cur, mcr_new;
558 
559 	/* Enable SGMII */
560 	if (interface == PHY_INTERFACE_MODE_SGMII ||
561 	    phy_interface_mode_is_8023z(interface))
562 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
563 				   SYSCFG0_SGMII_MASK, mac->syscfg0);
564 
565 	/* Setup gmac */
566 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
567 	mcr_new = mcr_cur;
568 	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
569 		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
570 
571 	/* Only update control register when needed! */
572 	if (mcr_new != mcr_cur)
573 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
574 
575 	return 0;
576 }
577 
578 static void mtk_mac_pcs_get_state(struct phylink_config *config,
579 				  struct phylink_link_state *state)
580 {
581 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
582 					   phylink_config);
583 	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
584 
585 	state->link = (pmsr & MAC_MSR_LINK);
586 	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
587 
588 	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
589 	case 0:
590 		state->speed = SPEED_10;
591 		break;
592 	case MAC_MSR_SPEED_100:
593 		state->speed = SPEED_100;
594 		break;
595 	case MAC_MSR_SPEED_1000:
596 		state->speed = SPEED_1000;
597 		break;
598 	default:
599 		state->speed = SPEED_UNKNOWN;
600 		break;
601 	}
602 
603 	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
604 	if (pmsr & MAC_MSR_RX_FC)
605 		state->pause |= MLO_PAUSE_RX;
606 	if (pmsr & MAC_MSR_TX_FC)
607 		state->pause |= MLO_PAUSE_TX;
608 }
609 
610 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
611 			      phy_interface_t interface)
612 {
613 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
614 					   phylink_config);
615 	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
616 
617 	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
618 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
619 }
620 
621 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
622 				int speed)
623 {
624 	const struct mtk_soc_data *soc = eth->soc;
625 	u32 ofs, val;
626 
627 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
628 		return;
629 
630 	val = MTK_QTX_SCH_MIN_RATE_EN |
631 	      /* minimum: 10 Mbps */
632 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
633 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
634 	      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
635 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
636 		val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
637 
638 	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
639 		switch (speed) {
640 		case SPEED_10:
641 			val |= MTK_QTX_SCH_MAX_RATE_EN |
642 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
643 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
644 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
645 			break;
646 		case SPEED_100:
647 			val |= MTK_QTX_SCH_MAX_RATE_EN |
648 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
649 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3);
650 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
651 			break;
652 		case SPEED_1000:
653 			val |= MTK_QTX_SCH_MAX_RATE_EN |
654 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
655 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
656 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
657 			break;
658 		default:
659 			break;
660 		}
661 	} else {
662 		switch (speed) {
663 		case SPEED_10:
664 			val |= MTK_QTX_SCH_MAX_RATE_EN |
665 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
666 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
667 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
668 			break;
669 		case SPEED_100:
670 			val |= MTK_QTX_SCH_MAX_RATE_EN |
671 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
672 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5);
673 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
674 			break;
675 		case SPEED_1000:
676 			val |= MTK_QTX_SCH_MAX_RATE_EN |
677 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
678 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
679 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
680 			break;
681 		default:
682 			break;
683 		}
684 	}
685 
686 	ofs = MTK_QTX_OFFSET * idx;
687 	mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
688 }
689 
690 static void mtk_mac_link_up(struct phylink_config *config,
691 			    struct phy_device *phy,
692 			    unsigned int mode, phy_interface_t interface,
693 			    int speed, int duplex, bool tx_pause, bool rx_pause)
694 {
695 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
696 					   phylink_config);
697 	u32 mcr;
698 
699 	mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
700 	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
701 		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
702 		 MAC_MCR_FORCE_RX_FC);
703 
704 	/* Configure speed */
705 	switch (speed) {
706 	case SPEED_2500:
707 	case SPEED_1000:
708 		mcr |= MAC_MCR_SPEED_1000;
709 		break;
710 	case SPEED_100:
711 		mcr |= MAC_MCR_SPEED_100;
712 		break;
713 	}
714 
715 	mtk_set_queue_speed(mac->hw, mac->id, speed);
716 
717 	/* Configure duplex */
718 	if (duplex == DUPLEX_FULL)
719 		mcr |= MAC_MCR_FORCE_DPX;
720 
721 	/* Configure pause modes - phylink will avoid these for half duplex */
722 	if (tx_pause)
723 		mcr |= MAC_MCR_FORCE_TX_FC;
724 	if (rx_pause)
725 		mcr |= MAC_MCR_FORCE_RX_FC;
726 
727 	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
728 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
729 }
730 
731 static const struct phylink_mac_ops mtk_phylink_ops = {
732 	.mac_select_pcs = mtk_mac_select_pcs,
733 	.mac_pcs_get_state = mtk_mac_pcs_get_state,
734 	.mac_config = mtk_mac_config,
735 	.mac_finish = mtk_mac_finish,
736 	.mac_link_down = mtk_mac_link_down,
737 	.mac_link_up = mtk_mac_link_up,
738 };
739 
740 static int mtk_mdio_init(struct mtk_eth *eth)
741 {
742 	struct device_node *mii_np;
743 	int ret;
744 
745 	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
746 	if (!mii_np) {
747 		dev_err(eth->dev, "no %s child node found", "mdio-bus");
748 		return -ENODEV;
749 	}
750 
751 	if (!of_device_is_available(mii_np)) {
752 		ret = -ENODEV;
753 		goto err_put_node;
754 	}
755 
756 	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
757 	if (!eth->mii_bus) {
758 		ret = -ENOMEM;
759 		goto err_put_node;
760 	}
761 
762 	eth->mii_bus->name = "mdio";
763 	eth->mii_bus->read = mtk_mdio_read;
764 	eth->mii_bus->write = mtk_mdio_write;
765 	eth->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
766 	eth->mii_bus->priv = eth;
767 	eth->mii_bus->parent = eth->dev;
768 
769 	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
770 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
771 
772 err_put_node:
773 	of_node_put(mii_np);
774 	return ret;
775 }
776 
777 static void mtk_mdio_cleanup(struct mtk_eth *eth)
778 {
779 	if (!eth->mii_bus)
780 		return;
781 
782 	mdiobus_unregister(eth->mii_bus);
783 }
784 
785 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
786 {
787 	unsigned long flags;
788 	u32 val;
789 
790 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
791 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
792 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
793 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
794 }
795 
796 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
797 {
798 	unsigned long flags;
799 	u32 val;
800 
801 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
802 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
803 	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
804 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
805 }
806 
807 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
808 {
809 	unsigned long flags;
810 	u32 val;
811 
812 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
813 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
814 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
815 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
816 }
817 
818 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
819 {
820 	unsigned long flags;
821 	u32 val;
822 
823 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
824 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
825 	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
826 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
827 }
828 
829 static int mtk_set_mac_address(struct net_device *dev, void *p)
830 {
831 	int ret = eth_mac_addr(dev, p);
832 	struct mtk_mac *mac = netdev_priv(dev);
833 	struct mtk_eth *eth = mac->hw;
834 	const char *macaddr = dev->dev_addr;
835 
836 	if (ret)
837 		return ret;
838 
839 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
840 		return -EBUSY;
841 
842 	spin_lock_bh(&mac->hw->page_lock);
843 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
844 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
845 			MT7628_SDM_MAC_ADRH);
846 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
847 			(macaddr[4] << 8) | macaddr[5],
848 			MT7628_SDM_MAC_ADRL);
849 	} else {
850 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
851 			MTK_GDMA_MAC_ADRH(mac->id));
852 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
853 			(macaddr[4] << 8) | macaddr[5],
854 			MTK_GDMA_MAC_ADRL(mac->id));
855 	}
856 	spin_unlock_bh(&mac->hw->page_lock);
857 
858 	return 0;
859 }
860 
861 void mtk_stats_update_mac(struct mtk_mac *mac)
862 {
863 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
864 	struct mtk_eth *eth = mac->hw;
865 
866 	u64_stats_update_begin(&hw_stats->syncp);
867 
868 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
869 		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
870 		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
871 		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
872 		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
873 		hw_stats->rx_checksum_errors +=
874 			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
875 	} else {
876 		const struct mtk_reg_map *reg_map = eth->soc->reg_map;
877 		unsigned int offs = hw_stats->reg_offset;
878 		u64 stats;
879 
880 		hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
881 		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
882 		if (stats)
883 			hw_stats->rx_bytes += (stats << 32);
884 		hw_stats->rx_packets +=
885 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
886 		hw_stats->rx_overflow +=
887 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
888 		hw_stats->rx_fcs_errors +=
889 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
890 		hw_stats->rx_short_errors +=
891 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
892 		hw_stats->rx_long_errors +=
893 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
894 		hw_stats->rx_checksum_errors +=
895 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
896 		hw_stats->rx_flow_control_packets +=
897 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
898 		hw_stats->tx_skip +=
899 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
900 		hw_stats->tx_collisions +=
901 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
902 		hw_stats->tx_bytes +=
903 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
904 		stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
905 		if (stats)
906 			hw_stats->tx_bytes += (stats << 32);
907 		hw_stats->tx_packets +=
908 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
909 	}
910 
911 	u64_stats_update_end(&hw_stats->syncp);
912 }
913 
914 static void mtk_stats_update(struct mtk_eth *eth)
915 {
916 	int i;
917 
918 	for (i = 0; i < MTK_MAC_COUNT; i++) {
919 		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
920 			continue;
921 		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
922 			mtk_stats_update_mac(eth->mac[i]);
923 			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
924 		}
925 	}
926 }
927 
928 static void mtk_get_stats64(struct net_device *dev,
929 			    struct rtnl_link_stats64 *storage)
930 {
931 	struct mtk_mac *mac = netdev_priv(dev);
932 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
933 	unsigned int start;
934 
935 	if (netif_running(dev) && netif_device_present(dev)) {
936 		if (spin_trylock_bh(&hw_stats->stats_lock)) {
937 			mtk_stats_update_mac(mac);
938 			spin_unlock_bh(&hw_stats->stats_lock);
939 		}
940 	}
941 
942 	do {
943 		start = u64_stats_fetch_begin(&hw_stats->syncp);
944 		storage->rx_packets = hw_stats->rx_packets;
945 		storage->tx_packets = hw_stats->tx_packets;
946 		storage->rx_bytes = hw_stats->rx_bytes;
947 		storage->tx_bytes = hw_stats->tx_bytes;
948 		storage->collisions = hw_stats->tx_collisions;
949 		storage->rx_length_errors = hw_stats->rx_short_errors +
950 			hw_stats->rx_long_errors;
951 		storage->rx_over_errors = hw_stats->rx_overflow;
952 		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
953 		storage->rx_errors = hw_stats->rx_checksum_errors;
954 		storage->tx_aborted_errors = hw_stats->tx_skip;
955 	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
956 
957 	storage->tx_errors = dev->stats.tx_errors;
958 	storage->rx_dropped = dev->stats.rx_dropped;
959 	storage->tx_dropped = dev->stats.tx_dropped;
960 }
961 
962 static inline int mtk_max_frag_size(int mtu)
963 {
964 	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
965 	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
966 		mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
967 
968 	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
969 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
970 }
971 
972 static inline int mtk_max_buf_size(int frag_size)
973 {
974 	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
975 		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
976 
977 	WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
978 
979 	return buf_size;
980 }
981 
982 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
983 			    struct mtk_rx_dma_v2 *dma_rxd)
984 {
985 	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
986 	if (!(rxd->rxd2 & RX_DMA_DONE))
987 		return false;
988 
989 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
990 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
991 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
992 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
993 		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
994 		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
995 	}
996 
997 	return true;
998 }
999 
1000 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1001 {
1002 	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1003 	unsigned long data;
1004 
1005 	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1006 				get_order(size));
1007 
1008 	return (void *)data;
1009 }
1010 
1011 /* the qdma core needs scratch memory to be setup */
1012 static int mtk_init_fq_dma(struct mtk_eth *eth)
1013 {
1014 	const struct mtk_soc_data *soc = eth->soc;
1015 	dma_addr_t phy_ring_tail;
1016 	int cnt = MTK_QDMA_RING_SIZE;
1017 	dma_addr_t dma_addr;
1018 	int i;
1019 
1020 	eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1021 					       cnt * soc->txrx.txd_size,
1022 					       &eth->phy_scratch_ring,
1023 					       GFP_KERNEL);
1024 	if (unlikely(!eth->scratch_ring))
1025 		return -ENOMEM;
1026 
1027 	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1028 	if (unlikely(!eth->scratch_head))
1029 		return -ENOMEM;
1030 
1031 	dma_addr = dma_map_single(eth->dma_dev,
1032 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
1033 				  DMA_FROM_DEVICE);
1034 	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1035 		return -ENOMEM;
1036 
1037 	phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
1038 
1039 	for (i = 0; i < cnt; i++) {
1040 		struct mtk_tx_dma_v2 *txd;
1041 
1042 		txd = eth->scratch_ring + i * soc->txrx.txd_size;
1043 		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1044 		if (i < cnt - 1)
1045 			txd->txd2 = eth->phy_scratch_ring +
1046 				    (i + 1) * soc->txrx.txd_size;
1047 
1048 		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1049 		txd->txd4 = 0;
1050 		if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
1051 			txd->txd5 = 0;
1052 			txd->txd6 = 0;
1053 			txd->txd7 = 0;
1054 			txd->txd8 = 0;
1055 		}
1056 	}
1057 
1058 	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1059 	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1060 	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1061 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1062 
1063 	return 0;
1064 }
1065 
1066 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1067 {
1068 	return ring->dma + (desc - ring->phys);
1069 }
1070 
1071 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1072 					     void *txd, u32 txd_size)
1073 {
1074 	int idx = (txd - ring->dma) / txd_size;
1075 
1076 	return &ring->buf[idx];
1077 }
1078 
1079 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1080 				       struct mtk_tx_dma *dma)
1081 {
1082 	return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1083 }
1084 
1085 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1086 {
1087 	return (dma - ring->dma) / txd_size;
1088 }
1089 
1090 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1091 			 struct xdp_frame_bulk *bq, bool napi)
1092 {
1093 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1094 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1095 			dma_unmap_single(eth->dma_dev,
1096 					 dma_unmap_addr(tx_buf, dma_addr0),
1097 					 dma_unmap_len(tx_buf, dma_len0),
1098 					 DMA_TO_DEVICE);
1099 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1100 			dma_unmap_page(eth->dma_dev,
1101 				       dma_unmap_addr(tx_buf, dma_addr0),
1102 				       dma_unmap_len(tx_buf, dma_len0),
1103 				       DMA_TO_DEVICE);
1104 		}
1105 	} else {
1106 		if (dma_unmap_len(tx_buf, dma_len0)) {
1107 			dma_unmap_page(eth->dma_dev,
1108 				       dma_unmap_addr(tx_buf, dma_addr0),
1109 				       dma_unmap_len(tx_buf, dma_len0),
1110 				       DMA_TO_DEVICE);
1111 		}
1112 
1113 		if (dma_unmap_len(tx_buf, dma_len1)) {
1114 			dma_unmap_page(eth->dma_dev,
1115 				       dma_unmap_addr(tx_buf, dma_addr1),
1116 				       dma_unmap_len(tx_buf, dma_len1),
1117 				       DMA_TO_DEVICE);
1118 		}
1119 	}
1120 
1121 	if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1122 		if (tx_buf->type == MTK_TYPE_SKB) {
1123 			struct sk_buff *skb = tx_buf->data;
1124 
1125 			if (napi)
1126 				napi_consume_skb(skb, napi);
1127 			else
1128 				dev_kfree_skb_any(skb);
1129 		} else {
1130 			struct xdp_frame *xdpf = tx_buf->data;
1131 
1132 			if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1133 				xdp_return_frame_rx_napi(xdpf);
1134 			else if (bq)
1135 				xdp_return_frame_bulk(xdpf, bq);
1136 			else
1137 				xdp_return_frame(xdpf);
1138 		}
1139 	}
1140 	tx_buf->flags = 0;
1141 	tx_buf->data = NULL;
1142 }
1143 
1144 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1145 			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1146 			 size_t size, int idx)
1147 {
1148 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1149 		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1150 		dma_unmap_len_set(tx_buf, dma_len0, size);
1151 	} else {
1152 		if (idx & 1) {
1153 			txd->txd3 = mapped_addr;
1154 			txd->txd2 |= TX_DMA_PLEN1(size);
1155 			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1156 			dma_unmap_len_set(tx_buf, dma_len1, size);
1157 		} else {
1158 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1159 			txd->txd1 = mapped_addr;
1160 			txd->txd2 = TX_DMA_PLEN0(size);
1161 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1162 			dma_unmap_len_set(tx_buf, dma_len0, size);
1163 		}
1164 	}
1165 }
1166 
1167 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1168 				   struct mtk_tx_dma_desc_info *info)
1169 {
1170 	struct mtk_mac *mac = netdev_priv(dev);
1171 	struct mtk_eth *eth = mac->hw;
1172 	struct mtk_tx_dma *desc = txd;
1173 	u32 data;
1174 
1175 	WRITE_ONCE(desc->txd1, info->addr);
1176 
1177 	data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1178 	       FIELD_PREP(TX_DMA_PQID, info->qid);
1179 	if (info->last)
1180 		data |= TX_DMA_LS0;
1181 	WRITE_ONCE(desc->txd3, data);
1182 
1183 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1184 	if (info->first) {
1185 		if (info->gso)
1186 			data |= TX_DMA_TSO;
1187 		/* tx checksum offload */
1188 		if (info->csum)
1189 			data |= TX_DMA_CHKSUM;
1190 		/* vlan header offload */
1191 		if (info->vlan)
1192 			data |= TX_DMA_INS_VLAN | info->vlan_tci;
1193 	}
1194 	WRITE_ONCE(desc->txd4, data);
1195 }
1196 
1197 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1198 				   struct mtk_tx_dma_desc_info *info)
1199 {
1200 	struct mtk_mac *mac = netdev_priv(dev);
1201 	struct mtk_tx_dma_v2 *desc = txd;
1202 	struct mtk_eth *eth = mac->hw;
1203 	u32 data;
1204 
1205 	WRITE_ONCE(desc->txd1, info->addr);
1206 
1207 	data = TX_DMA_PLEN0(info->size);
1208 	if (info->last)
1209 		data |= TX_DMA_LS0;
1210 	WRITE_ONCE(desc->txd3, data);
1211 
1212 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
1213 	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1214 	WRITE_ONCE(desc->txd4, data);
1215 
1216 	data = 0;
1217 	if (info->first) {
1218 		if (info->gso)
1219 			data |= TX_DMA_TSO_V2;
1220 		/* tx checksum offload */
1221 		if (info->csum)
1222 			data |= TX_DMA_CHKSUM_V2;
1223 	}
1224 	WRITE_ONCE(desc->txd5, data);
1225 
1226 	data = 0;
1227 	if (info->first && info->vlan)
1228 		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1229 	WRITE_ONCE(desc->txd6, data);
1230 
1231 	WRITE_ONCE(desc->txd7, 0);
1232 	WRITE_ONCE(desc->txd8, 0);
1233 }
1234 
1235 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1236 				struct mtk_tx_dma_desc_info *info)
1237 {
1238 	struct mtk_mac *mac = netdev_priv(dev);
1239 	struct mtk_eth *eth = mac->hw;
1240 
1241 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1242 		mtk_tx_set_dma_desc_v2(dev, txd, info);
1243 	else
1244 		mtk_tx_set_dma_desc_v1(dev, txd, info);
1245 }
1246 
1247 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1248 		      int tx_num, struct mtk_tx_ring *ring, bool gso)
1249 {
1250 	struct mtk_tx_dma_desc_info txd_info = {
1251 		.size = skb_headlen(skb),
1252 		.gso = gso,
1253 		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
1254 		.vlan = skb_vlan_tag_present(skb),
1255 		.qid = skb_get_queue_mapping(skb),
1256 		.vlan_tci = skb_vlan_tag_get(skb),
1257 		.first = true,
1258 		.last = !skb_is_nonlinear(skb),
1259 	};
1260 	struct netdev_queue *txq;
1261 	struct mtk_mac *mac = netdev_priv(dev);
1262 	struct mtk_eth *eth = mac->hw;
1263 	const struct mtk_soc_data *soc = eth->soc;
1264 	struct mtk_tx_dma *itxd, *txd;
1265 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1266 	struct mtk_tx_buf *itx_buf, *tx_buf;
1267 	int i, n_desc = 1;
1268 	int queue = skb_get_queue_mapping(skb);
1269 	int k = 0;
1270 
1271 	txq = netdev_get_tx_queue(dev, queue);
1272 	itxd = ring->next_free;
1273 	itxd_pdma = qdma_to_pdma(ring, itxd);
1274 	if (itxd == ring->last_free)
1275 		return -ENOMEM;
1276 
1277 	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1278 	memset(itx_buf, 0, sizeof(*itx_buf));
1279 
1280 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1281 				       DMA_TO_DEVICE);
1282 	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1283 		return -ENOMEM;
1284 
1285 	mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1286 
1287 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1288 	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1289 			  MTK_TX_FLAGS_FPORT1;
1290 	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1291 		     k++);
1292 
1293 	/* TX SG offload */
1294 	txd = itxd;
1295 	txd_pdma = qdma_to_pdma(ring, txd);
1296 
1297 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1298 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1299 		unsigned int offset = 0;
1300 		int frag_size = skb_frag_size(frag);
1301 
1302 		while (frag_size) {
1303 			bool new_desc = true;
1304 
1305 			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1306 			    (i & 0x1)) {
1307 				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1308 				txd_pdma = qdma_to_pdma(ring, txd);
1309 				if (txd == ring->last_free)
1310 					goto err_dma;
1311 
1312 				n_desc++;
1313 			} else {
1314 				new_desc = false;
1315 			}
1316 
1317 			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1318 			txd_info.size = min_t(unsigned int, frag_size,
1319 					      soc->txrx.dma_max_len);
1320 			txd_info.qid = queue;
1321 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1322 					!(frag_size - txd_info.size);
1323 			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1324 							 offset, txd_info.size,
1325 							 DMA_TO_DEVICE);
1326 			if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1327 				goto err_dma;
1328 
1329 			mtk_tx_set_dma_desc(dev, txd, &txd_info);
1330 
1331 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1332 						    soc->txrx.txd_size);
1333 			if (new_desc)
1334 				memset(tx_buf, 0, sizeof(*tx_buf));
1335 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1336 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1337 			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1338 					 MTK_TX_FLAGS_FPORT1;
1339 
1340 			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1341 				     txd_info.size, k++);
1342 
1343 			frag_size -= txd_info.size;
1344 			offset += txd_info.size;
1345 		}
1346 	}
1347 
1348 	/* store skb to cleanup */
1349 	itx_buf->type = MTK_TYPE_SKB;
1350 	itx_buf->data = skb;
1351 
1352 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1353 		if (k & 0x1)
1354 			txd_pdma->txd2 |= TX_DMA_LS0;
1355 		else
1356 			txd_pdma->txd2 |= TX_DMA_LS1;
1357 	}
1358 
1359 	netdev_tx_sent_queue(txq, skb->len);
1360 	skb_tx_timestamp(skb);
1361 
1362 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1363 	atomic_sub(n_desc, &ring->free_count);
1364 
1365 	/* make sure that all changes to the dma ring are flushed before we
1366 	 * continue
1367 	 */
1368 	wmb();
1369 
1370 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1371 		if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1372 			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1373 	} else {
1374 		int next_idx;
1375 
1376 		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
1377 					 ring->dma_size);
1378 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1379 	}
1380 
1381 	return 0;
1382 
1383 err_dma:
1384 	do {
1385 		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1386 
1387 		/* unmap dma */
1388 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1389 
1390 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1391 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1392 			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1393 
1394 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1395 		itxd_pdma = qdma_to_pdma(ring, itxd);
1396 	} while (itxd != txd);
1397 
1398 	return -ENOMEM;
1399 }
1400 
1401 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1402 {
1403 	int i, nfrags = 1;
1404 	skb_frag_t *frag;
1405 
1406 	if (skb_is_gso(skb)) {
1407 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1408 			frag = &skb_shinfo(skb)->frags[i];
1409 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1410 					       eth->soc->txrx.dma_max_len);
1411 		}
1412 	} else {
1413 		nfrags += skb_shinfo(skb)->nr_frags;
1414 	}
1415 
1416 	return nfrags;
1417 }
1418 
1419 static int mtk_queue_stopped(struct mtk_eth *eth)
1420 {
1421 	int i;
1422 
1423 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1424 		if (!eth->netdev[i])
1425 			continue;
1426 		if (netif_queue_stopped(eth->netdev[i]))
1427 			return 1;
1428 	}
1429 
1430 	return 0;
1431 }
1432 
1433 static void mtk_wake_queue(struct mtk_eth *eth)
1434 {
1435 	int i;
1436 
1437 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1438 		if (!eth->netdev[i])
1439 			continue;
1440 		netif_tx_wake_all_queues(eth->netdev[i]);
1441 	}
1442 }
1443 
1444 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1445 {
1446 	struct mtk_mac *mac = netdev_priv(dev);
1447 	struct mtk_eth *eth = mac->hw;
1448 	struct mtk_tx_ring *ring = &eth->tx_ring;
1449 	struct net_device_stats *stats = &dev->stats;
1450 	bool gso = false;
1451 	int tx_num;
1452 
1453 	/* normally we can rely on the stack not calling this more than once,
1454 	 * however we have 2 queues running on the same ring so we need to lock
1455 	 * the ring access
1456 	 */
1457 	spin_lock(&eth->page_lock);
1458 
1459 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1460 		goto drop;
1461 
1462 	tx_num = mtk_cal_txd_req(eth, skb);
1463 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1464 		netif_tx_stop_all_queues(dev);
1465 		netif_err(eth, tx_queued, dev,
1466 			  "Tx Ring full when queue awake!\n");
1467 		spin_unlock(&eth->page_lock);
1468 		return NETDEV_TX_BUSY;
1469 	}
1470 
1471 	/* TSO: fill MSS info in tcp checksum field */
1472 	if (skb_is_gso(skb)) {
1473 		if (skb_cow_head(skb, 0)) {
1474 			netif_warn(eth, tx_err, dev,
1475 				   "GSO expand head fail.\n");
1476 			goto drop;
1477 		}
1478 
1479 		if (skb_shinfo(skb)->gso_type &
1480 				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1481 			gso = true;
1482 			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1483 		}
1484 	}
1485 
1486 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1487 		goto drop;
1488 
1489 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1490 		netif_tx_stop_all_queues(dev);
1491 
1492 	spin_unlock(&eth->page_lock);
1493 
1494 	return NETDEV_TX_OK;
1495 
1496 drop:
1497 	spin_unlock(&eth->page_lock);
1498 	stats->tx_dropped++;
1499 	dev_kfree_skb_any(skb);
1500 	return NETDEV_TX_OK;
1501 }
1502 
1503 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1504 {
1505 	int i;
1506 	struct mtk_rx_ring *ring;
1507 	int idx;
1508 
1509 	if (!eth->hwlro)
1510 		return &eth->rx_ring[0];
1511 
1512 	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1513 		struct mtk_rx_dma *rxd;
1514 
1515 		ring = &eth->rx_ring[i];
1516 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1517 		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1518 		if (rxd->rxd2 & RX_DMA_DONE) {
1519 			ring->calc_idx_update = true;
1520 			return ring;
1521 		}
1522 	}
1523 
1524 	return NULL;
1525 }
1526 
1527 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1528 {
1529 	struct mtk_rx_ring *ring;
1530 	int i;
1531 
1532 	if (!eth->hwlro) {
1533 		ring = &eth->rx_ring[0];
1534 		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1535 	} else {
1536 		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1537 			ring = &eth->rx_ring[i];
1538 			if (ring->calc_idx_update) {
1539 				ring->calc_idx_update = false;
1540 				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1541 			}
1542 		}
1543 	}
1544 }
1545 
1546 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1547 {
1548 	return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
1549 }
1550 
1551 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1552 					      struct xdp_rxq_info *xdp_q,
1553 					      int id, int size)
1554 {
1555 	struct page_pool_params pp_params = {
1556 		.order = 0,
1557 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1558 		.pool_size = size,
1559 		.nid = NUMA_NO_NODE,
1560 		.dev = eth->dma_dev,
1561 		.offset = MTK_PP_HEADROOM,
1562 		.max_len = MTK_PP_MAX_BUF_SIZE,
1563 	};
1564 	struct page_pool *pp;
1565 	int err;
1566 
1567 	pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1568 							  : DMA_FROM_DEVICE;
1569 	pp = page_pool_create(&pp_params);
1570 	if (IS_ERR(pp))
1571 		return pp;
1572 
1573 	err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
1574 				 eth->rx_napi.napi_id, PAGE_SIZE);
1575 	if (err < 0)
1576 		goto err_free_pp;
1577 
1578 	err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1579 	if (err)
1580 		goto err_unregister_rxq;
1581 
1582 	return pp;
1583 
1584 err_unregister_rxq:
1585 	xdp_rxq_info_unreg(xdp_q);
1586 err_free_pp:
1587 	page_pool_destroy(pp);
1588 
1589 	return ERR_PTR(err);
1590 }
1591 
1592 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1593 				    gfp_t gfp_mask)
1594 {
1595 	struct page *page;
1596 
1597 	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1598 	if (!page)
1599 		return NULL;
1600 
1601 	*dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1602 	return page_address(page);
1603 }
1604 
1605 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1606 {
1607 	if (ring->page_pool)
1608 		page_pool_put_full_page(ring->page_pool,
1609 					virt_to_head_page(data), napi);
1610 	else
1611 		skb_free_frag(data);
1612 }
1613 
1614 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1615 			     struct mtk_tx_dma_desc_info *txd_info,
1616 			     struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1617 			     void *data, u16 headroom, int index, bool dma_map)
1618 {
1619 	struct mtk_tx_ring *ring = &eth->tx_ring;
1620 	struct mtk_mac *mac = netdev_priv(dev);
1621 	struct mtk_tx_dma *txd_pdma;
1622 
1623 	if (dma_map) {  /* ndo_xdp_xmit */
1624 		txd_info->addr = dma_map_single(eth->dma_dev, data,
1625 						txd_info->size, DMA_TO_DEVICE);
1626 		if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1627 			return -ENOMEM;
1628 
1629 		tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1630 	} else {
1631 		struct page *page = virt_to_head_page(data);
1632 
1633 		txd_info->addr = page_pool_get_dma_addr(page) +
1634 				 sizeof(struct xdp_frame) + headroom;
1635 		dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1636 					   txd_info->size, DMA_BIDIRECTIONAL);
1637 	}
1638 	mtk_tx_set_dma_desc(dev, txd, txd_info);
1639 
1640 	tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
1641 	tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1642 	tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1643 
1644 	txd_pdma = qdma_to_pdma(ring, txd);
1645 	setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1646 		     index);
1647 
1648 	return 0;
1649 }
1650 
1651 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1652 				struct net_device *dev, bool dma_map)
1653 {
1654 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1655 	const struct mtk_soc_data *soc = eth->soc;
1656 	struct mtk_tx_ring *ring = &eth->tx_ring;
1657 	struct mtk_mac *mac = netdev_priv(dev);
1658 	struct mtk_tx_dma_desc_info txd_info = {
1659 		.size	= xdpf->len,
1660 		.first	= true,
1661 		.last	= !xdp_frame_has_frags(xdpf),
1662 		.qid	= mac->id,
1663 	};
1664 	int err, index = 0, n_desc = 1, nr_frags;
1665 	struct mtk_tx_buf *htx_buf, *tx_buf;
1666 	struct mtk_tx_dma *htxd, *txd;
1667 	void *data = xdpf->data;
1668 
1669 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1670 		return -EBUSY;
1671 
1672 	nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1673 	if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1674 		return -EBUSY;
1675 
1676 	spin_lock(&eth->page_lock);
1677 
1678 	txd = ring->next_free;
1679 	if (txd == ring->last_free) {
1680 		spin_unlock(&eth->page_lock);
1681 		return -ENOMEM;
1682 	}
1683 	htxd = txd;
1684 
1685 	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
1686 	memset(tx_buf, 0, sizeof(*tx_buf));
1687 	htx_buf = tx_buf;
1688 
1689 	for (;;) {
1690 		err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1691 					data, xdpf->headroom, index, dma_map);
1692 		if (err < 0)
1693 			goto unmap;
1694 
1695 		if (txd_info.last)
1696 			break;
1697 
1698 		if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1699 			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1700 			if (txd == ring->last_free)
1701 				goto unmap;
1702 
1703 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1704 						    soc->txrx.txd_size);
1705 			memset(tx_buf, 0, sizeof(*tx_buf));
1706 			n_desc++;
1707 		}
1708 
1709 		memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1710 		txd_info.size = skb_frag_size(&sinfo->frags[index]);
1711 		txd_info.last = index + 1 == nr_frags;
1712 		txd_info.qid = mac->id;
1713 		data = skb_frag_address(&sinfo->frags[index]);
1714 
1715 		index++;
1716 	}
1717 	/* store xdpf for cleanup */
1718 	htx_buf->data = xdpf;
1719 
1720 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1721 		struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1722 
1723 		if (index & 1)
1724 			txd_pdma->txd2 |= TX_DMA_LS0;
1725 		else
1726 			txd_pdma->txd2 |= TX_DMA_LS1;
1727 	}
1728 
1729 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1730 	atomic_sub(n_desc, &ring->free_count);
1731 
1732 	/* make sure that all changes to the dma ring are flushed before we
1733 	 * continue
1734 	 */
1735 	wmb();
1736 
1737 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1738 		mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1739 	} else {
1740 		int idx;
1741 
1742 		idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
1743 		mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1744 			MT7628_TX_CTX_IDX0);
1745 	}
1746 
1747 	spin_unlock(&eth->page_lock);
1748 
1749 	return 0;
1750 
1751 unmap:
1752 	while (htxd != txd) {
1753 		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
1754 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1755 
1756 		htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1757 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1758 			struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1759 
1760 			txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1761 		}
1762 
1763 		htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1764 	}
1765 
1766 	spin_unlock(&eth->page_lock);
1767 
1768 	return err;
1769 }
1770 
1771 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1772 			struct xdp_frame **frames, u32 flags)
1773 {
1774 	struct mtk_mac *mac = netdev_priv(dev);
1775 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1776 	struct mtk_eth *eth = mac->hw;
1777 	int i, nxmit = 0;
1778 
1779 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1780 		return -EINVAL;
1781 
1782 	for (i = 0; i < num_frame; i++) {
1783 		if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1784 			break;
1785 		nxmit++;
1786 	}
1787 
1788 	u64_stats_update_begin(&hw_stats->syncp);
1789 	hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1790 	hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1791 	u64_stats_update_end(&hw_stats->syncp);
1792 
1793 	return nxmit;
1794 }
1795 
1796 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1797 		       struct xdp_buff *xdp, struct net_device *dev)
1798 {
1799 	struct mtk_mac *mac = netdev_priv(dev);
1800 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1801 	u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1802 	struct bpf_prog *prog;
1803 	u32 act = XDP_PASS;
1804 
1805 	rcu_read_lock();
1806 
1807 	prog = rcu_dereference(eth->prog);
1808 	if (!prog)
1809 		goto out;
1810 
1811 	act = bpf_prog_run_xdp(prog, xdp);
1812 	switch (act) {
1813 	case XDP_PASS:
1814 		count = &hw_stats->xdp_stats.rx_xdp_pass;
1815 		goto update_stats;
1816 	case XDP_REDIRECT:
1817 		if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1818 			act = XDP_DROP;
1819 			break;
1820 		}
1821 
1822 		count = &hw_stats->xdp_stats.rx_xdp_redirect;
1823 		goto update_stats;
1824 	case XDP_TX: {
1825 		struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1826 
1827 		if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1828 			count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1829 			act = XDP_DROP;
1830 			break;
1831 		}
1832 
1833 		count = &hw_stats->xdp_stats.rx_xdp_tx;
1834 		goto update_stats;
1835 	}
1836 	default:
1837 		bpf_warn_invalid_xdp_action(dev, prog, act);
1838 		fallthrough;
1839 	case XDP_ABORTED:
1840 		trace_xdp_exception(dev, prog, act);
1841 		fallthrough;
1842 	case XDP_DROP:
1843 		break;
1844 	}
1845 
1846 	page_pool_put_full_page(ring->page_pool,
1847 				virt_to_head_page(xdp->data), true);
1848 
1849 update_stats:
1850 	u64_stats_update_begin(&hw_stats->syncp);
1851 	*count = *count + 1;
1852 	u64_stats_update_end(&hw_stats->syncp);
1853 out:
1854 	rcu_read_unlock();
1855 
1856 	return act;
1857 }
1858 
1859 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1860 		       struct mtk_eth *eth)
1861 {
1862 	struct dim_sample dim_sample = {};
1863 	struct mtk_rx_ring *ring;
1864 	bool xdp_flush = false;
1865 	int idx;
1866 	struct sk_buff *skb;
1867 	u8 *data, *new_data;
1868 	struct mtk_rx_dma_v2 *rxd, trxd;
1869 	int done = 0, bytes = 0;
1870 
1871 	while (done < budget) {
1872 		unsigned int pktlen, *rxdcsum;
1873 		bool has_hwaccel_tag = false;
1874 		struct net_device *netdev;
1875 		u16 vlan_proto, vlan_tci;
1876 		dma_addr_t dma_addr;
1877 		u32 hash, reason;
1878 		int mac = 0;
1879 
1880 		ring = mtk_get_rx_ring(eth);
1881 		if (unlikely(!ring))
1882 			goto rx_done;
1883 
1884 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1885 		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1886 		data = ring->data[idx];
1887 
1888 		if (!mtk_rx_get_desc(eth, &trxd, rxd))
1889 			break;
1890 
1891 		/* find out which mac the packet come from. values start at 1 */
1892 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1893 			mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
1894 		else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
1895 			 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
1896 			mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1897 
1898 		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1899 			     !eth->netdev[mac]))
1900 			goto release_desc;
1901 
1902 		netdev = eth->netdev[mac];
1903 
1904 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1905 			goto release_desc;
1906 
1907 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1908 
1909 		/* alloc new buffer */
1910 		if (ring->page_pool) {
1911 			struct page *page = virt_to_head_page(data);
1912 			struct xdp_buff xdp;
1913 			u32 ret;
1914 
1915 			new_data = mtk_page_pool_get_buff(ring->page_pool,
1916 							  &dma_addr,
1917 							  GFP_ATOMIC);
1918 			if (unlikely(!new_data)) {
1919 				netdev->stats.rx_dropped++;
1920 				goto release_desc;
1921 			}
1922 
1923 			dma_sync_single_for_cpu(eth->dma_dev,
1924 				page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
1925 				pktlen, page_pool_get_dma_dir(ring->page_pool));
1926 
1927 			xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
1928 			xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
1929 					 false);
1930 			xdp_buff_clear_frags_flag(&xdp);
1931 
1932 			ret = mtk_xdp_run(eth, ring, &xdp, netdev);
1933 			if (ret == XDP_REDIRECT)
1934 				xdp_flush = true;
1935 
1936 			if (ret != XDP_PASS)
1937 				goto skip_rx;
1938 
1939 			skb = build_skb(data, PAGE_SIZE);
1940 			if (unlikely(!skb)) {
1941 				page_pool_put_full_page(ring->page_pool,
1942 							page, true);
1943 				netdev->stats.rx_dropped++;
1944 				goto skip_rx;
1945 			}
1946 
1947 			skb_reserve(skb, xdp.data - xdp.data_hard_start);
1948 			skb_put(skb, xdp.data_end - xdp.data);
1949 			skb_mark_for_recycle(skb);
1950 		} else {
1951 			if (ring->frag_size <= PAGE_SIZE)
1952 				new_data = napi_alloc_frag(ring->frag_size);
1953 			else
1954 				new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
1955 
1956 			if (unlikely(!new_data)) {
1957 				netdev->stats.rx_dropped++;
1958 				goto release_desc;
1959 			}
1960 
1961 			dma_addr = dma_map_single(eth->dma_dev,
1962 				new_data + NET_SKB_PAD + eth->ip_align,
1963 				ring->buf_size, DMA_FROM_DEVICE);
1964 			if (unlikely(dma_mapping_error(eth->dma_dev,
1965 						       dma_addr))) {
1966 				skb_free_frag(new_data);
1967 				netdev->stats.rx_dropped++;
1968 				goto release_desc;
1969 			}
1970 
1971 			dma_unmap_single(eth->dma_dev, trxd.rxd1,
1972 					 ring->buf_size, DMA_FROM_DEVICE);
1973 
1974 			skb = build_skb(data, ring->frag_size);
1975 			if (unlikely(!skb)) {
1976 				netdev->stats.rx_dropped++;
1977 				skb_free_frag(data);
1978 				goto skip_rx;
1979 			}
1980 
1981 			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1982 			skb_put(skb, pktlen);
1983 		}
1984 
1985 		skb->dev = netdev;
1986 		bytes += skb->len;
1987 
1988 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1989 			reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
1990 			hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
1991 			if (hash != MTK_RXD5_FOE_ENTRY)
1992 				skb_set_hash(skb, jhash_1word(hash, 0),
1993 					     PKT_HASH_TYPE_L4);
1994 			rxdcsum = &trxd.rxd3;
1995 		} else {
1996 			reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
1997 			hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
1998 			if (hash != MTK_RXD4_FOE_ENTRY)
1999 				skb_set_hash(skb, jhash_1word(hash, 0),
2000 					     PKT_HASH_TYPE_L4);
2001 			rxdcsum = &trxd.rxd4;
2002 		}
2003 
2004 		if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
2005 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2006 		else
2007 			skb_checksum_none_assert(skb);
2008 		skb->protocol = eth_type_trans(skb, netdev);
2009 
2010 		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2011 			mtk_ppe_check_skb(eth->ppe[0], skb, hash);
2012 
2013 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2014 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2015 				if (trxd.rxd3 & RX_DMA_VTAG_V2) {
2016 					vlan_proto = RX_DMA_VPID(trxd.rxd4);
2017 					vlan_tci = RX_DMA_VID(trxd.rxd4);
2018 					has_hwaccel_tag = true;
2019 				}
2020 			} else if (trxd.rxd2 & RX_DMA_VTAG) {
2021 				vlan_proto = RX_DMA_VPID(trxd.rxd3);
2022 				vlan_tci = RX_DMA_VID(trxd.rxd3);
2023 				has_hwaccel_tag = true;
2024 			}
2025 		}
2026 
2027 		/* When using VLAN untagging in combination with DSA, the
2028 		 * hardware treats the MTK special tag as a VLAN and untags it.
2029 		 */
2030 		if (has_hwaccel_tag && netdev_uses_dsa(netdev)) {
2031 			unsigned int port = vlan_proto & GENMASK(2, 0);
2032 
2033 			if (port < ARRAY_SIZE(eth->dsa_meta) &&
2034 			    eth->dsa_meta[port])
2035 				skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2036 		} else if (has_hwaccel_tag) {
2037 			__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
2038 		}
2039 
2040 		skb_record_rx_queue(skb, 0);
2041 		napi_gro_receive(napi, skb);
2042 
2043 skip_rx:
2044 		ring->data[idx] = new_data;
2045 		rxd->rxd1 = (unsigned int)dma_addr;
2046 release_desc:
2047 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2048 			rxd->rxd2 = RX_DMA_LSO;
2049 		else
2050 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2051 
2052 		ring->calc_idx = idx;
2053 		done++;
2054 	}
2055 
2056 rx_done:
2057 	if (done) {
2058 		/* make sure that all changes to the dma ring are flushed before
2059 		 * we continue
2060 		 */
2061 		wmb();
2062 		mtk_update_rx_cpu_idx(eth);
2063 	}
2064 
2065 	eth->rx_packets += done;
2066 	eth->rx_bytes += bytes;
2067 	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2068 			  &dim_sample);
2069 	net_dim(&eth->rx_dim, dim_sample);
2070 
2071 	if (xdp_flush)
2072 		xdp_do_flush_map();
2073 
2074 	return done;
2075 }
2076 
2077 struct mtk_poll_state {
2078     struct netdev_queue *txq;
2079     unsigned int total;
2080     unsigned int done;
2081     unsigned int bytes;
2082 };
2083 
2084 static void
2085 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2086 		 struct sk_buff *skb)
2087 {
2088 	struct netdev_queue *txq;
2089 	struct net_device *dev;
2090 	unsigned int bytes = skb->len;
2091 
2092 	state->total++;
2093 	eth->tx_packets++;
2094 	eth->tx_bytes += bytes;
2095 
2096 	dev = eth->netdev[mac];
2097 	if (!dev)
2098 		return;
2099 
2100 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2101 	if (state->txq == txq) {
2102 		state->done++;
2103 		state->bytes += bytes;
2104 		return;
2105 	}
2106 
2107 	if (state->txq)
2108 		netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2109 
2110 	state->txq = txq;
2111 	state->done = 1;
2112 	state->bytes = bytes;
2113 }
2114 
2115 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2116 			    struct mtk_poll_state *state)
2117 {
2118 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2119 	struct mtk_tx_ring *ring = &eth->tx_ring;
2120 	struct mtk_tx_buf *tx_buf;
2121 	struct xdp_frame_bulk bq;
2122 	struct mtk_tx_dma *desc;
2123 	u32 cpu, dma;
2124 
2125 	cpu = ring->last_free_ptr;
2126 	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2127 
2128 	desc = mtk_qdma_phys_to_virt(ring, cpu);
2129 	xdp_frame_bulk_init(&bq);
2130 
2131 	while ((cpu != dma) && budget) {
2132 		u32 next_cpu = desc->txd2;
2133 		int mac = 0;
2134 
2135 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2136 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2137 			break;
2138 
2139 		tx_buf = mtk_desc_to_tx_buf(ring, desc,
2140 					    eth->soc->txrx.txd_size);
2141 		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
2142 			mac = 1;
2143 
2144 		if (!tx_buf->data)
2145 			break;
2146 
2147 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2148 			if (tx_buf->type == MTK_TYPE_SKB)
2149 				mtk_poll_tx_done(eth, state, mac, tx_buf->data);
2150 
2151 			budget--;
2152 		}
2153 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2154 
2155 		ring->last_free = desc;
2156 		atomic_inc(&ring->free_count);
2157 
2158 		cpu = next_cpu;
2159 	}
2160 	xdp_flush_frame_bulk(&bq);
2161 
2162 	ring->last_free_ptr = cpu;
2163 	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2164 
2165 	return budget;
2166 }
2167 
2168 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2169 			    struct mtk_poll_state *state)
2170 {
2171 	struct mtk_tx_ring *ring = &eth->tx_ring;
2172 	struct mtk_tx_buf *tx_buf;
2173 	struct xdp_frame_bulk bq;
2174 	struct mtk_tx_dma *desc;
2175 	u32 cpu, dma;
2176 
2177 	cpu = ring->cpu_idx;
2178 	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2179 	xdp_frame_bulk_init(&bq);
2180 
2181 	while ((cpu != dma) && budget) {
2182 		tx_buf = &ring->buf[cpu];
2183 		if (!tx_buf->data)
2184 			break;
2185 
2186 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2187 			if (tx_buf->type == MTK_TYPE_SKB)
2188 				mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2189 			budget--;
2190 		}
2191 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2192 
2193 		desc = ring->dma + cpu * eth->soc->txrx.txd_size;
2194 		ring->last_free = desc;
2195 		atomic_inc(&ring->free_count);
2196 
2197 		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2198 	}
2199 	xdp_flush_frame_bulk(&bq);
2200 
2201 	ring->cpu_idx = cpu;
2202 
2203 	return budget;
2204 }
2205 
2206 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2207 {
2208 	struct mtk_tx_ring *ring = &eth->tx_ring;
2209 	struct dim_sample dim_sample = {};
2210 	struct mtk_poll_state state = {};
2211 
2212 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2213 		budget = mtk_poll_tx_qdma(eth, budget, &state);
2214 	else
2215 		budget = mtk_poll_tx_pdma(eth, budget, &state);
2216 
2217 	if (state.txq)
2218 		netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2219 
2220 	dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2221 			  &dim_sample);
2222 	net_dim(&eth->tx_dim, dim_sample);
2223 
2224 	if (mtk_queue_stopped(eth) &&
2225 	    (atomic_read(&ring->free_count) > ring->thresh))
2226 		mtk_wake_queue(eth);
2227 
2228 	return state.total;
2229 }
2230 
2231 static void mtk_handle_status_irq(struct mtk_eth *eth)
2232 {
2233 	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2234 
2235 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2236 		mtk_stats_update(eth);
2237 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2238 			MTK_INT_STATUS2);
2239 	}
2240 }
2241 
2242 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2243 {
2244 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2245 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2246 	int tx_done = 0;
2247 
2248 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2249 		mtk_handle_status_irq(eth);
2250 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2251 	tx_done = mtk_poll_tx(eth, budget);
2252 
2253 	if (unlikely(netif_msg_intr(eth))) {
2254 		dev_info(eth->dev,
2255 			 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2256 			 mtk_r32(eth, reg_map->tx_irq_status),
2257 			 mtk_r32(eth, reg_map->tx_irq_mask));
2258 	}
2259 
2260 	if (tx_done == budget)
2261 		return budget;
2262 
2263 	if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2264 		return budget;
2265 
2266 	if (napi_complete_done(napi, tx_done))
2267 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2268 
2269 	return tx_done;
2270 }
2271 
2272 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2273 {
2274 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2275 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2276 	int rx_done_total = 0;
2277 
2278 	mtk_handle_status_irq(eth);
2279 
2280 	do {
2281 		int rx_done;
2282 
2283 		mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
2284 			reg_map->pdma.irq_status);
2285 		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2286 		rx_done_total += rx_done;
2287 
2288 		if (unlikely(netif_msg_intr(eth))) {
2289 			dev_info(eth->dev,
2290 				 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2291 				 mtk_r32(eth, reg_map->pdma.irq_status),
2292 				 mtk_r32(eth, reg_map->pdma.irq_mask));
2293 		}
2294 
2295 		if (rx_done_total == budget)
2296 			return budget;
2297 
2298 	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
2299 		 eth->soc->txrx.rx_irq_done_mask);
2300 
2301 	if (napi_complete_done(napi, rx_done_total))
2302 		mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2303 
2304 	return rx_done_total;
2305 }
2306 
2307 static int mtk_tx_alloc(struct mtk_eth *eth)
2308 {
2309 	const struct mtk_soc_data *soc = eth->soc;
2310 	struct mtk_tx_ring *ring = &eth->tx_ring;
2311 	int i, sz = soc->txrx.txd_size;
2312 	struct mtk_tx_dma_v2 *txd;
2313 	int ring_size;
2314 	u32 ofs, val;
2315 
2316 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2317 		ring_size = MTK_QDMA_RING_SIZE;
2318 	else
2319 		ring_size = MTK_DMA_SIZE;
2320 
2321 	ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2322 			       GFP_KERNEL);
2323 	if (!ring->buf)
2324 		goto no_tx_mem;
2325 
2326 	ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2327 				       &ring->phys, GFP_KERNEL);
2328 	if (!ring->dma)
2329 		goto no_tx_mem;
2330 
2331 	for (i = 0; i < ring_size; i++) {
2332 		int next = (i + 1) % ring_size;
2333 		u32 next_ptr = ring->phys + next * sz;
2334 
2335 		txd = ring->dma + i * sz;
2336 		txd->txd2 = next_ptr;
2337 		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2338 		txd->txd4 = 0;
2339 		if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
2340 			txd->txd5 = 0;
2341 			txd->txd6 = 0;
2342 			txd->txd7 = 0;
2343 			txd->txd8 = 0;
2344 		}
2345 	}
2346 
2347 	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
2348 	 * only as the framework. The real HW descriptors are the PDMA
2349 	 * descriptors in ring->dma_pdma.
2350 	 */
2351 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2352 		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2353 						    &ring->phys_pdma, GFP_KERNEL);
2354 		if (!ring->dma_pdma)
2355 			goto no_tx_mem;
2356 
2357 		for (i = 0; i < ring_size; i++) {
2358 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2359 			ring->dma_pdma[i].txd4 = 0;
2360 		}
2361 	}
2362 
2363 	ring->dma_size = ring_size;
2364 	atomic_set(&ring->free_count, ring_size - 2);
2365 	ring->next_free = ring->dma;
2366 	ring->last_free = (void *)txd;
2367 	ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2368 	ring->thresh = MAX_SKB_FRAGS;
2369 
2370 	/* make sure that all changes to the dma ring are flushed before we
2371 	 * continue
2372 	 */
2373 	wmb();
2374 
2375 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2376 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2377 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2378 		mtk_w32(eth,
2379 			ring->phys + ((ring_size - 1) * sz),
2380 			soc->reg_map->qdma.crx_ptr);
2381 		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2382 
2383 		for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2384 			val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2385 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2386 
2387 			val = MTK_QTX_SCH_MIN_RATE_EN |
2388 			      /* minimum: 10 Mbps */
2389 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2390 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2391 			      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2392 			if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2393 				val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2394 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2395 			ofs += MTK_QTX_OFFSET;
2396 		}
2397 		val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2398 		mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2399 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2400 			mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2401 	} else {
2402 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2403 		mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2404 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2405 		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2406 	}
2407 
2408 	return 0;
2409 
2410 no_tx_mem:
2411 	return -ENOMEM;
2412 }
2413 
2414 static void mtk_tx_clean(struct mtk_eth *eth)
2415 {
2416 	const struct mtk_soc_data *soc = eth->soc;
2417 	struct mtk_tx_ring *ring = &eth->tx_ring;
2418 	int i;
2419 
2420 	if (ring->buf) {
2421 		for (i = 0; i < ring->dma_size; i++)
2422 			mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2423 		kfree(ring->buf);
2424 		ring->buf = NULL;
2425 	}
2426 
2427 	if (ring->dma) {
2428 		dma_free_coherent(eth->dma_dev,
2429 				  ring->dma_size * soc->txrx.txd_size,
2430 				  ring->dma, ring->phys);
2431 		ring->dma = NULL;
2432 	}
2433 
2434 	if (ring->dma_pdma) {
2435 		dma_free_coherent(eth->dma_dev,
2436 				  ring->dma_size * soc->txrx.txd_size,
2437 				  ring->dma_pdma, ring->phys_pdma);
2438 		ring->dma_pdma = NULL;
2439 	}
2440 }
2441 
2442 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2443 {
2444 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2445 	struct mtk_rx_ring *ring;
2446 	int rx_data_len, rx_dma_size;
2447 	int i;
2448 
2449 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2450 		if (ring_no)
2451 			return -EINVAL;
2452 		ring = &eth->rx_ring_qdma;
2453 	} else {
2454 		ring = &eth->rx_ring[ring_no];
2455 	}
2456 
2457 	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2458 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2459 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2460 	} else {
2461 		rx_data_len = ETH_DATA_LEN;
2462 		rx_dma_size = MTK_DMA_SIZE;
2463 	}
2464 
2465 	ring->frag_size = mtk_max_frag_size(rx_data_len);
2466 	ring->buf_size = mtk_max_buf_size(ring->frag_size);
2467 	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2468 			     GFP_KERNEL);
2469 	if (!ring->data)
2470 		return -ENOMEM;
2471 
2472 	if (mtk_page_pool_enabled(eth)) {
2473 		struct page_pool *pp;
2474 
2475 		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2476 					  rx_dma_size);
2477 		if (IS_ERR(pp))
2478 			return PTR_ERR(pp);
2479 
2480 		ring->page_pool = pp;
2481 	}
2482 
2483 	ring->dma = dma_alloc_coherent(eth->dma_dev,
2484 				       rx_dma_size * eth->soc->txrx.rxd_size,
2485 				       &ring->phys, GFP_KERNEL);
2486 	if (!ring->dma)
2487 		return -ENOMEM;
2488 
2489 	for (i = 0; i < rx_dma_size; i++) {
2490 		struct mtk_rx_dma_v2 *rxd;
2491 		dma_addr_t dma_addr;
2492 		void *data;
2493 
2494 		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2495 		if (ring->page_pool) {
2496 			data = mtk_page_pool_get_buff(ring->page_pool,
2497 						      &dma_addr, GFP_KERNEL);
2498 			if (!data)
2499 				return -ENOMEM;
2500 		} else {
2501 			if (ring->frag_size <= PAGE_SIZE)
2502 				data = netdev_alloc_frag(ring->frag_size);
2503 			else
2504 				data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2505 
2506 			if (!data)
2507 				return -ENOMEM;
2508 
2509 			dma_addr = dma_map_single(eth->dma_dev,
2510 				data + NET_SKB_PAD + eth->ip_align,
2511 				ring->buf_size, DMA_FROM_DEVICE);
2512 			if (unlikely(dma_mapping_error(eth->dma_dev,
2513 						       dma_addr))) {
2514 				skb_free_frag(data);
2515 				return -ENOMEM;
2516 			}
2517 		}
2518 		rxd->rxd1 = (unsigned int)dma_addr;
2519 		ring->data[i] = data;
2520 
2521 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2522 			rxd->rxd2 = RX_DMA_LSO;
2523 		else
2524 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2525 
2526 		rxd->rxd3 = 0;
2527 		rxd->rxd4 = 0;
2528 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2529 			rxd->rxd5 = 0;
2530 			rxd->rxd6 = 0;
2531 			rxd->rxd7 = 0;
2532 			rxd->rxd8 = 0;
2533 		}
2534 	}
2535 
2536 	ring->dma_size = rx_dma_size;
2537 	ring->calc_idx_update = false;
2538 	ring->calc_idx = rx_dma_size - 1;
2539 	if (rx_flag == MTK_RX_FLAGS_QDMA)
2540 		ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2541 				    ring_no * MTK_QRX_OFFSET;
2542 	else
2543 		ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2544 				    ring_no * MTK_QRX_OFFSET;
2545 	/* make sure that all changes to the dma ring are flushed before we
2546 	 * continue
2547 	 */
2548 	wmb();
2549 
2550 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2551 		mtk_w32(eth, ring->phys,
2552 			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2553 		mtk_w32(eth, rx_dma_size,
2554 			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2555 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2556 			reg_map->qdma.rst_idx);
2557 	} else {
2558 		mtk_w32(eth, ring->phys,
2559 			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2560 		mtk_w32(eth, rx_dma_size,
2561 			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2562 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2563 			reg_map->pdma.rst_idx);
2564 	}
2565 	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2566 
2567 	return 0;
2568 }
2569 
2570 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
2571 {
2572 	int i;
2573 
2574 	if (ring->data && ring->dma) {
2575 		for (i = 0; i < ring->dma_size; i++) {
2576 			struct mtk_rx_dma *rxd;
2577 
2578 			if (!ring->data[i])
2579 				continue;
2580 
2581 			rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2582 			if (!rxd->rxd1)
2583 				continue;
2584 
2585 			dma_unmap_single(eth->dma_dev, rxd->rxd1,
2586 					 ring->buf_size, DMA_FROM_DEVICE);
2587 			mtk_rx_put_buff(ring, ring->data[i], false);
2588 		}
2589 		kfree(ring->data);
2590 		ring->data = NULL;
2591 	}
2592 
2593 	if (ring->dma) {
2594 		dma_free_coherent(eth->dma_dev,
2595 				  ring->dma_size * eth->soc->txrx.rxd_size,
2596 				  ring->dma, ring->phys);
2597 		ring->dma = NULL;
2598 	}
2599 
2600 	if (ring->page_pool) {
2601 		if (xdp_rxq_info_is_reg(&ring->xdp_q))
2602 			xdp_rxq_info_unreg(&ring->xdp_q);
2603 		page_pool_destroy(ring->page_pool);
2604 		ring->page_pool = NULL;
2605 	}
2606 }
2607 
2608 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2609 {
2610 	int i;
2611 	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2612 	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2613 
2614 	/* set LRO rings to auto-learn modes */
2615 	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2616 
2617 	/* validate LRO ring */
2618 	ring_ctrl_dw2 |= MTK_RING_VLD;
2619 
2620 	/* set AGE timer (unit: 20us) */
2621 	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2622 	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2623 
2624 	/* set max AGG timer (unit: 20us) */
2625 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2626 
2627 	/* set max LRO AGG count */
2628 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2629 	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2630 
2631 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2632 		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2633 		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2634 		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2635 	}
2636 
2637 	/* IPv4 checksum update enable */
2638 	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2639 
2640 	/* switch priority comparison to packet count mode */
2641 	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2642 
2643 	/* bandwidth threshold setting */
2644 	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2645 
2646 	/* auto-learn score delta setting */
2647 	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2648 
2649 	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2650 	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2651 		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2652 
2653 	/* set HW LRO mode & the max aggregation count for rx packets */
2654 	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2655 
2656 	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
2657 	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2658 
2659 	/* enable HW LRO */
2660 	lro_ctrl_dw0 |= MTK_LRO_EN;
2661 
2662 	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2663 	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2664 
2665 	return 0;
2666 }
2667 
2668 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2669 {
2670 	int i;
2671 	u32 val;
2672 
2673 	/* relinquish lro rings, flush aggregated packets */
2674 	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2675 
2676 	/* wait for relinquishments done */
2677 	for (i = 0; i < 10; i++) {
2678 		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2679 		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2680 			msleep(20);
2681 			continue;
2682 		}
2683 		break;
2684 	}
2685 
2686 	/* invalidate lro rings */
2687 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2688 		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2689 
2690 	/* disable HW LRO */
2691 	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2692 }
2693 
2694 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2695 {
2696 	u32 reg_val;
2697 
2698 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2699 
2700 	/* invalidate the IP setting */
2701 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2702 
2703 	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2704 
2705 	/* validate the IP setting */
2706 	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2707 }
2708 
2709 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2710 {
2711 	u32 reg_val;
2712 
2713 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2714 
2715 	/* invalidate the IP setting */
2716 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2717 
2718 	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2719 }
2720 
2721 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2722 {
2723 	int cnt = 0;
2724 	int i;
2725 
2726 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2727 		if (mac->hwlro_ip[i])
2728 			cnt++;
2729 	}
2730 
2731 	return cnt;
2732 }
2733 
2734 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2735 				struct ethtool_rxnfc *cmd)
2736 {
2737 	struct ethtool_rx_flow_spec *fsp =
2738 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2739 	struct mtk_mac *mac = netdev_priv(dev);
2740 	struct mtk_eth *eth = mac->hw;
2741 	int hwlro_idx;
2742 
2743 	if ((fsp->flow_type != TCP_V4_FLOW) ||
2744 	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2745 	    (fsp->location > 1))
2746 		return -EINVAL;
2747 
2748 	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2749 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2750 
2751 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2752 
2753 	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2754 
2755 	return 0;
2756 }
2757 
2758 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2759 				struct ethtool_rxnfc *cmd)
2760 {
2761 	struct ethtool_rx_flow_spec *fsp =
2762 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2763 	struct mtk_mac *mac = netdev_priv(dev);
2764 	struct mtk_eth *eth = mac->hw;
2765 	int hwlro_idx;
2766 
2767 	if (fsp->location > 1)
2768 		return -EINVAL;
2769 
2770 	mac->hwlro_ip[fsp->location] = 0;
2771 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2772 
2773 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2774 
2775 	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2776 
2777 	return 0;
2778 }
2779 
2780 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2781 {
2782 	struct mtk_mac *mac = netdev_priv(dev);
2783 	struct mtk_eth *eth = mac->hw;
2784 	int i, hwlro_idx;
2785 
2786 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2787 		mac->hwlro_ip[i] = 0;
2788 		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2789 
2790 		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2791 	}
2792 
2793 	mac->hwlro_ip_cnt = 0;
2794 }
2795 
2796 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2797 				    struct ethtool_rxnfc *cmd)
2798 {
2799 	struct mtk_mac *mac = netdev_priv(dev);
2800 	struct ethtool_rx_flow_spec *fsp =
2801 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2802 
2803 	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2804 		return -EINVAL;
2805 
2806 	/* only tcp dst ipv4 is meaningful, others are meaningless */
2807 	fsp->flow_type = TCP_V4_FLOW;
2808 	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2809 	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2810 
2811 	fsp->h_u.tcp_ip4_spec.ip4src = 0;
2812 	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2813 	fsp->h_u.tcp_ip4_spec.psrc = 0;
2814 	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2815 	fsp->h_u.tcp_ip4_spec.pdst = 0;
2816 	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2817 	fsp->h_u.tcp_ip4_spec.tos = 0;
2818 	fsp->m_u.tcp_ip4_spec.tos = 0xff;
2819 
2820 	return 0;
2821 }
2822 
2823 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2824 				  struct ethtool_rxnfc *cmd,
2825 				  u32 *rule_locs)
2826 {
2827 	struct mtk_mac *mac = netdev_priv(dev);
2828 	int cnt = 0;
2829 	int i;
2830 
2831 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2832 		if (mac->hwlro_ip[i]) {
2833 			rule_locs[cnt] = i;
2834 			cnt++;
2835 		}
2836 	}
2837 
2838 	cmd->rule_cnt = cnt;
2839 
2840 	return 0;
2841 }
2842 
2843 static netdev_features_t mtk_fix_features(struct net_device *dev,
2844 					  netdev_features_t features)
2845 {
2846 	if (!(features & NETIF_F_LRO)) {
2847 		struct mtk_mac *mac = netdev_priv(dev);
2848 		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2849 
2850 		if (ip_cnt) {
2851 			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2852 
2853 			features |= NETIF_F_LRO;
2854 		}
2855 	}
2856 
2857 	return features;
2858 }
2859 
2860 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2861 {
2862 	struct mtk_mac *mac = netdev_priv(dev);
2863 	struct mtk_eth *eth = mac->hw;
2864 	netdev_features_t diff = dev->features ^ features;
2865 	int i;
2866 
2867 	if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
2868 		mtk_hwlro_netdev_disable(dev);
2869 
2870 	/* Set RX VLAN offloading */
2871 	if (!(diff & NETIF_F_HW_VLAN_CTAG_RX))
2872 		return 0;
2873 
2874 	mtk_w32(eth, !!(features & NETIF_F_HW_VLAN_CTAG_RX),
2875 		MTK_CDMP_EG_CTRL);
2876 
2877 	/* sync features with other MAC */
2878 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2879 		if (!eth->netdev[i] || eth->netdev[i] == dev)
2880 			continue;
2881 		eth->netdev[i]->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2882 		eth->netdev[i]->features |= features & NETIF_F_HW_VLAN_CTAG_RX;
2883 	}
2884 
2885 	return 0;
2886 }
2887 
2888 /* wait for DMA to finish whatever it is doing before we start using it again */
2889 static int mtk_dma_busy_wait(struct mtk_eth *eth)
2890 {
2891 	unsigned int reg;
2892 	int ret;
2893 	u32 val;
2894 
2895 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2896 		reg = eth->soc->reg_map->qdma.glo_cfg;
2897 	else
2898 		reg = eth->soc->reg_map->pdma.glo_cfg;
2899 
2900 	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
2901 					!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
2902 					5, MTK_DMA_BUSY_TIMEOUT_US);
2903 	if (ret)
2904 		dev_err(eth->dev, "DMA init timeout\n");
2905 
2906 	return ret;
2907 }
2908 
2909 static int mtk_dma_init(struct mtk_eth *eth)
2910 {
2911 	int err;
2912 	u32 i;
2913 
2914 	if (mtk_dma_busy_wait(eth))
2915 		return -EBUSY;
2916 
2917 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2918 		/* QDMA needs scratch memory for internal reordering of the
2919 		 * descriptors
2920 		 */
2921 		err = mtk_init_fq_dma(eth);
2922 		if (err)
2923 			return err;
2924 	}
2925 
2926 	err = mtk_tx_alloc(eth);
2927 	if (err)
2928 		return err;
2929 
2930 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2931 		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2932 		if (err)
2933 			return err;
2934 	}
2935 
2936 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2937 	if (err)
2938 		return err;
2939 
2940 	if (eth->hwlro) {
2941 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2942 			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2943 			if (err)
2944 				return err;
2945 		}
2946 		err = mtk_hwlro_rx_init(eth);
2947 		if (err)
2948 			return err;
2949 	}
2950 
2951 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2952 		/* Enable random early drop and set drop threshold
2953 		 * automatically
2954 		 */
2955 		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2956 			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
2957 		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
2958 	}
2959 
2960 	return 0;
2961 }
2962 
2963 static void mtk_dma_free(struct mtk_eth *eth)
2964 {
2965 	const struct mtk_soc_data *soc = eth->soc;
2966 	int i;
2967 
2968 	for (i = 0; i < MTK_MAC_COUNT; i++)
2969 		if (eth->netdev[i])
2970 			netdev_reset_queue(eth->netdev[i]);
2971 	if (eth->scratch_ring) {
2972 		dma_free_coherent(eth->dma_dev,
2973 				  MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
2974 				  eth->scratch_ring, eth->phy_scratch_ring);
2975 		eth->scratch_ring = NULL;
2976 		eth->phy_scratch_ring = 0;
2977 	}
2978 	mtk_tx_clean(eth);
2979 	mtk_rx_clean(eth, &eth->rx_ring[0]);
2980 	mtk_rx_clean(eth, &eth->rx_ring_qdma);
2981 
2982 	if (eth->hwlro) {
2983 		mtk_hwlro_rx_uninit(eth);
2984 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2985 			mtk_rx_clean(eth, &eth->rx_ring[i]);
2986 	}
2987 
2988 	kfree(eth->scratch_head);
2989 }
2990 
2991 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
2992 {
2993 	struct mtk_mac *mac = netdev_priv(dev);
2994 	struct mtk_eth *eth = mac->hw;
2995 
2996 	eth->netdev[mac->id]->stats.tx_errors++;
2997 	netif_err(eth, tx_err, dev,
2998 		  "transmit timed out\n");
2999 	schedule_work(&eth->pending_work);
3000 }
3001 
3002 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3003 {
3004 	struct mtk_eth *eth = _eth;
3005 
3006 	eth->rx_events++;
3007 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
3008 		__napi_schedule(&eth->rx_napi);
3009 		mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3010 	}
3011 
3012 	return IRQ_HANDLED;
3013 }
3014 
3015 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3016 {
3017 	struct mtk_eth *eth = _eth;
3018 
3019 	eth->tx_events++;
3020 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
3021 		__napi_schedule(&eth->tx_napi);
3022 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3023 	}
3024 
3025 	return IRQ_HANDLED;
3026 }
3027 
3028 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3029 {
3030 	struct mtk_eth *eth = _eth;
3031 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3032 
3033 	if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3034 	    eth->soc->txrx.rx_irq_done_mask) {
3035 		if (mtk_r32(eth, reg_map->pdma.irq_status) &
3036 		    eth->soc->txrx.rx_irq_done_mask)
3037 			mtk_handle_irq_rx(irq, _eth);
3038 	}
3039 	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3040 		if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3041 			mtk_handle_irq_tx(irq, _eth);
3042 	}
3043 
3044 	return IRQ_HANDLED;
3045 }
3046 
3047 #ifdef CONFIG_NET_POLL_CONTROLLER
3048 static void mtk_poll_controller(struct net_device *dev)
3049 {
3050 	struct mtk_mac *mac = netdev_priv(dev);
3051 	struct mtk_eth *eth = mac->hw;
3052 
3053 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3054 	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3055 	mtk_handle_irq_rx(eth->irq[2], dev);
3056 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3057 	mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
3058 }
3059 #endif
3060 
3061 static int mtk_start_dma(struct mtk_eth *eth)
3062 {
3063 	u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3064 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3065 	int err;
3066 
3067 	err = mtk_dma_init(eth);
3068 	if (err) {
3069 		mtk_dma_free(eth);
3070 		return err;
3071 	}
3072 
3073 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3074 		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3075 		val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3076 		       MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3077 		       MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3078 
3079 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3080 			val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3081 			       MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3082 			       MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
3083 		else
3084 			val |= MTK_RX_BT_32DWORDS;
3085 		mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3086 
3087 		mtk_w32(eth,
3088 			MTK_RX_DMA_EN | rx_2b_offset |
3089 			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3090 			reg_map->pdma.glo_cfg);
3091 	} else {
3092 		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3093 			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3094 			reg_map->pdma.glo_cfg);
3095 	}
3096 
3097 	return 0;
3098 }
3099 
3100 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
3101 {
3102 	int i;
3103 
3104 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3105 		return;
3106 
3107 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3108 		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
3109 
3110 		/* default setup the forward port to send frame to PDMA */
3111 		val &= ~0xffff;
3112 
3113 		/* Enable RX checksum */
3114 		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3115 
3116 		val |= config;
3117 
3118 		if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
3119 			val |= MTK_GDMA_SPECIAL_TAG;
3120 
3121 		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
3122 	}
3123 	/* Reset and enable PSE */
3124 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
3125 	mtk_w32(eth, 0, MTK_RST_GL);
3126 }
3127 
3128 
3129 static bool mtk_uses_dsa(struct net_device *dev)
3130 {
3131 #if IS_ENABLED(CONFIG_NET_DSA)
3132 	return netdev_uses_dsa(dev) &&
3133 	       dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3134 #else
3135 	return false;
3136 #endif
3137 }
3138 
3139 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3140 {
3141 	struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3142 	struct mtk_eth *eth = mac->hw;
3143 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3144 	struct ethtool_link_ksettings s;
3145 	struct net_device *ldev;
3146 	struct list_head *iter;
3147 	struct dsa_port *dp;
3148 
3149 	if (event != NETDEV_CHANGE)
3150 		return NOTIFY_DONE;
3151 
3152 	netdev_for_each_lower_dev(dev, ldev, iter) {
3153 		if (netdev_priv(ldev) == mac)
3154 			goto found;
3155 	}
3156 
3157 	return NOTIFY_DONE;
3158 
3159 found:
3160 	if (!dsa_slave_dev_check(dev))
3161 		return NOTIFY_DONE;
3162 
3163 	if (__ethtool_get_link_ksettings(dev, &s))
3164 		return NOTIFY_DONE;
3165 
3166 	if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3167 		return NOTIFY_DONE;
3168 
3169 	dp = dsa_port_from_netdev(dev);
3170 	if (dp->index >= MTK_QDMA_NUM_QUEUES)
3171 		return NOTIFY_DONE;
3172 
3173 	mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3174 
3175 	return NOTIFY_DONE;
3176 }
3177 
3178 static int mtk_open(struct net_device *dev)
3179 {
3180 	struct mtk_mac *mac = netdev_priv(dev);
3181 	struct mtk_eth *eth = mac->hw;
3182 	int i, err;
3183 
3184 	if (mtk_uses_dsa(dev) && !eth->prog) {
3185 		for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3186 			struct metadata_dst *md_dst = eth->dsa_meta[i];
3187 
3188 			if (md_dst)
3189 				continue;
3190 
3191 			md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3192 						    GFP_KERNEL);
3193 			if (!md_dst)
3194 				return -ENOMEM;
3195 
3196 			md_dst->u.port_info.port_id = i;
3197 			eth->dsa_meta[i] = md_dst;
3198 		}
3199 	} else {
3200 		/* Hardware special tag parsing needs to be disabled if at least
3201 		 * one MAC does not use DSA.
3202 		 */
3203 		u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3204 		val &= ~MTK_CDMP_STAG_EN;
3205 		mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3206 	}
3207 
3208 	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3209 	if (err) {
3210 		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3211 			   err);
3212 		return err;
3213 	}
3214 
3215 	/* we run 2 netdevs on the same dma ring so we only bring it up once */
3216 	if (!refcount_read(&eth->dma_refcnt)) {
3217 		const struct mtk_soc_data *soc = eth->soc;
3218 		u32 gdm_config;
3219 		int i;
3220 
3221 		err = mtk_start_dma(eth);
3222 		if (err) {
3223 			phylink_disconnect_phy(mac->phylink);
3224 			return err;
3225 		}
3226 
3227 		for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3228 			mtk_ppe_start(eth->ppe[i]);
3229 
3230 		gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
3231 						  : MTK_GDMA_TO_PDMA;
3232 		mtk_gdm_config(eth, gdm_config);
3233 
3234 		napi_enable(&eth->tx_napi);
3235 		napi_enable(&eth->rx_napi);
3236 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3237 		mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
3238 		refcount_set(&eth->dma_refcnt, 1);
3239 	}
3240 	else
3241 		refcount_inc(&eth->dma_refcnt);
3242 
3243 	phylink_start(mac->phylink);
3244 	netif_tx_start_all_queues(dev);
3245 
3246 	return 0;
3247 }
3248 
3249 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3250 {
3251 	u32 val;
3252 	int i;
3253 
3254 	/* stop the dma engine */
3255 	spin_lock_bh(&eth->page_lock);
3256 	val = mtk_r32(eth, glo_cfg);
3257 	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3258 		glo_cfg);
3259 	spin_unlock_bh(&eth->page_lock);
3260 
3261 	/* wait for dma stop */
3262 	for (i = 0; i < 10; i++) {
3263 		val = mtk_r32(eth, glo_cfg);
3264 		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3265 			msleep(20);
3266 			continue;
3267 		}
3268 		break;
3269 	}
3270 }
3271 
3272 static int mtk_stop(struct net_device *dev)
3273 {
3274 	struct mtk_mac *mac = netdev_priv(dev);
3275 	struct mtk_eth *eth = mac->hw;
3276 	int i;
3277 
3278 	phylink_stop(mac->phylink);
3279 
3280 	netif_tx_disable(dev);
3281 
3282 	phylink_disconnect_phy(mac->phylink);
3283 
3284 	/* only shutdown DMA if this is the last user */
3285 	if (!refcount_dec_and_test(&eth->dma_refcnt))
3286 		return 0;
3287 
3288 	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
3289 
3290 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3291 	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3292 	napi_disable(&eth->tx_napi);
3293 	napi_disable(&eth->rx_napi);
3294 
3295 	cancel_work_sync(&eth->rx_dim.work);
3296 	cancel_work_sync(&eth->tx_dim.work);
3297 
3298 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3299 		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3300 	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3301 
3302 	mtk_dma_free(eth);
3303 
3304 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3305 		mtk_ppe_stop(eth->ppe[i]);
3306 
3307 	return 0;
3308 }
3309 
3310 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3311 			 struct netlink_ext_ack *extack)
3312 {
3313 	struct mtk_mac *mac = netdev_priv(dev);
3314 	struct mtk_eth *eth = mac->hw;
3315 	struct bpf_prog *old_prog;
3316 	bool need_update;
3317 
3318 	if (eth->hwlro) {
3319 		NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3320 		return -EOPNOTSUPP;
3321 	}
3322 
3323 	if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3324 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3325 		return -EOPNOTSUPP;
3326 	}
3327 
3328 	need_update = !!eth->prog != !!prog;
3329 	if (netif_running(dev) && need_update)
3330 		mtk_stop(dev);
3331 
3332 	old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3333 	if (old_prog)
3334 		bpf_prog_put(old_prog);
3335 
3336 	if (netif_running(dev) && need_update)
3337 		return mtk_open(dev);
3338 
3339 	return 0;
3340 }
3341 
3342 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3343 {
3344 	switch (xdp->command) {
3345 	case XDP_SETUP_PROG:
3346 		return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3347 	default:
3348 		return -EINVAL;
3349 	}
3350 }
3351 
3352 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3353 {
3354 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3355 			   reset_bits,
3356 			   reset_bits);
3357 
3358 	usleep_range(1000, 1100);
3359 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3360 			   reset_bits,
3361 			   ~reset_bits);
3362 	mdelay(10);
3363 }
3364 
3365 static void mtk_clk_disable(struct mtk_eth *eth)
3366 {
3367 	int clk;
3368 
3369 	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3370 		clk_disable_unprepare(eth->clks[clk]);
3371 }
3372 
3373 static int mtk_clk_enable(struct mtk_eth *eth)
3374 {
3375 	int clk, ret;
3376 
3377 	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3378 		ret = clk_prepare_enable(eth->clks[clk]);
3379 		if (ret)
3380 			goto err_disable_clks;
3381 	}
3382 
3383 	return 0;
3384 
3385 err_disable_clks:
3386 	while (--clk >= 0)
3387 		clk_disable_unprepare(eth->clks[clk]);
3388 
3389 	return ret;
3390 }
3391 
3392 static void mtk_dim_rx(struct work_struct *work)
3393 {
3394 	struct dim *dim = container_of(work, struct dim, work);
3395 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3396 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3397 	struct dim_cq_moder cur_profile;
3398 	u32 val, cur;
3399 
3400 	cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3401 						dim->profile_ix);
3402 	spin_lock_bh(&eth->dim_lock);
3403 
3404 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3405 	val &= MTK_PDMA_DELAY_TX_MASK;
3406 	val |= MTK_PDMA_DELAY_RX_EN;
3407 
3408 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3409 	val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3410 
3411 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3412 	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3413 
3414 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3415 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3416 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3417 
3418 	spin_unlock_bh(&eth->dim_lock);
3419 
3420 	dim->state = DIM_START_MEASURE;
3421 }
3422 
3423 static void mtk_dim_tx(struct work_struct *work)
3424 {
3425 	struct dim *dim = container_of(work, struct dim, work);
3426 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3427 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3428 	struct dim_cq_moder cur_profile;
3429 	u32 val, cur;
3430 
3431 	cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3432 						dim->profile_ix);
3433 	spin_lock_bh(&eth->dim_lock);
3434 
3435 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3436 	val &= MTK_PDMA_DELAY_RX_MASK;
3437 	val |= MTK_PDMA_DELAY_TX_EN;
3438 
3439 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3440 	val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3441 
3442 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3443 	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3444 
3445 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3446 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3447 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3448 
3449 	spin_unlock_bh(&eth->dim_lock);
3450 
3451 	dim->state = DIM_START_MEASURE;
3452 }
3453 
3454 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3455 {
3456 	struct mtk_eth *eth = mac->hw;
3457 	u32 mcr_cur, mcr_new;
3458 
3459 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3460 		return;
3461 
3462 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3463 	mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3464 
3465 	if (val <= 1518)
3466 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3467 	else if (val <= 1536)
3468 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3469 	else if (val <= 1552)
3470 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3471 	else
3472 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3473 
3474 	if (mcr_new != mcr_cur)
3475 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3476 }
3477 
3478 static int mtk_hw_init(struct mtk_eth *eth)
3479 {
3480 	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3481 		       ETHSYS_DMA_AG_MAP_PPE;
3482 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3483 	int i, val, ret;
3484 
3485 	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
3486 		return 0;
3487 
3488 	pm_runtime_enable(eth->dev);
3489 	pm_runtime_get_sync(eth->dev);
3490 
3491 	ret = mtk_clk_enable(eth);
3492 	if (ret)
3493 		goto err_disable_pm;
3494 
3495 	if (eth->ethsys)
3496 		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3497 				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3498 
3499 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3500 		ret = device_reset(eth->dev);
3501 		if (ret) {
3502 			dev_err(eth->dev, "MAC reset failed!\n");
3503 			goto err_disable_pm;
3504 		}
3505 
3506 		/* set interrupt delays based on current Net DIM sample */
3507 		mtk_dim_rx(&eth->rx_dim.work);
3508 		mtk_dim_tx(&eth->tx_dim.work);
3509 
3510 		/* disable delay and normal interrupt */
3511 		mtk_tx_irq_disable(eth, ~0);
3512 		mtk_rx_irq_disable(eth, ~0);
3513 
3514 		return 0;
3515 	}
3516 
3517 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3518 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3519 		val = RSTCTRL_PPE0_V2;
3520 	} else {
3521 		val = RSTCTRL_PPE0;
3522 	}
3523 
3524 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3525 		val |= RSTCTRL_PPE1;
3526 
3527 	ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3528 
3529 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3530 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3531 			     0x3ffffff);
3532 
3533 		/* Set FE to PDMAv2 if necessary */
3534 		val = mtk_r32(eth, MTK_FE_GLO_MISC);
3535 		mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
3536 	}
3537 
3538 	if (eth->pctl) {
3539 		/* Set GE2 driving and slew rate */
3540 		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3541 
3542 		/* set GE2 TDSEL */
3543 		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3544 
3545 		/* set GE2 TUNE */
3546 		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3547 	}
3548 
3549 	/* Set linkdown as the default for each GMAC. Its own MCR would be set
3550 	 * up with the more appropriate value when mtk_mac_config call is being
3551 	 * invoked.
3552 	 */
3553 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3554 		struct net_device *dev = eth->netdev[i];
3555 
3556 		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3557 		if (dev) {
3558 			struct mtk_mac *mac = netdev_priv(dev);
3559 
3560 			mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN);
3561 		}
3562 	}
3563 
3564 	/* Indicates CDM to parse the MTK special tag from CPU
3565 	 * which also is working out for untag packets.
3566 	 */
3567 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3568 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3569 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3570 		val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3571 		mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3572 	}
3573 
3574 	/* Enable RX VLan Offloading */
3575 	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3576 
3577 	/* set interrupt delays based on current Net DIM sample */
3578 	mtk_dim_rx(&eth->rx_dim.work);
3579 	mtk_dim_tx(&eth->tx_dim.work);
3580 
3581 	/* disable delay and normal interrupt */
3582 	mtk_tx_irq_disable(eth, ~0);
3583 	mtk_rx_irq_disable(eth, ~0);
3584 
3585 	/* FE int grouping */
3586 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3587 	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
3588 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3589 	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
3590 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3591 
3592 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3593 		/* PSE should not drop port8 and port9 packets from WDMA Tx */
3594 		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
3595 
3596 		/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
3597 		mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
3598 
3599 		/* PSE Free Queue Flow Control  */
3600 		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3601 
3602 		/* PSE config input queue threshold */
3603 		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3604 		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3605 		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3606 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3607 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3608 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3609 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
3610 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
3611 
3612 		/* PSE config output queue threshold */
3613 		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3614 		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3615 		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3616 		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3617 		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3618 		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3619 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3620 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
3621 
3622 		/* GDM and CDM Threshold */
3623 		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3624 		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3625 		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3626 		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3627 		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3628 		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
3629 	}
3630 
3631 	return 0;
3632 
3633 err_disable_pm:
3634 	pm_runtime_put_sync(eth->dev);
3635 	pm_runtime_disable(eth->dev);
3636 
3637 	return ret;
3638 }
3639 
3640 static int mtk_hw_deinit(struct mtk_eth *eth)
3641 {
3642 	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3643 		return 0;
3644 
3645 	mtk_clk_disable(eth);
3646 
3647 	pm_runtime_put_sync(eth->dev);
3648 	pm_runtime_disable(eth->dev);
3649 
3650 	return 0;
3651 }
3652 
3653 static int __init mtk_init(struct net_device *dev)
3654 {
3655 	struct mtk_mac *mac = netdev_priv(dev);
3656 	struct mtk_eth *eth = mac->hw;
3657 	int ret;
3658 
3659 	ret = of_get_ethdev_address(mac->of_node, dev);
3660 	if (ret) {
3661 		/* If the mac address is invalid, use random mac address */
3662 		eth_hw_addr_random(dev);
3663 		dev_err(eth->dev, "generated random MAC address %pM\n",
3664 			dev->dev_addr);
3665 	}
3666 
3667 	return 0;
3668 }
3669 
3670 static void mtk_uninit(struct net_device *dev)
3671 {
3672 	struct mtk_mac *mac = netdev_priv(dev);
3673 	struct mtk_eth *eth = mac->hw;
3674 
3675 	phylink_disconnect_phy(mac->phylink);
3676 	mtk_tx_irq_disable(eth, ~0);
3677 	mtk_rx_irq_disable(eth, ~0);
3678 }
3679 
3680 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
3681 {
3682 	int length = new_mtu + MTK_RX_ETH_HLEN;
3683 	struct mtk_mac *mac = netdev_priv(dev);
3684 	struct mtk_eth *eth = mac->hw;
3685 
3686 	if (rcu_access_pointer(eth->prog) &&
3687 	    length > MTK_PP_MAX_BUF_SIZE) {
3688 		netdev_err(dev, "Invalid MTU for XDP mode\n");
3689 		return -EINVAL;
3690 	}
3691 
3692 	mtk_set_mcr_max_rx(mac, length);
3693 	dev->mtu = new_mtu;
3694 
3695 	return 0;
3696 }
3697 
3698 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3699 {
3700 	struct mtk_mac *mac = netdev_priv(dev);
3701 
3702 	switch (cmd) {
3703 	case SIOCGMIIPHY:
3704 	case SIOCGMIIREG:
3705 	case SIOCSMIIREG:
3706 		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
3707 	default:
3708 		break;
3709 	}
3710 
3711 	return -EOPNOTSUPP;
3712 }
3713 
3714 static void mtk_pending_work(struct work_struct *work)
3715 {
3716 	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
3717 	int err, i;
3718 	unsigned long restart = 0;
3719 
3720 	rtnl_lock();
3721 
3722 	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
3723 	set_bit(MTK_RESETTING, &eth->state);
3724 
3725 	/* stop all devices to make sure that dma is properly shut down */
3726 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3727 		if (!eth->netdev[i])
3728 			continue;
3729 		mtk_stop(eth->netdev[i]);
3730 		__set_bit(i, &restart);
3731 	}
3732 	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
3733 
3734 	/* restart underlying hardware such as power, clock, pin mux
3735 	 * and the connected phy
3736 	 */
3737 	mtk_hw_deinit(eth);
3738 
3739 	if (eth->dev->pins)
3740 		pinctrl_select_state(eth->dev->pins->p,
3741 				     eth->dev->pins->default_state);
3742 	mtk_hw_init(eth);
3743 
3744 	/* restart DMA and enable IRQs */
3745 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3746 		if (!test_bit(i, &restart))
3747 			continue;
3748 		err = mtk_open(eth->netdev[i]);
3749 		if (err) {
3750 			netif_alert(eth, ifup, eth->netdev[i],
3751 			      "Driver up/down cycle failed, closing device.\n");
3752 			dev_close(eth->netdev[i]);
3753 		}
3754 	}
3755 
3756 	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
3757 
3758 	clear_bit(MTK_RESETTING, &eth->state);
3759 
3760 	rtnl_unlock();
3761 }
3762 
3763 static int mtk_free_dev(struct mtk_eth *eth)
3764 {
3765 	int i;
3766 
3767 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3768 		if (!eth->netdev[i])
3769 			continue;
3770 		free_netdev(eth->netdev[i]);
3771 	}
3772 
3773 	for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3774 		if (!eth->dsa_meta[i])
3775 			break;
3776 		metadata_dst_free(eth->dsa_meta[i]);
3777 	}
3778 
3779 	return 0;
3780 }
3781 
3782 static int mtk_unreg_dev(struct mtk_eth *eth)
3783 {
3784 	int i;
3785 
3786 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3787 		struct mtk_mac *mac;
3788 		if (!eth->netdev[i])
3789 			continue;
3790 		mac = netdev_priv(eth->netdev[i]);
3791 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3792 			unregister_netdevice_notifier(&mac->device_notifier);
3793 		unregister_netdev(eth->netdev[i]);
3794 	}
3795 
3796 	return 0;
3797 }
3798 
3799 static int mtk_cleanup(struct mtk_eth *eth)
3800 {
3801 	mtk_unreg_dev(eth);
3802 	mtk_free_dev(eth);
3803 	cancel_work_sync(&eth->pending_work);
3804 
3805 	return 0;
3806 }
3807 
3808 static int mtk_get_link_ksettings(struct net_device *ndev,
3809 				  struct ethtool_link_ksettings *cmd)
3810 {
3811 	struct mtk_mac *mac = netdev_priv(ndev);
3812 
3813 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3814 		return -EBUSY;
3815 
3816 	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
3817 }
3818 
3819 static int mtk_set_link_ksettings(struct net_device *ndev,
3820 				  const struct ethtool_link_ksettings *cmd)
3821 {
3822 	struct mtk_mac *mac = netdev_priv(ndev);
3823 
3824 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3825 		return -EBUSY;
3826 
3827 	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
3828 }
3829 
3830 static void mtk_get_drvinfo(struct net_device *dev,
3831 			    struct ethtool_drvinfo *info)
3832 {
3833 	struct mtk_mac *mac = netdev_priv(dev);
3834 
3835 	strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
3836 	strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
3837 	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
3838 }
3839 
3840 static u32 mtk_get_msglevel(struct net_device *dev)
3841 {
3842 	struct mtk_mac *mac = netdev_priv(dev);
3843 
3844 	return mac->hw->msg_enable;
3845 }
3846 
3847 static void mtk_set_msglevel(struct net_device *dev, u32 value)
3848 {
3849 	struct mtk_mac *mac = netdev_priv(dev);
3850 
3851 	mac->hw->msg_enable = value;
3852 }
3853 
3854 static int mtk_nway_reset(struct net_device *dev)
3855 {
3856 	struct mtk_mac *mac = netdev_priv(dev);
3857 
3858 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3859 		return -EBUSY;
3860 
3861 	if (!mac->phylink)
3862 		return -ENOTSUPP;
3863 
3864 	return phylink_ethtool_nway_reset(mac->phylink);
3865 }
3866 
3867 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3868 {
3869 	int i;
3870 
3871 	switch (stringset) {
3872 	case ETH_SS_STATS: {
3873 		struct mtk_mac *mac = netdev_priv(dev);
3874 
3875 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
3876 			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
3877 			data += ETH_GSTRING_LEN;
3878 		}
3879 		if (mtk_page_pool_enabled(mac->hw))
3880 			page_pool_ethtool_stats_get_strings(data);
3881 		break;
3882 	}
3883 	default:
3884 		break;
3885 	}
3886 }
3887 
3888 static int mtk_get_sset_count(struct net_device *dev, int sset)
3889 {
3890 	switch (sset) {
3891 	case ETH_SS_STATS: {
3892 		int count = ARRAY_SIZE(mtk_ethtool_stats);
3893 		struct mtk_mac *mac = netdev_priv(dev);
3894 
3895 		if (mtk_page_pool_enabled(mac->hw))
3896 			count += page_pool_ethtool_stats_get_count();
3897 		return count;
3898 	}
3899 	default:
3900 		return -EOPNOTSUPP;
3901 	}
3902 }
3903 
3904 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
3905 {
3906 	struct page_pool_stats stats = {};
3907 	int i;
3908 
3909 	for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
3910 		struct mtk_rx_ring *ring = &eth->rx_ring[i];
3911 
3912 		if (!ring->page_pool)
3913 			continue;
3914 
3915 		page_pool_get_stats(ring->page_pool, &stats);
3916 	}
3917 	page_pool_ethtool_stats_get(data, &stats);
3918 }
3919 
3920 static void mtk_get_ethtool_stats(struct net_device *dev,
3921 				  struct ethtool_stats *stats, u64 *data)
3922 {
3923 	struct mtk_mac *mac = netdev_priv(dev);
3924 	struct mtk_hw_stats *hwstats = mac->hw_stats;
3925 	u64 *data_src, *data_dst;
3926 	unsigned int start;
3927 	int i;
3928 
3929 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3930 		return;
3931 
3932 	if (netif_running(dev) && netif_device_present(dev)) {
3933 		if (spin_trylock_bh(&hwstats->stats_lock)) {
3934 			mtk_stats_update_mac(mac);
3935 			spin_unlock_bh(&hwstats->stats_lock);
3936 		}
3937 	}
3938 
3939 	data_src = (u64 *)hwstats;
3940 
3941 	do {
3942 		data_dst = data;
3943 		start = u64_stats_fetch_begin(&hwstats->syncp);
3944 
3945 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
3946 			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
3947 		if (mtk_page_pool_enabled(mac->hw))
3948 			mtk_ethtool_pp_stats(mac->hw, data_dst);
3949 	} while (u64_stats_fetch_retry(&hwstats->syncp, start));
3950 }
3951 
3952 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
3953 			 u32 *rule_locs)
3954 {
3955 	int ret = -EOPNOTSUPP;
3956 
3957 	switch (cmd->cmd) {
3958 	case ETHTOOL_GRXRINGS:
3959 		if (dev->hw_features & NETIF_F_LRO) {
3960 			cmd->data = MTK_MAX_RX_RING_NUM;
3961 			ret = 0;
3962 		}
3963 		break;
3964 	case ETHTOOL_GRXCLSRLCNT:
3965 		if (dev->hw_features & NETIF_F_LRO) {
3966 			struct mtk_mac *mac = netdev_priv(dev);
3967 
3968 			cmd->rule_cnt = mac->hwlro_ip_cnt;
3969 			ret = 0;
3970 		}
3971 		break;
3972 	case ETHTOOL_GRXCLSRULE:
3973 		if (dev->hw_features & NETIF_F_LRO)
3974 			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
3975 		break;
3976 	case ETHTOOL_GRXCLSRLALL:
3977 		if (dev->hw_features & NETIF_F_LRO)
3978 			ret = mtk_hwlro_get_fdir_all(dev, cmd,
3979 						     rule_locs);
3980 		break;
3981 	default:
3982 		break;
3983 	}
3984 
3985 	return ret;
3986 }
3987 
3988 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3989 {
3990 	int ret = -EOPNOTSUPP;
3991 
3992 	switch (cmd->cmd) {
3993 	case ETHTOOL_SRXCLSRLINS:
3994 		if (dev->hw_features & NETIF_F_LRO)
3995 			ret = mtk_hwlro_add_ipaddr(dev, cmd);
3996 		break;
3997 	case ETHTOOL_SRXCLSRLDEL:
3998 		if (dev->hw_features & NETIF_F_LRO)
3999 			ret = mtk_hwlro_del_ipaddr(dev, cmd);
4000 		break;
4001 	default:
4002 		break;
4003 	}
4004 
4005 	return ret;
4006 }
4007 
4008 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4009 			    struct net_device *sb_dev)
4010 {
4011 	struct mtk_mac *mac = netdev_priv(dev);
4012 	unsigned int queue = 0;
4013 
4014 	if (netdev_uses_dsa(dev))
4015 		queue = skb_get_queue_mapping(skb) + 3;
4016 	else
4017 		queue = mac->id;
4018 
4019 	if (queue >= dev->num_tx_queues)
4020 		queue = 0;
4021 
4022 	return queue;
4023 }
4024 
4025 static const struct ethtool_ops mtk_ethtool_ops = {
4026 	.get_link_ksettings	= mtk_get_link_ksettings,
4027 	.set_link_ksettings	= mtk_set_link_ksettings,
4028 	.get_drvinfo		= mtk_get_drvinfo,
4029 	.get_msglevel		= mtk_get_msglevel,
4030 	.set_msglevel		= mtk_set_msglevel,
4031 	.nway_reset		= mtk_nway_reset,
4032 	.get_link		= ethtool_op_get_link,
4033 	.get_strings		= mtk_get_strings,
4034 	.get_sset_count		= mtk_get_sset_count,
4035 	.get_ethtool_stats	= mtk_get_ethtool_stats,
4036 	.get_rxnfc		= mtk_get_rxnfc,
4037 	.set_rxnfc              = mtk_set_rxnfc,
4038 };
4039 
4040 static const struct net_device_ops mtk_netdev_ops = {
4041 	.ndo_init		= mtk_init,
4042 	.ndo_uninit		= mtk_uninit,
4043 	.ndo_open		= mtk_open,
4044 	.ndo_stop		= mtk_stop,
4045 	.ndo_start_xmit		= mtk_start_xmit,
4046 	.ndo_set_mac_address	= mtk_set_mac_address,
4047 	.ndo_validate_addr	= eth_validate_addr,
4048 	.ndo_eth_ioctl		= mtk_do_ioctl,
4049 	.ndo_change_mtu		= mtk_change_mtu,
4050 	.ndo_tx_timeout		= mtk_tx_timeout,
4051 	.ndo_get_stats64        = mtk_get_stats64,
4052 	.ndo_fix_features	= mtk_fix_features,
4053 	.ndo_set_features	= mtk_set_features,
4054 #ifdef CONFIG_NET_POLL_CONTROLLER
4055 	.ndo_poll_controller	= mtk_poll_controller,
4056 #endif
4057 	.ndo_setup_tc		= mtk_eth_setup_tc,
4058 	.ndo_bpf		= mtk_xdp,
4059 	.ndo_xdp_xmit		= mtk_xdp_xmit,
4060 	.ndo_select_queue	= mtk_select_queue,
4061 };
4062 
4063 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4064 {
4065 	const __be32 *_id = of_get_property(np, "reg", NULL);
4066 	phy_interface_t phy_mode;
4067 	struct phylink *phylink;
4068 	struct mtk_mac *mac;
4069 	int id, err;
4070 	int txqs = 1;
4071 
4072 	if (!_id) {
4073 		dev_err(eth->dev, "missing mac id\n");
4074 		return -EINVAL;
4075 	}
4076 
4077 	id = be32_to_cpup(_id);
4078 	if (id >= MTK_MAC_COUNT) {
4079 		dev_err(eth->dev, "%d is not a valid mac id\n", id);
4080 		return -EINVAL;
4081 	}
4082 
4083 	if (eth->netdev[id]) {
4084 		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4085 		return -EINVAL;
4086 	}
4087 
4088 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4089 		txqs = MTK_QDMA_NUM_QUEUES;
4090 
4091 	eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4092 	if (!eth->netdev[id]) {
4093 		dev_err(eth->dev, "alloc_etherdev failed\n");
4094 		return -ENOMEM;
4095 	}
4096 	mac = netdev_priv(eth->netdev[id]);
4097 	eth->mac[id] = mac;
4098 	mac->id = id;
4099 	mac->hw = eth;
4100 	mac->of_node = np;
4101 
4102 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4103 	mac->hwlro_ip_cnt = 0;
4104 
4105 	mac->hw_stats = devm_kzalloc(eth->dev,
4106 				     sizeof(*mac->hw_stats),
4107 				     GFP_KERNEL);
4108 	if (!mac->hw_stats) {
4109 		dev_err(eth->dev, "failed to allocate counter memory\n");
4110 		err = -ENOMEM;
4111 		goto free_netdev;
4112 	}
4113 	spin_lock_init(&mac->hw_stats->stats_lock);
4114 	u64_stats_init(&mac->hw_stats->syncp);
4115 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
4116 
4117 	/* phylink create */
4118 	err = of_get_phy_mode(np, &phy_mode);
4119 	if (err) {
4120 		dev_err(eth->dev, "incorrect phy-mode\n");
4121 		goto free_netdev;
4122 	}
4123 
4124 	/* mac config is not set */
4125 	mac->interface = PHY_INTERFACE_MODE_NA;
4126 	mac->speed = SPEED_UNKNOWN;
4127 
4128 	mac->phylink_config.dev = &eth->netdev[id]->dev;
4129 	mac->phylink_config.type = PHYLINK_NETDEV;
4130 	/* This driver makes use of state->speed in mac_config */
4131 	mac->phylink_config.legacy_pre_march2020 = true;
4132 	mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4133 		MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4134 
4135 	__set_bit(PHY_INTERFACE_MODE_MII,
4136 		  mac->phylink_config.supported_interfaces);
4137 	__set_bit(PHY_INTERFACE_MODE_GMII,
4138 		  mac->phylink_config.supported_interfaces);
4139 
4140 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4141 		phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4142 
4143 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4144 		__set_bit(PHY_INTERFACE_MODE_TRGMII,
4145 			  mac->phylink_config.supported_interfaces);
4146 
4147 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4148 		__set_bit(PHY_INTERFACE_MODE_SGMII,
4149 			  mac->phylink_config.supported_interfaces);
4150 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
4151 			  mac->phylink_config.supported_interfaces);
4152 		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
4153 			  mac->phylink_config.supported_interfaces);
4154 	}
4155 
4156 	phylink = phylink_create(&mac->phylink_config,
4157 				 of_fwnode_handle(mac->of_node),
4158 				 phy_mode, &mtk_phylink_ops);
4159 	if (IS_ERR(phylink)) {
4160 		err = PTR_ERR(phylink);
4161 		goto free_netdev;
4162 	}
4163 
4164 	mac->phylink = phylink;
4165 
4166 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4167 	eth->netdev[id]->watchdog_timeo = 5 * HZ;
4168 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4169 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
4170 
4171 	eth->netdev[id]->hw_features = eth->soc->hw_features;
4172 	if (eth->hwlro)
4173 		eth->netdev[id]->hw_features |= NETIF_F_LRO;
4174 
4175 	eth->netdev[id]->vlan_features = eth->soc->hw_features &
4176 		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
4177 	eth->netdev[id]->features |= eth->soc->hw_features;
4178 	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4179 
4180 	eth->netdev[id]->irq = eth->irq[0];
4181 	eth->netdev[id]->dev.of_node = np;
4182 
4183 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4184 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4185 	else
4186 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4187 
4188 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4189 		mac->device_notifier.notifier_call = mtk_device_event;
4190 		register_netdevice_notifier(&mac->device_notifier);
4191 	}
4192 
4193 	return 0;
4194 
4195 free_netdev:
4196 	free_netdev(eth->netdev[id]);
4197 	return err;
4198 }
4199 
4200 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4201 {
4202 	struct net_device *dev, *tmp;
4203 	LIST_HEAD(dev_list);
4204 	int i;
4205 
4206 	rtnl_lock();
4207 
4208 	for (i = 0; i < MTK_MAC_COUNT; i++) {
4209 		dev = eth->netdev[i];
4210 
4211 		if (!dev || !(dev->flags & IFF_UP))
4212 			continue;
4213 
4214 		list_add_tail(&dev->close_list, &dev_list);
4215 	}
4216 
4217 	dev_close_many(&dev_list, false);
4218 
4219 	eth->dma_dev = dma_dev;
4220 
4221 	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4222 		list_del_init(&dev->close_list);
4223 		dev_open(dev, NULL);
4224 	}
4225 
4226 	rtnl_unlock();
4227 }
4228 
4229 static int mtk_probe(struct platform_device *pdev)
4230 {
4231 	struct resource *res = NULL;
4232 	struct device_node *mac_np;
4233 	struct mtk_eth *eth;
4234 	int err, i;
4235 
4236 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4237 	if (!eth)
4238 		return -ENOMEM;
4239 
4240 	eth->soc = of_device_get_match_data(&pdev->dev);
4241 
4242 	eth->dev = &pdev->dev;
4243 	eth->dma_dev = &pdev->dev;
4244 	eth->base = devm_platform_ioremap_resource(pdev, 0);
4245 	if (IS_ERR(eth->base))
4246 		return PTR_ERR(eth->base);
4247 
4248 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4249 		eth->ip_align = NET_IP_ALIGN;
4250 
4251 	spin_lock_init(&eth->page_lock);
4252 	spin_lock_init(&eth->tx_irq_lock);
4253 	spin_lock_init(&eth->rx_irq_lock);
4254 	spin_lock_init(&eth->dim_lock);
4255 
4256 	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4257 	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
4258 
4259 	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4260 	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
4261 
4262 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4263 		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4264 							      "mediatek,ethsys");
4265 		if (IS_ERR(eth->ethsys)) {
4266 			dev_err(&pdev->dev, "no ethsys regmap found\n");
4267 			return PTR_ERR(eth->ethsys);
4268 		}
4269 	}
4270 
4271 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4272 		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4273 							     "mediatek,infracfg");
4274 		if (IS_ERR(eth->infra)) {
4275 			dev_err(&pdev->dev, "no infracfg regmap found\n");
4276 			return PTR_ERR(eth->infra);
4277 		}
4278 	}
4279 
4280 	if (of_dma_is_coherent(pdev->dev.of_node)) {
4281 		struct regmap *cci;
4282 
4283 		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4284 						      "cci-control-port");
4285 		/* enable CPU/bus coherency */
4286 		if (!IS_ERR(cci))
4287 			regmap_write(cci, 0, 3);
4288 	}
4289 
4290 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
4291 		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
4292 					  GFP_KERNEL);
4293 		if (!eth->sgmii)
4294 			return -ENOMEM;
4295 
4296 		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
4297 				     eth->soc->ana_rgc3);
4298 
4299 		if (err)
4300 			return err;
4301 	}
4302 
4303 	if (eth->soc->required_pctl) {
4304 		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4305 							    "mediatek,pctl");
4306 		if (IS_ERR(eth->pctl)) {
4307 			dev_err(&pdev->dev, "no pctl regmap found\n");
4308 			return PTR_ERR(eth->pctl);
4309 		}
4310 	}
4311 
4312 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
4313 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4314 		if (!res)
4315 			return -EINVAL;
4316 	}
4317 
4318 	if (eth->soc->offload_version) {
4319 		for (i = 0;; i++) {
4320 			struct device_node *np;
4321 			phys_addr_t wdma_phy;
4322 			u32 wdma_base;
4323 
4324 			if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4325 				break;
4326 
4327 			np = of_parse_phandle(pdev->dev.of_node,
4328 					      "mediatek,wed", i);
4329 			if (!np)
4330 				break;
4331 
4332 			wdma_base = eth->soc->reg_map->wdma_base[i];
4333 			wdma_phy = res ? res->start + wdma_base : 0;
4334 			mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4335 				       wdma_phy, i);
4336 		}
4337 	}
4338 
4339 	for (i = 0; i < 3; i++) {
4340 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4341 			eth->irq[i] = eth->irq[0];
4342 		else
4343 			eth->irq[i] = platform_get_irq(pdev, i);
4344 		if (eth->irq[i] < 0) {
4345 			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4346 			err = -ENXIO;
4347 			goto err_wed_exit;
4348 		}
4349 	}
4350 	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4351 		eth->clks[i] = devm_clk_get(eth->dev,
4352 					    mtk_clks_source_name[i]);
4353 		if (IS_ERR(eth->clks[i])) {
4354 			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4355 				err = -EPROBE_DEFER;
4356 				goto err_wed_exit;
4357 			}
4358 			if (eth->soc->required_clks & BIT(i)) {
4359 				dev_err(&pdev->dev, "clock %s not found\n",
4360 					mtk_clks_source_name[i]);
4361 				err = -EINVAL;
4362 				goto err_wed_exit;
4363 			}
4364 			eth->clks[i] = NULL;
4365 		}
4366 	}
4367 
4368 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4369 	INIT_WORK(&eth->pending_work, mtk_pending_work);
4370 
4371 	err = mtk_hw_init(eth);
4372 	if (err)
4373 		goto err_wed_exit;
4374 
4375 	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4376 
4377 	for_each_child_of_node(pdev->dev.of_node, mac_np) {
4378 		if (!of_device_is_compatible(mac_np,
4379 					     "mediatek,eth-mac"))
4380 			continue;
4381 
4382 		if (!of_device_is_available(mac_np))
4383 			continue;
4384 
4385 		err = mtk_add_mac(eth, mac_np);
4386 		if (err) {
4387 			of_node_put(mac_np);
4388 			goto err_deinit_hw;
4389 		}
4390 	}
4391 
4392 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4393 		err = devm_request_irq(eth->dev, eth->irq[0],
4394 				       mtk_handle_irq, 0,
4395 				       dev_name(eth->dev), eth);
4396 	} else {
4397 		err = devm_request_irq(eth->dev, eth->irq[1],
4398 				       mtk_handle_irq_tx, 0,
4399 				       dev_name(eth->dev), eth);
4400 		if (err)
4401 			goto err_free_dev;
4402 
4403 		err = devm_request_irq(eth->dev, eth->irq[2],
4404 				       mtk_handle_irq_rx, 0,
4405 				       dev_name(eth->dev), eth);
4406 	}
4407 	if (err)
4408 		goto err_free_dev;
4409 
4410 	/* No MT7628/88 support yet */
4411 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4412 		err = mtk_mdio_init(eth);
4413 		if (err)
4414 			goto err_free_dev;
4415 	}
4416 
4417 	if (eth->soc->offload_version) {
4418 		u32 num_ppe;
4419 
4420 		num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
4421 		num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
4422 		for (i = 0; i < num_ppe; i++) {
4423 			u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
4424 
4425 			eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
4426 						   eth->soc->offload_version, i);
4427 			if (!eth->ppe[i]) {
4428 				err = -ENOMEM;
4429 				goto err_deinit_ppe;
4430 			}
4431 		}
4432 
4433 		err = mtk_eth_offload_init(eth);
4434 		if (err)
4435 			goto err_deinit_ppe;
4436 	}
4437 
4438 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4439 		if (!eth->netdev[i])
4440 			continue;
4441 
4442 		err = register_netdev(eth->netdev[i]);
4443 		if (err) {
4444 			dev_err(eth->dev, "error bringing up device\n");
4445 			goto err_deinit_ppe;
4446 		} else
4447 			netif_info(eth, probe, eth->netdev[i],
4448 				   "mediatek frame engine at 0x%08lx, irq %d\n",
4449 				   eth->netdev[i]->base_addr, eth->irq[0]);
4450 	}
4451 
4452 	/* we run 2 devices on the same DMA ring so we need a dummy device
4453 	 * for NAPI to work
4454 	 */
4455 	init_dummy_netdev(&eth->dummy_dev);
4456 	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
4457 	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
4458 
4459 	platform_set_drvdata(pdev, eth);
4460 
4461 	return 0;
4462 
4463 err_deinit_ppe:
4464 	mtk_ppe_deinit(eth);
4465 	mtk_mdio_cleanup(eth);
4466 err_free_dev:
4467 	mtk_free_dev(eth);
4468 err_deinit_hw:
4469 	mtk_hw_deinit(eth);
4470 err_wed_exit:
4471 	mtk_wed_exit();
4472 
4473 	return err;
4474 }
4475 
4476 static int mtk_remove(struct platform_device *pdev)
4477 {
4478 	struct mtk_eth *eth = platform_get_drvdata(pdev);
4479 	struct mtk_mac *mac;
4480 	int i;
4481 
4482 	/* stop all devices to make sure that dma is properly shut down */
4483 	for (i = 0; i < MTK_MAC_COUNT; i++) {
4484 		if (!eth->netdev[i])
4485 			continue;
4486 		mtk_stop(eth->netdev[i]);
4487 		mac = netdev_priv(eth->netdev[i]);
4488 		phylink_disconnect_phy(mac->phylink);
4489 	}
4490 
4491 	mtk_wed_exit();
4492 	mtk_hw_deinit(eth);
4493 
4494 	netif_napi_del(&eth->tx_napi);
4495 	netif_napi_del(&eth->rx_napi);
4496 	mtk_cleanup(eth);
4497 	mtk_mdio_cleanup(eth);
4498 
4499 	return 0;
4500 }
4501 
4502 static const struct mtk_soc_data mt2701_data = {
4503 	.reg_map = &mtk_reg_map,
4504 	.caps = MT7623_CAPS | MTK_HWLRO,
4505 	.hw_features = MTK_HW_FEATURES,
4506 	.required_clks = MT7623_CLKS_BITMAP,
4507 	.required_pctl = true,
4508 	.txrx = {
4509 		.txd_size = sizeof(struct mtk_tx_dma),
4510 		.rxd_size = sizeof(struct mtk_rx_dma),
4511 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4512 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4513 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4514 		.dma_len_offset = 16,
4515 	},
4516 };
4517 
4518 static const struct mtk_soc_data mt7621_data = {
4519 	.reg_map = &mtk_reg_map,
4520 	.caps = MT7621_CAPS,
4521 	.hw_features = MTK_HW_FEATURES,
4522 	.required_clks = MT7621_CLKS_BITMAP,
4523 	.required_pctl = false,
4524 	.offload_version = 1,
4525 	.hash_offset = 2,
4526 	.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4527 	.txrx = {
4528 		.txd_size = sizeof(struct mtk_tx_dma),
4529 		.rxd_size = sizeof(struct mtk_rx_dma),
4530 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4531 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4532 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4533 		.dma_len_offset = 16,
4534 	},
4535 };
4536 
4537 static const struct mtk_soc_data mt7622_data = {
4538 	.reg_map = &mtk_reg_map,
4539 	.ana_rgc3 = 0x2028,
4540 	.caps = MT7622_CAPS | MTK_HWLRO,
4541 	.hw_features = MTK_HW_FEATURES,
4542 	.required_clks = MT7622_CLKS_BITMAP,
4543 	.required_pctl = false,
4544 	.offload_version = 2,
4545 	.hash_offset = 2,
4546 	.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4547 	.txrx = {
4548 		.txd_size = sizeof(struct mtk_tx_dma),
4549 		.rxd_size = sizeof(struct mtk_rx_dma),
4550 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4551 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4552 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4553 		.dma_len_offset = 16,
4554 	},
4555 };
4556 
4557 static const struct mtk_soc_data mt7623_data = {
4558 	.reg_map = &mtk_reg_map,
4559 	.caps = MT7623_CAPS | MTK_HWLRO,
4560 	.hw_features = MTK_HW_FEATURES,
4561 	.required_clks = MT7623_CLKS_BITMAP,
4562 	.required_pctl = true,
4563 	.offload_version = 1,
4564 	.hash_offset = 2,
4565 	.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4566 	.txrx = {
4567 		.txd_size = sizeof(struct mtk_tx_dma),
4568 		.rxd_size = sizeof(struct mtk_rx_dma),
4569 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4570 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4571 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4572 		.dma_len_offset = 16,
4573 	},
4574 };
4575 
4576 static const struct mtk_soc_data mt7629_data = {
4577 	.reg_map = &mtk_reg_map,
4578 	.ana_rgc3 = 0x128,
4579 	.caps = MT7629_CAPS | MTK_HWLRO,
4580 	.hw_features = MTK_HW_FEATURES,
4581 	.required_clks = MT7629_CLKS_BITMAP,
4582 	.required_pctl = false,
4583 	.txrx = {
4584 		.txd_size = sizeof(struct mtk_tx_dma),
4585 		.rxd_size = sizeof(struct mtk_rx_dma),
4586 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4587 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4588 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4589 		.dma_len_offset = 16,
4590 	},
4591 };
4592 
4593 static const struct mtk_soc_data mt7986_data = {
4594 	.reg_map = &mt7986_reg_map,
4595 	.ana_rgc3 = 0x128,
4596 	.caps = MT7986_CAPS,
4597 	.hw_features = MTK_HW_FEATURES,
4598 	.required_clks = MT7986_CLKS_BITMAP,
4599 	.required_pctl = false,
4600 	.offload_version = 2,
4601 	.hash_offset = 4,
4602 	.foe_entry_size = sizeof(struct mtk_foe_entry),
4603 	.txrx = {
4604 		.txd_size = sizeof(struct mtk_tx_dma_v2),
4605 		.rxd_size = sizeof(struct mtk_rx_dma_v2),
4606 		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
4607 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
4608 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4609 		.dma_len_offset = 8,
4610 	},
4611 };
4612 
4613 static const struct mtk_soc_data rt5350_data = {
4614 	.reg_map = &mt7628_reg_map,
4615 	.caps = MT7628_CAPS,
4616 	.hw_features = MTK_HW_FEATURES_MT7628,
4617 	.required_clks = MT7628_CLKS_BITMAP,
4618 	.required_pctl = false,
4619 	.txrx = {
4620 		.txd_size = sizeof(struct mtk_tx_dma),
4621 		.rxd_size = sizeof(struct mtk_rx_dma),
4622 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4623 		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
4624 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4625 		.dma_len_offset = 16,
4626 	},
4627 };
4628 
4629 const struct of_device_id of_mtk_match[] = {
4630 	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4631 	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4632 	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4633 	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4634 	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4635 	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
4636 	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
4637 	{},
4638 };
4639 MODULE_DEVICE_TABLE(of, of_mtk_match);
4640 
4641 static struct platform_driver mtk_driver = {
4642 	.probe = mtk_probe,
4643 	.remove = mtk_remove,
4644 	.driver = {
4645 		.name = "mtk_soc_eth",
4646 		.of_match_table = of_mtk_match,
4647 	},
4648 };
4649 
4650 module_platform_driver(mtk_driver);
4651 
4652 MODULE_LICENSE("GPL");
4653 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
4654 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
4655