1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5  *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6  *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7  */
8 
9 #include <linux/of_device.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/regmap.h>
15 #include <linux/clk.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/if_vlan.h>
18 #include <linux/reset.h>
19 #include <linux/tcp.h>
20 #include <linux/interrupt.h>
21 #include <linux/pinctrl/devinfo.h>
22 #include <linux/phylink.h>
23 #include <linux/jhash.h>
24 #include <linux/bitfield.h>
25 #include <net/dsa.h>
26 #include <net/dst_metadata.h>
27 
28 #include "mtk_eth_soc.h"
29 #include "mtk_wed.h"
30 
31 static int mtk_msg_level = -1;
32 module_param_named(msg_level, mtk_msg_level, int, 0);
33 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
34 
35 #define MTK_ETHTOOL_STAT(x) { #x, \
36 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
37 
38 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
39 				  offsetof(struct mtk_hw_stats, xdp_stats.x) / \
40 				  sizeof(u64) }
41 
42 static const struct mtk_reg_map mtk_reg_map = {
43 	.tx_irq_mask		= 0x1a1c,
44 	.tx_irq_status		= 0x1a18,
45 	.pdma = {
46 		.rx_ptr		= 0x0900,
47 		.rx_cnt_cfg	= 0x0904,
48 		.pcrx_ptr	= 0x0908,
49 		.glo_cfg	= 0x0a04,
50 		.rst_idx	= 0x0a08,
51 		.delay_irq	= 0x0a0c,
52 		.irq_status	= 0x0a20,
53 		.irq_mask	= 0x0a28,
54 		.adma_rx_dbg0	= 0x0a38,
55 		.int_grp	= 0x0a50,
56 	},
57 	.qdma = {
58 		.qtx_cfg	= 0x1800,
59 		.qtx_sch	= 0x1804,
60 		.rx_ptr		= 0x1900,
61 		.rx_cnt_cfg	= 0x1904,
62 		.qcrx_ptr	= 0x1908,
63 		.glo_cfg	= 0x1a04,
64 		.rst_idx	= 0x1a08,
65 		.delay_irq	= 0x1a0c,
66 		.fc_th		= 0x1a10,
67 		.tx_sch_rate	= 0x1a14,
68 		.int_grp	= 0x1a20,
69 		.hred		= 0x1a44,
70 		.ctx_ptr	= 0x1b00,
71 		.dtx_ptr	= 0x1b04,
72 		.crx_ptr	= 0x1b10,
73 		.drx_ptr	= 0x1b14,
74 		.fq_head	= 0x1b20,
75 		.fq_tail	= 0x1b24,
76 		.fq_count	= 0x1b28,
77 		.fq_blen	= 0x1b2c,
78 	},
79 	.gdm1_cnt		= 0x2400,
80 	.gdma_to_ppe		= 0x4444,
81 	.ppe_base		= 0x0c00,
82 	.wdma_base = {
83 		[0]		= 0x2800,
84 		[1]		= 0x2c00,
85 	},
86 	.pse_iq_sta		= 0x0110,
87 	.pse_oq_sta		= 0x0118,
88 };
89 
90 static const struct mtk_reg_map mt7628_reg_map = {
91 	.tx_irq_mask		= 0x0a28,
92 	.tx_irq_status		= 0x0a20,
93 	.pdma = {
94 		.rx_ptr		= 0x0900,
95 		.rx_cnt_cfg	= 0x0904,
96 		.pcrx_ptr	= 0x0908,
97 		.glo_cfg	= 0x0a04,
98 		.rst_idx	= 0x0a08,
99 		.delay_irq	= 0x0a0c,
100 		.irq_status	= 0x0a20,
101 		.irq_mask	= 0x0a28,
102 		.int_grp	= 0x0a50,
103 	},
104 };
105 
106 static const struct mtk_reg_map mt7986_reg_map = {
107 	.tx_irq_mask		= 0x461c,
108 	.tx_irq_status		= 0x4618,
109 	.pdma = {
110 		.rx_ptr		= 0x6100,
111 		.rx_cnt_cfg	= 0x6104,
112 		.pcrx_ptr	= 0x6108,
113 		.glo_cfg	= 0x6204,
114 		.rst_idx	= 0x6208,
115 		.delay_irq	= 0x620c,
116 		.irq_status	= 0x6220,
117 		.irq_mask	= 0x6228,
118 		.adma_rx_dbg0	= 0x6238,
119 		.int_grp	= 0x6250,
120 	},
121 	.qdma = {
122 		.qtx_cfg	= 0x4400,
123 		.qtx_sch	= 0x4404,
124 		.rx_ptr		= 0x4500,
125 		.rx_cnt_cfg	= 0x4504,
126 		.qcrx_ptr	= 0x4508,
127 		.glo_cfg	= 0x4604,
128 		.rst_idx	= 0x4608,
129 		.delay_irq	= 0x460c,
130 		.fc_th		= 0x4610,
131 		.int_grp	= 0x4620,
132 		.hred		= 0x4644,
133 		.ctx_ptr	= 0x4700,
134 		.dtx_ptr	= 0x4704,
135 		.crx_ptr	= 0x4710,
136 		.drx_ptr	= 0x4714,
137 		.fq_head	= 0x4720,
138 		.fq_tail	= 0x4724,
139 		.fq_count	= 0x4728,
140 		.fq_blen	= 0x472c,
141 		.tx_sch_rate	= 0x4798,
142 	},
143 	.gdm1_cnt		= 0x1c00,
144 	.gdma_to_ppe		= 0x3333,
145 	.ppe_base		= 0x2000,
146 	.wdma_base = {
147 		[0]		= 0x4800,
148 		[1]		= 0x4c00,
149 	},
150 	.pse_iq_sta		= 0x0180,
151 	.pse_oq_sta		= 0x01a0,
152 };
153 
154 /* strings used by ethtool */
155 static const struct mtk_ethtool_stats {
156 	char str[ETH_GSTRING_LEN];
157 	u32 offset;
158 } mtk_ethtool_stats[] = {
159 	MTK_ETHTOOL_STAT(tx_bytes),
160 	MTK_ETHTOOL_STAT(tx_packets),
161 	MTK_ETHTOOL_STAT(tx_skip),
162 	MTK_ETHTOOL_STAT(tx_collisions),
163 	MTK_ETHTOOL_STAT(rx_bytes),
164 	MTK_ETHTOOL_STAT(rx_packets),
165 	MTK_ETHTOOL_STAT(rx_overflow),
166 	MTK_ETHTOOL_STAT(rx_fcs_errors),
167 	MTK_ETHTOOL_STAT(rx_short_errors),
168 	MTK_ETHTOOL_STAT(rx_long_errors),
169 	MTK_ETHTOOL_STAT(rx_checksum_errors),
170 	MTK_ETHTOOL_STAT(rx_flow_control_packets),
171 	MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
172 	MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
173 	MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
174 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
175 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
176 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
177 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
178 };
179 
180 static const char * const mtk_clks_source_name[] = {
181 	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
182 	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
183 	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
184 	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
185 };
186 
187 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
188 {
189 	__raw_writel(val, eth->base + reg);
190 }
191 
192 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
193 {
194 	return __raw_readl(eth->base + reg);
195 }
196 
197 static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
198 {
199 	u32 val;
200 
201 	val = mtk_r32(eth, reg);
202 	val &= ~mask;
203 	val |= set;
204 	mtk_w32(eth, val, reg);
205 	return reg;
206 }
207 
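/* Wait for the MDIO indirect access controller to go idle, i.e. for the
 * hardware to clear PHY_IAC_ACCESS; give up after PHY_IAC_TIMEOUT.
 */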
208 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
209 {
210 	unsigned long t_start = jiffies;
211 
212 	while (1) {
213 		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
214 			return 0;
215 		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
216 			break;
217 		cond_resched();
218 	}
219 
220 	dev_err(eth->dev, "mdio: MDIO timeout\n");
221 	return -ETIMEDOUT;
222 }
223 
224 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
225 			       u32 write_data)
226 {
227 	int ret;
228 
229 	ret = mtk_mdio_busy_wait(eth);
230 	if (ret < 0)
231 		return ret;
232 
233 	mtk_w32(eth, PHY_IAC_ACCESS |
234 		PHY_IAC_START_C22 |
235 		PHY_IAC_CMD_WRITE |
236 		PHY_IAC_REG(phy_reg) |
237 		PHY_IAC_ADDR(phy_addr) |
238 		PHY_IAC_DATA(write_data),
239 		MTK_PHY_IAC);
240 
241 	ret = mtk_mdio_busy_wait(eth);
242 	if (ret < 0)
243 		return ret;
244 
245 	return 0;
246 }
247 
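/* Clause 45 access takes two IAC transactions: an address cycle that loads
 * the target register number for the given MMD (devad), followed by the
 * cycle that carries the actual data.
 */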
248 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
249 			       u32 devad, u32 phy_reg, u32 write_data)
250 {
251 	int ret;
252 
253 	ret = mtk_mdio_busy_wait(eth);
254 	if (ret < 0)
255 		return ret;
256 
257 	mtk_w32(eth, PHY_IAC_ACCESS |
258 		PHY_IAC_START_C45 |
259 		PHY_IAC_CMD_C45_ADDR |
260 		PHY_IAC_REG(devad) |
261 		PHY_IAC_ADDR(phy_addr) |
262 		PHY_IAC_DATA(phy_reg),
263 		MTK_PHY_IAC);
264 
265 	ret = mtk_mdio_busy_wait(eth);
266 	if (ret < 0)
267 		return ret;
268 
269 	mtk_w32(eth, PHY_IAC_ACCESS |
270 		PHY_IAC_START_C45 |
271 		PHY_IAC_CMD_WRITE |
272 		PHY_IAC_REG(devad) |
273 		PHY_IAC_ADDR(phy_addr) |
274 		PHY_IAC_DATA(write_data),
275 		MTK_PHY_IAC);
276 
277 	ret = mtk_mdio_busy_wait(eth);
278 	if (ret < 0)
279 		return ret;
280 
281 	return 0;
282 }
283 
284 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
285 {
286 	int ret;
287 
288 	ret = mtk_mdio_busy_wait(eth);
289 	if (ret < 0)
290 		return ret;
291 
292 	mtk_w32(eth, PHY_IAC_ACCESS |
293 		PHY_IAC_START_C22 |
294 		PHY_IAC_CMD_C22_READ |
295 		PHY_IAC_REG(phy_reg) |
296 		PHY_IAC_ADDR(phy_addr),
297 		MTK_PHY_IAC);
298 
299 	ret = mtk_mdio_busy_wait(eth);
300 	if (ret < 0)
301 		return ret;
302 
303 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
304 }
305 
306 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
307 			      u32 devad, u32 phy_reg)
308 {
309 	int ret;
310 
311 	ret = mtk_mdio_busy_wait(eth);
312 	if (ret < 0)
313 		return ret;
314 
315 	mtk_w32(eth, PHY_IAC_ACCESS |
316 		PHY_IAC_START_C45 |
317 		PHY_IAC_CMD_C45_ADDR |
318 		PHY_IAC_REG(devad) |
319 		PHY_IAC_ADDR(phy_addr) |
320 		PHY_IAC_DATA(phy_reg),
321 		MTK_PHY_IAC);
322 
323 	ret = mtk_mdio_busy_wait(eth);
324 	if (ret < 0)
325 		return ret;
326 
327 	mtk_w32(eth, PHY_IAC_ACCESS |
328 		PHY_IAC_START_C45 |
329 		PHY_IAC_CMD_C45_READ |
330 		PHY_IAC_REG(devad) |
331 		PHY_IAC_ADDR(phy_addr),
332 		MTK_PHY_IAC);
333 
334 	ret = mtk_mdio_busy_wait(eth);
335 	if (ret < 0)
336 		return ret;
337 
338 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
339 }
340 
341 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
342 			      int phy_reg, u16 val)
343 {
344 	struct mtk_eth *eth = bus->priv;
345 
346 	return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
347 }
348 
349 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
350 			      int devad, int phy_reg, u16 val)
351 {
352 	struct mtk_eth *eth = bus->priv;
353 
354 	return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
355 }
356 
357 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
358 {
359 	struct mtk_eth *eth = bus->priv;
360 
361 	return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
362 }
363 
364 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
365 			     int phy_reg)
366 {
367 	struct mtk_eth *eth = bus->priv;
368 
369 	return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
370 }
371 
372 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
373 				     phy_interface_t interface)
374 {
375 	u32 val;
376 
377 	/* Check DDR memory type.
378 	 * Currently TRGMII mode with DDR2 memory is not supported.
379 	 */
380 	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
381 	if (interface == PHY_INTERFACE_MODE_TRGMII &&
382 	    val & SYSCFG_DRAM_TYPE_DDR2) {
383 		dev_err(eth->dev,
384 			"TRGMII mode with DDR2 memory is not supported!\n");
385 		return -EOPNOTSUPP;
386 	}
387 
388 	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
389 		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
390 
391 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
392 			   ETHSYS_TRGMII_MT7621_MASK, val);
393 
394 	return 0;
395 }
396 
397 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
398 				   phy_interface_t interface, int speed)
399 {
400 	u32 val;
401 	int ret;
402 
403 	if (interface == PHY_INTERFACE_MODE_TRGMII) {
404 		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
405 		val = 500000000;
406 		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
407 		if (ret)
408 			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
409 		return;
410 	}
411 
412 	val = (speed == SPEED_1000) ?
413 		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
414 	mtk_w32(eth, val, INTF_MODE);
415 
416 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
417 			   ETHSYS_TRGMII_CLK_SEL362_5,
418 			   ETHSYS_TRGMII_CLK_SEL362_5);
419 
420 	val = (speed == SPEED_1000) ? 250000000 : 500000000;
421 	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
422 	if (ret)
423 		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
424 
425 	val = (speed == SPEED_1000) ?
426 		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
427 	mtk_w32(eth, val, TRGMII_RCK_CTRL);
428 
429 	val = (speed == SPEED_1000) ?
430 		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
431 	mtk_w32(eth, val, TRGMII_TCK_CTRL);
432 }
433 
434 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
435 					      phy_interface_t interface)
436 {
437 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
438 					   phylink_config);
439 	struct mtk_eth *eth = mac->hw;
440 	unsigned int sid;
441 
442 	if (interface == PHY_INTERFACE_MODE_SGMII ||
443 	    phy_interface_mode_is_8023z(interface)) {
444 		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
445 		       0 : mac->id;
446 
447 		return mtk_sgmii_select_pcs(eth->sgmii, sid);
448 	}
449 
450 	return NULL;
451 }
452 
453 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
454 			   const struct phylink_link_state *state)
455 {
456 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
457 					   phylink_config);
458 	struct mtk_eth *eth = mac->hw;
459 	int val, ge_mode, err = 0;
460 	u32 i;
461 
	/* MT76x8 has no hardware settings for the MAC */
463 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
464 	    mac->interface != state->interface) {
465 		/* Setup soc pin functions */
466 		switch (state->interface) {
467 		case PHY_INTERFACE_MODE_TRGMII:
468 			if (mac->id)
469 				goto err_phy;
470 			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
471 					  MTK_GMAC1_TRGMII))
472 				goto err_phy;
473 			fallthrough;
474 		case PHY_INTERFACE_MODE_RGMII_TXID:
475 		case PHY_INTERFACE_MODE_RGMII_RXID:
476 		case PHY_INTERFACE_MODE_RGMII_ID:
477 		case PHY_INTERFACE_MODE_RGMII:
478 		case PHY_INTERFACE_MODE_MII:
479 		case PHY_INTERFACE_MODE_REVMII:
480 		case PHY_INTERFACE_MODE_RMII:
481 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
482 				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
483 				if (err)
484 					goto init_err;
485 			}
486 			break;
487 		case PHY_INTERFACE_MODE_1000BASEX:
488 		case PHY_INTERFACE_MODE_2500BASEX:
489 		case PHY_INTERFACE_MODE_SGMII:
490 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
491 				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
492 				if (err)
493 					goto init_err;
494 			}
495 			break;
496 		case PHY_INTERFACE_MODE_GMII:
497 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
498 				err = mtk_gmac_gephy_path_setup(eth, mac->id);
499 				if (err)
500 					goto init_err;
501 			}
502 			break;
503 		default:
504 			goto err_phy;
505 		}
506 
507 		/* Setup clock for 1st gmac */
508 		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
509 		    !phy_interface_mode_is_8023z(state->interface) &&
510 		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
511 			if (MTK_HAS_CAPS(mac->hw->soc->caps,
512 					 MTK_TRGMII_MT7621_CLK)) {
513 				if (mt7621_gmac0_rgmii_adjust(mac->hw,
514 							      state->interface))
515 					goto err_phy;
516 			} else {
517 				/* FIXME: this is incorrect. Not only does it
518 				 * use state->speed (which is not guaranteed
519 				 * to be correct) but it also makes use of it
520 				 * in a code path that will only be reachable
521 				 * when the PHY interface mode changes, not
522 				 * when the speed changes. Consequently, RGMII
523 				 * is probably broken.
524 				 */
525 				mtk_gmac0_rgmii_adjust(mac->hw,
526 						       state->interface,
527 						       state->speed);
528 
529 				/* mt7623_pad_clk_setup */
530 				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
531 					mtk_w32(mac->hw,
532 						TD_DM_DRVP(8) | TD_DM_DRVN(8),
533 						TRGMII_TD_ODT(i));
534 
535 				/* Assert/release MT7623 RXC reset */
536 				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
537 					TRGMII_RCK_CTRL);
538 				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
539 			}
540 		}
541 
542 		ge_mode = 0;
543 		switch (state->interface) {
544 		case PHY_INTERFACE_MODE_MII:
545 		case PHY_INTERFACE_MODE_GMII:
546 			ge_mode = 1;
547 			break;
548 		case PHY_INTERFACE_MODE_REVMII:
549 			ge_mode = 2;
550 			break;
551 		case PHY_INTERFACE_MODE_RMII:
552 			if (mac->id)
553 				goto err_phy;
554 			ge_mode = 3;
555 			break;
556 		default:
557 			break;
558 		}
559 
560 		/* put the gmac into the right mode */
561 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
562 		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
563 		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
564 		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
565 
566 		mac->interface = state->interface;
567 	}
568 
569 	/* SGMII */
570 	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
571 	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS setup is done.
		 */
575 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
576 
577 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
578 				   SYSCFG0_SGMII_MASK,
579 				   ~(u32)SYSCFG0_SGMII_MASK);
580 
581 		/* Save the syscfg0 value for mac_finish */
582 		mac->syscfg0 = val;
583 	} else if (phylink_autoneg_inband(mode)) {
584 		dev_err(eth->dev,
585 			"In-band mode not supported in non SGMII mode!\n");
586 		return;
587 	}
588 
589 	return;
590 
591 err_phy:
592 	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
593 		mac->id, phy_modes(state->interface));
594 	return;
595 
596 init_err:
597 	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
598 		mac->id, phy_modes(state->interface), err);
599 }
600 
601 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
602 			  phy_interface_t interface)
603 {
604 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
605 					   phylink_config);
606 	struct mtk_eth *eth = mac->hw;
607 	u32 mcr_cur, mcr_new;
608 
609 	/* Enable SGMII */
610 	if (interface == PHY_INTERFACE_MODE_SGMII ||
611 	    phy_interface_mode_is_8023z(interface))
612 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
613 				   SYSCFG0_SGMII_MASK, mac->syscfg0);
614 
615 	/* Setup gmac */
616 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
617 	mcr_new = mcr_cur;
618 	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
619 		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
620 
621 	/* Only update control register when needed! */
622 	if (mcr_new != mcr_cur)
623 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
624 
625 	return 0;
626 }
627 
628 static void mtk_mac_pcs_get_state(struct phylink_config *config,
629 				  struct phylink_link_state *state)
630 {
631 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
632 					   phylink_config);
633 	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
634 
635 	state->link = (pmsr & MAC_MSR_LINK);
636 	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
637 
638 	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
639 	case 0:
640 		state->speed = SPEED_10;
641 		break;
642 	case MAC_MSR_SPEED_100:
643 		state->speed = SPEED_100;
644 		break;
645 	case MAC_MSR_SPEED_1000:
646 		state->speed = SPEED_1000;
647 		break;
648 	default:
649 		state->speed = SPEED_UNKNOWN;
650 		break;
651 	}
652 
653 	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
654 	if (pmsr & MAC_MSR_RX_FC)
655 		state->pause |= MLO_PAUSE_RX;
656 	if (pmsr & MAC_MSR_TX_FC)
657 		state->pause |= MLO_PAUSE_TX;
658 }
659 
660 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
661 			      phy_interface_t interface)
662 {
663 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
664 					   phylink_config);
665 	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
666 
667 	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
668 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
669 }
670 
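/* Program the per-queue QDMA shaper (QTX_SCH) for the given link speed.
 * Judging from the "minimum: 10 Mbps" value below, the MIN/MAX rate fields
 * appear to encode mantissa * 10^exponent in kbit/s (MAN=1, EXP=4 -> 10 Mbps).
 */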
671 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
672 				int speed)
673 {
674 	const struct mtk_soc_data *soc = eth->soc;
675 	u32 ofs, val;
676 
677 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
678 		return;
679 
680 	val = MTK_QTX_SCH_MIN_RATE_EN |
681 	      /* minimum: 10 Mbps */
682 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
683 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
684 	      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
685 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
686 		val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
687 
688 	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
689 		switch (speed) {
690 		case SPEED_10:
691 			val |= MTK_QTX_SCH_MAX_RATE_EN |
692 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
693 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
694 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
695 			break;
696 		case SPEED_100:
697 			val |= MTK_QTX_SCH_MAX_RATE_EN |
698 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
701 			break;
702 		case SPEED_1000:
703 			val |= MTK_QTX_SCH_MAX_RATE_EN |
704 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
705 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
706 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
707 			break;
708 		default:
709 			break;
710 		}
711 	} else {
712 		switch (speed) {
713 		case SPEED_10:
714 			val |= MTK_QTX_SCH_MAX_RATE_EN |
715 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
716 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
717 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
718 			break;
719 		case SPEED_100:
720 			val |= MTK_QTX_SCH_MAX_RATE_EN |
721 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
724 			break;
725 		case SPEED_1000:
726 			val |= MTK_QTX_SCH_MAX_RATE_EN |
727 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
728 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
729 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
730 			break;
731 		default:
732 			break;
733 		}
734 	}
735 
736 	ofs = MTK_QTX_OFFSET * idx;
737 	mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
738 }
739 
740 static void mtk_mac_link_up(struct phylink_config *config,
741 			    struct phy_device *phy,
742 			    unsigned int mode, phy_interface_t interface,
743 			    int speed, int duplex, bool tx_pause, bool rx_pause)
744 {
745 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
746 					   phylink_config);
747 	u32 mcr;
748 
749 	mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
750 	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
751 		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
752 		 MAC_MCR_FORCE_RX_FC);
753 
754 	/* Configure speed */
755 	switch (speed) {
756 	case SPEED_2500:
757 	case SPEED_1000:
758 		mcr |= MAC_MCR_SPEED_1000;
759 		break;
760 	case SPEED_100:
761 		mcr |= MAC_MCR_SPEED_100;
762 		break;
763 	}
764 
765 	mtk_set_queue_speed(mac->hw, mac->id, speed);
766 
767 	/* Configure duplex */
768 	if (duplex == DUPLEX_FULL)
769 		mcr |= MAC_MCR_FORCE_DPX;
770 
771 	/* Configure pause modes - phylink will avoid these for half duplex */
772 	if (tx_pause)
773 		mcr |= MAC_MCR_FORCE_TX_FC;
774 	if (rx_pause)
775 		mcr |= MAC_MCR_FORCE_RX_FC;
776 
777 	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
778 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
779 }
780 
781 static const struct phylink_mac_ops mtk_phylink_ops = {
782 	.mac_select_pcs = mtk_mac_select_pcs,
783 	.mac_pcs_get_state = mtk_mac_pcs_get_state,
784 	.mac_config = mtk_mac_config,
785 	.mac_finish = mtk_mac_finish,
786 	.mac_link_down = mtk_mac_link_down,
787 	.mac_link_up = mtk_mac_link_up,
788 };
789 
790 static int mtk_mdio_init(struct mtk_eth *eth)
791 {
792 	struct device_node *mii_np;
793 	int ret;
794 
795 	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
796 	if (!mii_np) {
797 		dev_err(eth->dev, "no %s child node found", "mdio-bus");
798 		return -ENODEV;
799 	}
800 
801 	if (!of_device_is_available(mii_np)) {
802 		ret = -ENODEV;
803 		goto err_put_node;
804 	}
805 
806 	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
807 	if (!eth->mii_bus) {
808 		ret = -ENOMEM;
809 		goto err_put_node;
810 	}
811 
812 	eth->mii_bus->name = "mdio";
813 	eth->mii_bus->read = mtk_mdio_read_c22;
814 	eth->mii_bus->write = mtk_mdio_write_c22;
815 	eth->mii_bus->read_c45 = mtk_mdio_read_c45;
816 	eth->mii_bus->write_c45 = mtk_mdio_write_c45;
817 	eth->mii_bus->priv = eth;
818 	eth->mii_bus->parent = eth->dev;
819 
820 	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
821 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
822 
823 err_put_node:
824 	of_node_put(mii_np);
825 	return ret;
826 }
827 
828 static void mtk_mdio_cleanup(struct mtk_eth *eth)
829 {
830 	if (!eth->mii_bus)
831 		return;
832 
833 	mdiobus_unregister(eth->mii_bus);
834 }
835 
836 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
837 {
838 	unsigned long flags;
839 	u32 val;
840 
841 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
842 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
843 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
844 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
845 }
846 
847 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
848 {
849 	unsigned long flags;
850 	u32 val;
851 
852 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
853 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
854 	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
855 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
856 }
857 
858 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
859 {
860 	unsigned long flags;
861 	u32 val;
862 
863 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
864 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
865 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
866 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
867 }
868 
869 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
870 {
871 	unsigned long flags;
872 	u32 val;
873 
874 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
875 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
876 	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
877 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
878 }
879 
880 static int mtk_set_mac_address(struct net_device *dev, void *p)
881 {
882 	int ret = eth_mac_addr(dev, p);
883 	struct mtk_mac *mac = netdev_priv(dev);
884 	struct mtk_eth *eth = mac->hw;
885 	const char *macaddr = dev->dev_addr;
886 
887 	if (ret)
888 		return ret;
889 
890 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
891 		return -EBUSY;
892 
893 	spin_lock_bh(&mac->hw->page_lock);
894 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
895 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
896 			MT7628_SDM_MAC_ADRH);
897 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
898 			(macaddr[4] << 8) | macaddr[5],
899 			MT7628_SDM_MAC_ADRL);
900 	} else {
901 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
902 			MTK_GDMA_MAC_ADRH(mac->id));
903 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
904 			(macaddr[4] << 8) | macaddr[5],
905 			MTK_GDMA_MAC_ADRL(mac->id));
906 	}
907 	spin_unlock_bh(&mac->hw->page_lock);
908 
909 	return 0;
910 }
911 
912 void mtk_stats_update_mac(struct mtk_mac *mac)
913 {
914 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
915 	struct mtk_eth *eth = mac->hw;
916 
917 	u64_stats_update_begin(&hw_stats->syncp);
918 
919 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
920 		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
921 		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
922 		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
923 		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
924 		hw_stats->rx_checksum_errors +=
925 			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
926 	} else {
927 		const struct mtk_reg_map *reg_map = eth->soc->reg_map;
928 		unsigned int offs = hw_stats->reg_offset;
929 		u64 stats;
930 
931 		hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
932 		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
933 		if (stats)
934 			hw_stats->rx_bytes += (stats << 32);
935 		hw_stats->rx_packets +=
936 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
937 		hw_stats->rx_overflow +=
938 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
939 		hw_stats->rx_fcs_errors +=
940 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
941 		hw_stats->rx_short_errors +=
942 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
943 		hw_stats->rx_long_errors +=
944 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
945 		hw_stats->rx_checksum_errors +=
946 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
947 		hw_stats->rx_flow_control_packets +=
948 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
949 		hw_stats->tx_skip +=
950 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
951 		hw_stats->tx_collisions +=
952 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
953 		hw_stats->tx_bytes +=
954 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
955 		stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
956 		if (stats)
957 			hw_stats->tx_bytes += (stats << 32);
958 		hw_stats->tx_packets +=
959 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
960 	}
961 
962 	u64_stats_update_end(&hw_stats->syncp);
963 }
964 
965 static void mtk_stats_update(struct mtk_eth *eth)
966 {
967 	int i;
968 
969 	for (i = 0; i < MTK_MAC_COUNT; i++) {
970 		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
971 			continue;
972 		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
973 			mtk_stats_update_mac(eth->mac[i]);
974 			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
975 		}
976 	}
977 }
978 
979 static void mtk_get_stats64(struct net_device *dev,
980 			    struct rtnl_link_stats64 *storage)
981 {
982 	struct mtk_mac *mac = netdev_priv(dev);
983 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
984 	unsigned int start;
985 
986 	if (netif_running(dev) && netif_device_present(dev)) {
987 		if (spin_trylock_bh(&hw_stats->stats_lock)) {
988 			mtk_stats_update_mac(mac);
989 			spin_unlock_bh(&hw_stats->stats_lock);
990 		}
991 	}
992 
993 	do {
994 		start = u64_stats_fetch_begin(&hw_stats->syncp);
995 		storage->rx_packets = hw_stats->rx_packets;
996 		storage->tx_packets = hw_stats->tx_packets;
997 		storage->rx_bytes = hw_stats->rx_bytes;
998 		storage->tx_bytes = hw_stats->tx_bytes;
999 		storage->collisions = hw_stats->tx_collisions;
1000 		storage->rx_length_errors = hw_stats->rx_short_errors +
1001 			hw_stats->rx_long_errors;
1002 		storage->rx_over_errors = hw_stats->rx_overflow;
1003 		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1004 		storage->rx_errors = hw_stats->rx_checksum_errors;
1005 		storage->tx_aborted_errors = hw_stats->tx_skip;
1006 	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
1007 
1008 	storage->tx_errors = dev->stats.tx_errors;
1009 	storage->rx_dropped = dev->stats.rx_dropped;
1010 	storage->tx_dropped = dev->stats.tx_dropped;
1011 }
1012 
1013 static inline int mtk_max_frag_size(int mtu)
1014 {
1015 	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1016 	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1017 		mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1018 
1019 	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1020 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1021 }
1022 
1023 static inline int mtk_max_buf_size(int frag_size)
1024 {
1025 	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1026 		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1027 
1028 	WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1029 
1030 	return buf_size;
1031 }
1032 
1033 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1034 			    struct mtk_rx_dma_v2 *dma_rxd)
1035 {
1036 	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1037 	if (!(rxd->rxd2 & RX_DMA_DONE))
1038 		return false;
1039 
1040 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1041 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1042 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1043 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1044 		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1045 		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1046 	}
1047 
1048 	return true;
1049 }
1050 
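/* HW LRO frames need a receive buffer larger than a single page, so fall
 * back to a compound-page allocation sized for MTK_MAX_LRO_RX_LENGTH.
 */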
1051 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1052 {
1053 	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1054 	unsigned long data;
1055 
1056 	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1057 				get_order(size));
1058 
1059 	return (void *)data;
1060 }
1061 
/* the QDMA core needs scratch memory to be set up */
1063 static int mtk_init_fq_dma(struct mtk_eth *eth)
1064 {
1065 	const struct mtk_soc_data *soc = eth->soc;
1066 	dma_addr_t phy_ring_tail;
1067 	int cnt = MTK_QDMA_RING_SIZE;
1068 	dma_addr_t dma_addr;
1069 	int i;
1070 
1071 	eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1072 					       cnt * soc->txrx.txd_size,
1073 					       &eth->phy_scratch_ring,
1074 					       GFP_KERNEL);
1075 	if (unlikely(!eth->scratch_ring))
1076 		return -ENOMEM;
1077 
1078 	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1079 	if (unlikely(!eth->scratch_head))
1080 		return -ENOMEM;
1081 
1082 	dma_addr = dma_map_single(eth->dma_dev,
1083 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
1084 				  DMA_FROM_DEVICE);
1085 	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1086 		return -ENOMEM;
1087 
1088 	phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
1089 
1090 	for (i = 0; i < cnt; i++) {
1091 		struct mtk_tx_dma_v2 *txd;
1092 
1093 		txd = eth->scratch_ring + i * soc->txrx.txd_size;
1094 		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1095 		if (i < cnt - 1)
1096 			txd->txd2 = eth->phy_scratch_ring +
1097 				    (i + 1) * soc->txrx.txd_size;
1098 
1099 		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1100 		txd->txd4 = 0;
1101 		if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
1102 			txd->txd5 = 0;
1103 			txd->txd6 = 0;
1104 			txd->txd7 = 0;
1105 			txd->txd8 = 0;
1106 		}
1107 	}
1108 
1109 	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1110 	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1111 	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1112 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1113 
1114 	return 0;
1115 }
1116 
1117 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1118 {
1119 	return ring->dma + (desc - ring->phys);
1120 }
1121 
1122 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1123 					     void *txd, u32 txd_size)
1124 {
1125 	int idx = (txd - ring->dma) / txd_size;
1126 
1127 	return &ring->buf[idx];
1128 }
1129 
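/* The PDMA descriptors shadow the QDMA ring one-to-one, so a QDMA
 * descriptor pointer can be translated into the PDMA descriptor sitting
 * at the same index.
 */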
1130 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1131 				       struct mtk_tx_dma *dma)
1132 {
1133 	return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1134 }
1135 
1136 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1137 {
1138 	return (dma - ring->dma) / txd_size;
1139 }
1140 
1141 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1142 			 struct xdp_frame_bulk *bq, bool napi)
1143 {
1144 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1145 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1146 			dma_unmap_single(eth->dma_dev,
1147 					 dma_unmap_addr(tx_buf, dma_addr0),
1148 					 dma_unmap_len(tx_buf, dma_len0),
1149 					 DMA_TO_DEVICE);
1150 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1151 			dma_unmap_page(eth->dma_dev,
1152 				       dma_unmap_addr(tx_buf, dma_addr0),
1153 				       dma_unmap_len(tx_buf, dma_len0),
1154 				       DMA_TO_DEVICE);
1155 		}
1156 	} else {
1157 		if (dma_unmap_len(tx_buf, dma_len0)) {
1158 			dma_unmap_page(eth->dma_dev,
1159 				       dma_unmap_addr(tx_buf, dma_addr0),
1160 				       dma_unmap_len(tx_buf, dma_len0),
1161 				       DMA_TO_DEVICE);
1162 		}
1163 
1164 		if (dma_unmap_len(tx_buf, dma_len1)) {
1165 			dma_unmap_page(eth->dma_dev,
1166 				       dma_unmap_addr(tx_buf, dma_addr1),
1167 				       dma_unmap_len(tx_buf, dma_len1),
1168 				       DMA_TO_DEVICE);
1169 		}
1170 	}
1171 
1172 	if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1173 		if (tx_buf->type == MTK_TYPE_SKB) {
1174 			struct sk_buff *skb = tx_buf->data;
1175 
1176 			if (napi)
1177 				napi_consume_skb(skb, napi);
1178 			else
1179 				dev_kfree_skb_any(skb);
1180 		} else {
1181 			struct xdp_frame *xdpf = tx_buf->data;
1182 
1183 			if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1184 				xdp_return_frame_rx_napi(xdpf);
1185 			else if (bq)
1186 				xdp_return_frame_bulk(xdpf, bq);
1187 			else
1188 				xdp_return_frame(xdpf);
1189 		}
1190 	}
1191 	tx_buf->flags = 0;
1192 	tx_buf->data = NULL;
1193 }
1194 
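/* Record the DMA unmap info for a mapped buffer. QDMA only needs the
 * bookkeeping; on PDMA each descriptor carries two buffers, with
 * even-indexed buffers in txd1/PLEN0 and odd-indexed ones in txd3/PLEN1.
 */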
1195 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1196 			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1197 			 size_t size, int idx)
1198 {
1199 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1200 		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1201 		dma_unmap_len_set(tx_buf, dma_len0, size);
1202 	} else {
1203 		if (idx & 1) {
1204 			txd->txd3 = mapped_addr;
1205 			txd->txd2 |= TX_DMA_PLEN1(size);
1206 			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1207 			dma_unmap_len_set(tx_buf, dma_len1, size);
1208 		} else {
1209 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1210 			txd->txd1 = mapped_addr;
1211 			txd->txd2 = TX_DMA_PLEN0(size);
1212 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1213 			dma_unmap_len_set(tx_buf, dma_len0, size);
1214 		}
1215 	}
1216 }
1217 
1218 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1219 				   struct mtk_tx_dma_desc_info *info)
1220 {
1221 	struct mtk_mac *mac = netdev_priv(dev);
1222 	struct mtk_eth *eth = mac->hw;
1223 	struct mtk_tx_dma *desc = txd;
1224 	u32 data;
1225 
1226 	WRITE_ONCE(desc->txd1, info->addr);
1227 
1228 	data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1229 	       FIELD_PREP(TX_DMA_PQID, info->qid);
1230 	if (info->last)
1231 		data |= TX_DMA_LS0;
1232 	WRITE_ONCE(desc->txd3, data);
1233 
1234 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1235 	if (info->first) {
1236 		if (info->gso)
1237 			data |= TX_DMA_TSO;
1238 		/* tx checksum offload */
1239 		if (info->csum)
1240 			data |= TX_DMA_CHKSUM;
1241 		/* vlan header offload */
1242 		if (info->vlan)
1243 			data |= TX_DMA_INS_VLAN | info->vlan_tci;
1244 	}
1245 	WRITE_ONCE(desc->txd4, data);
1246 }
1247 
1248 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1249 				   struct mtk_tx_dma_desc_info *info)
1250 {
1251 	struct mtk_mac *mac = netdev_priv(dev);
1252 	struct mtk_tx_dma_v2 *desc = txd;
1253 	struct mtk_eth *eth = mac->hw;
1254 	u32 data;
1255 
1256 	WRITE_ONCE(desc->txd1, info->addr);
1257 
1258 	data = TX_DMA_PLEN0(info->size);
1259 	if (info->last)
1260 		data |= TX_DMA_LS0;
1261 	WRITE_ONCE(desc->txd3, data);
1262 
1263 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
1264 	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1265 	WRITE_ONCE(desc->txd4, data);
1266 
1267 	data = 0;
1268 	if (info->first) {
1269 		if (info->gso)
1270 			data |= TX_DMA_TSO_V2;
1271 		/* tx checksum offload */
1272 		if (info->csum)
1273 			data |= TX_DMA_CHKSUM_V2;
1274 	}
1275 	WRITE_ONCE(desc->txd5, data);
1276 
1277 	data = 0;
1278 	if (info->first && info->vlan)
1279 		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1280 	WRITE_ONCE(desc->txd6, data);
1281 
1282 	WRITE_ONCE(desc->txd7, 0);
1283 	WRITE_ONCE(desc->txd8, 0);
1284 }
1285 
1286 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1287 				struct mtk_tx_dma_desc_info *info)
1288 {
1289 	struct mtk_mac *mac = netdev_priv(dev);
1290 	struct mtk_eth *eth = mac->hw;
1291 
1292 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1293 		mtk_tx_set_dma_desc_v2(dev, txd, info);
1294 	else
1295 		mtk_tx_set_dma_desc_v1(dev, txd, info);
1296 }
1297 
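/* Map an skb onto TX descriptors: the linear head first, then every page
 * fragment, splitting fragments larger than dma_max_len across several
 * descriptors. On a mapping error all descriptors used so far are unwound.
 */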
1298 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1299 		      int tx_num, struct mtk_tx_ring *ring, bool gso)
1300 {
1301 	struct mtk_tx_dma_desc_info txd_info = {
1302 		.size = skb_headlen(skb),
1303 		.gso = gso,
1304 		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
1305 		.vlan = skb_vlan_tag_present(skb),
1306 		.qid = skb_get_queue_mapping(skb),
1307 		.vlan_tci = skb_vlan_tag_get(skb),
1308 		.first = true,
1309 		.last = !skb_is_nonlinear(skb),
1310 	};
1311 	struct netdev_queue *txq;
1312 	struct mtk_mac *mac = netdev_priv(dev);
1313 	struct mtk_eth *eth = mac->hw;
1314 	const struct mtk_soc_data *soc = eth->soc;
1315 	struct mtk_tx_dma *itxd, *txd;
1316 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1317 	struct mtk_tx_buf *itx_buf, *tx_buf;
1318 	int i, n_desc = 1;
1319 	int queue = skb_get_queue_mapping(skb);
1320 	int k = 0;
1321 
1322 	txq = netdev_get_tx_queue(dev, queue);
1323 	itxd = ring->next_free;
1324 	itxd_pdma = qdma_to_pdma(ring, itxd);
1325 	if (itxd == ring->last_free)
1326 		return -ENOMEM;
1327 
1328 	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1329 	memset(itx_buf, 0, sizeof(*itx_buf));
1330 
1331 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1332 				       DMA_TO_DEVICE);
1333 	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1334 		return -ENOMEM;
1335 
1336 	mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1337 
1338 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1339 	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1340 			  MTK_TX_FLAGS_FPORT1;
1341 	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1342 		     k++);
1343 
1344 	/* TX SG offload */
1345 	txd = itxd;
1346 	txd_pdma = qdma_to_pdma(ring, txd);
1347 
1348 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1349 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1350 		unsigned int offset = 0;
1351 		int frag_size = skb_frag_size(frag);
1352 
1353 		while (frag_size) {
1354 			bool new_desc = true;
1355 
1356 			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1357 			    (i & 0x1)) {
1358 				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1359 				txd_pdma = qdma_to_pdma(ring, txd);
1360 				if (txd == ring->last_free)
1361 					goto err_dma;
1362 
1363 				n_desc++;
1364 			} else {
1365 				new_desc = false;
1366 			}
1367 
1368 			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1369 			txd_info.size = min_t(unsigned int, frag_size,
1370 					      soc->txrx.dma_max_len);
1371 			txd_info.qid = queue;
1372 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1373 					!(frag_size - txd_info.size);
1374 			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1375 							 offset, txd_info.size,
1376 							 DMA_TO_DEVICE);
1377 			if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1378 				goto err_dma;
1379 
1380 			mtk_tx_set_dma_desc(dev, txd, &txd_info);
1381 
1382 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1383 						    soc->txrx.txd_size);
1384 			if (new_desc)
1385 				memset(tx_buf, 0, sizeof(*tx_buf));
1386 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1387 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1388 			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1389 					 MTK_TX_FLAGS_FPORT1;
1390 
1391 			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1392 				     txd_info.size, k++);
1393 
1394 			frag_size -= txd_info.size;
1395 			offset += txd_info.size;
1396 		}
1397 	}
1398 
	/* store skb for cleanup */
1400 	itx_buf->type = MTK_TYPE_SKB;
1401 	itx_buf->data = skb;
1402 
1403 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1404 		if (k & 0x1)
1405 			txd_pdma->txd2 |= TX_DMA_LS0;
1406 		else
1407 			txd_pdma->txd2 |= TX_DMA_LS1;
1408 	}
1409 
1410 	netdev_tx_sent_queue(txq, skb->len);
1411 	skb_tx_timestamp(skb);
1412 
1413 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1414 	atomic_sub(n_desc, &ring->free_count);
1415 
1416 	/* make sure that all changes to the dma ring are flushed before we
1417 	 * continue
1418 	 */
1419 	wmb();
1420 
1421 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1422 		if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1423 			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1424 	} else {
1425 		int next_idx;
1426 
1427 		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
1428 					 ring->dma_size);
1429 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1430 	}
1431 
1432 	return 0;
1433 
1434 err_dma:
1435 	do {
1436 		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1437 
1438 		/* unmap dma */
1439 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1440 
1441 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1442 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1443 			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1444 
1445 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1446 		itxd_pdma = qdma_to_pdma(ring, itxd);
1447 	} while (itxd != txd);
1448 
1449 	return -ENOMEM;
1450 }
1451 
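/* Worst-case number of TX descriptors needed for an skb; for GSO frames
 * each fragment may be split into dma_max_len sized segments.
 */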
1452 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1453 {
1454 	int i, nfrags = 1;
1455 	skb_frag_t *frag;
1456 
1457 	if (skb_is_gso(skb)) {
1458 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1459 			frag = &skb_shinfo(skb)->frags[i];
1460 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1461 					       eth->soc->txrx.dma_max_len);
1462 		}
1463 	} else {
1464 		nfrags += skb_shinfo(skb)->nr_frags;
1465 	}
1466 
1467 	return nfrags;
1468 }
1469 
1470 static int mtk_queue_stopped(struct mtk_eth *eth)
1471 {
1472 	int i;
1473 
1474 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1475 		if (!eth->netdev[i])
1476 			continue;
1477 		if (netif_queue_stopped(eth->netdev[i]))
1478 			return 1;
1479 	}
1480 
1481 	return 0;
1482 }
1483 
1484 static void mtk_wake_queue(struct mtk_eth *eth)
1485 {
1486 	int i;
1487 
1488 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1489 		if (!eth->netdev[i])
1490 			continue;
1491 		netif_tx_wake_all_queues(eth->netdev[i]);
1492 	}
1493 }
1494 
1495 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1496 {
1497 	struct mtk_mac *mac = netdev_priv(dev);
1498 	struct mtk_eth *eth = mac->hw;
1499 	struct mtk_tx_ring *ring = &eth->tx_ring;
1500 	struct net_device_stats *stats = &dev->stats;
1501 	bool gso = false;
1502 	int tx_num;
1503 
1504 	/* normally we can rely on the stack not calling this more than once,
1505 	 * however we have 2 queues running on the same ring so we need to lock
1506 	 * the ring access
1507 	 */
1508 	spin_lock(&eth->page_lock);
1509 
1510 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1511 		goto drop;
1512 
1513 	tx_num = mtk_cal_txd_req(eth, skb);
1514 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1515 		netif_tx_stop_all_queues(dev);
1516 		netif_err(eth, tx_queued, dev,
1517 			  "Tx Ring full when queue awake!\n");
1518 		spin_unlock(&eth->page_lock);
1519 		return NETDEV_TX_BUSY;
1520 	}
1521 
1522 	/* TSO: fill MSS info in tcp checksum field */
1523 	if (skb_is_gso(skb)) {
1524 		if (skb_cow_head(skb, 0)) {
1525 			netif_warn(eth, tx_err, dev,
1526 				   "GSO expand head fail.\n");
1527 			goto drop;
1528 		}
1529 
1530 		if (skb_shinfo(skb)->gso_type &
1531 				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1532 			gso = true;
1533 			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1534 		}
1535 	}
1536 
1537 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1538 		goto drop;
1539 
1540 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1541 		netif_tx_stop_all_queues(dev);
1542 
1543 	spin_unlock(&eth->page_lock);
1544 
1545 	return NETDEV_TX_OK;
1546 
1547 drop:
1548 	spin_unlock(&eth->page_lock);
1549 	stats->tx_dropped++;
1550 	dev_kfree_skb_any(skb);
1551 	return NETDEV_TX_OK;
1552 }
1553 
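/* Without hardware LRO only ring 0 is used. With LRO, scan all RX rings
 * and return the first one that has a completed descriptor pending,
 * flagging it for a CPU index update.
 */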
1554 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1555 {
1556 	int i;
1557 	struct mtk_rx_ring *ring;
1558 	int idx;
1559 
1560 	if (!eth->hwlro)
1561 		return &eth->rx_ring[0];
1562 
1563 	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1564 		struct mtk_rx_dma *rxd;
1565 
1566 		ring = &eth->rx_ring[i];
1567 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1568 		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1569 		if (rxd->rxd2 & RX_DMA_DONE) {
1570 			ring->calc_idx_update = true;
1571 			return ring;
1572 		}
1573 	}
1574 
1575 	return NULL;
1576 }
1577 
1578 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1579 {
1580 	struct mtk_rx_ring *ring;
1581 	int i;
1582 
1583 	if (!eth->hwlro) {
1584 		ring = &eth->rx_ring[0];
1585 		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1586 	} else {
1587 		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1588 			ring = &eth->rx_ring[i];
1589 			if (ring->calc_idx_update) {
1590 				ring->calc_idx_update = false;
1591 				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1592 			}
1593 		}
1594 	}
1595 }
1596 
1597 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1598 {
1599 	return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
1600 }
1601 
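/* Create the page_pool backing an RX ring and register it as the memory
 * model of the ring's xdp_rxq. Buffers are mapped bidirectionally when an
 * XDP program is attached so XDP_TX can reuse them without remapping.
 */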
1602 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1603 					      struct xdp_rxq_info *xdp_q,
1604 					      int id, int size)
1605 {
1606 	struct page_pool_params pp_params = {
1607 		.order = 0,
1608 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1609 		.pool_size = size,
1610 		.nid = NUMA_NO_NODE,
1611 		.dev = eth->dma_dev,
1612 		.offset = MTK_PP_HEADROOM,
1613 		.max_len = MTK_PP_MAX_BUF_SIZE,
1614 	};
1615 	struct page_pool *pp;
1616 	int err;
1617 
1618 	pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1619 							  : DMA_FROM_DEVICE;
1620 	pp = page_pool_create(&pp_params);
1621 	if (IS_ERR(pp))
1622 		return pp;
1623 
1624 	err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
1625 				 eth->rx_napi.napi_id, PAGE_SIZE);
1626 	if (err < 0)
1627 		goto err_free_pp;
1628 
1629 	err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1630 	if (err)
1631 		goto err_unregister_rxq;
1632 
1633 	return pp;
1634 
1635 err_unregister_rxq:
1636 	xdp_rxq_info_unreg(xdp_q);
1637 err_free_pp:
1638 	page_pool_destroy(pp);
1639 
1640 	return ERR_PTR(err);
1641 }
1642 
1643 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1644 				    gfp_t gfp_mask)
1645 {
1646 	struct page *page;
1647 
1648 	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1649 	if (!page)
1650 		return NULL;
1651 
1652 	*dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1653 	return page_address(page);
1654 }
1655 
1656 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1657 {
1658 	if (ring->page_pool)
1659 		page_pool_put_full_page(ring->page_pool,
1660 					virt_to_head_page(data), napi);
1661 	else
1662 		skb_free_frag(data);
1663 }
1664 
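/* Map a single xdp_frame (or fragment) for transmission. Frames coming
 * from ndo_xdp_xmit need a fresh DMA mapping, while XDP_TX buffers already
 * live in page_pool pages and only need a dma_sync for the device.
 */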
1665 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1666 			     struct mtk_tx_dma_desc_info *txd_info,
1667 			     struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1668 			     void *data, u16 headroom, int index, bool dma_map)
1669 {
1670 	struct mtk_tx_ring *ring = &eth->tx_ring;
1671 	struct mtk_mac *mac = netdev_priv(dev);
1672 	struct mtk_tx_dma *txd_pdma;
1673 
1674 	if (dma_map) {  /* ndo_xdp_xmit */
1675 		txd_info->addr = dma_map_single(eth->dma_dev, data,
1676 						txd_info->size, DMA_TO_DEVICE);
1677 		if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1678 			return -ENOMEM;
1679 
1680 		tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1681 	} else {
1682 		struct page *page = virt_to_head_page(data);
1683 
1684 		txd_info->addr = page_pool_get_dma_addr(page) +
1685 				 sizeof(struct xdp_frame) + headroom;
1686 		dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1687 					   txd_info->size, DMA_BIDIRECTIONAL);
1688 	}
1689 	mtk_tx_set_dma_desc(dev, txd, txd_info);
1690 
1691 	tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
1692 	tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1693 	tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1694 
1695 	txd_pdma = qdma_to_pdma(ring, txd);
1696 	setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1697 		     index);
1698 
1699 	return 0;
1700 }
1701 
1702 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1703 				struct net_device *dev, bool dma_map)
1704 {
1705 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1706 	const struct mtk_soc_data *soc = eth->soc;
1707 	struct mtk_tx_ring *ring = &eth->tx_ring;
1708 	struct mtk_mac *mac = netdev_priv(dev);
1709 	struct mtk_tx_dma_desc_info txd_info = {
1710 		.size	= xdpf->len,
1711 		.first	= true,
1712 		.last	= !xdp_frame_has_frags(xdpf),
1713 		.qid	= mac->id,
1714 	};
1715 	int err, index = 0, n_desc = 1, nr_frags;
1716 	struct mtk_tx_buf *htx_buf, *tx_buf;
1717 	struct mtk_tx_dma *htxd, *txd;
1718 	void *data = xdpf->data;
1719 
1720 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1721 		return -EBUSY;
1722 
1723 	nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1724 	if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1725 		return -EBUSY;
1726 
1727 	spin_lock(&eth->page_lock);
1728 
1729 	txd = ring->next_free;
1730 	if (txd == ring->last_free) {
1731 		spin_unlock(&eth->page_lock);
1732 		return -ENOMEM;
1733 	}
1734 	htxd = txd;
1735 
1736 	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
1737 	memset(tx_buf, 0, sizeof(*tx_buf));
1738 	htx_buf = tx_buf;
1739 
1740 	for (;;) {
1741 		err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1742 					data, xdpf->headroom, index, dma_map);
1743 		if (err < 0)
1744 			goto unmap;
1745 
1746 		if (txd_info.last)
1747 			break;
1748 
1749 		if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1750 			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1751 			if (txd == ring->last_free)
1752 				goto unmap;
1753 
1754 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1755 						    soc->txrx.txd_size);
1756 			memset(tx_buf, 0, sizeof(*tx_buf));
1757 			n_desc++;
1758 		}
1759 
1760 		memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1761 		txd_info.size = skb_frag_size(&sinfo->frags[index]);
1762 		txd_info.last = index + 1 == nr_frags;
1763 		txd_info.qid = mac->id;
1764 		data = skb_frag_address(&sinfo->frags[index]);
1765 
1766 		index++;
1767 	}
1768 	/* store xdpf for cleanup */
1769 	htx_buf->data = xdpf;
1770 
1771 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1772 		struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1773 
1774 		if (index & 1)
1775 			txd_pdma->txd2 |= TX_DMA_LS0;
1776 		else
1777 			txd_pdma->txd2 |= TX_DMA_LS1;
1778 	}
1779 
1780 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1781 	atomic_sub(n_desc, &ring->free_count);
1782 
1783 	/* make sure that all changes to the dma ring are flushed before we
1784 	 * continue
1785 	 */
1786 	wmb();
1787 
1788 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1789 		mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1790 	} else {
1791 		int idx;
1792 
1793 		idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
1794 		mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1795 			MT7628_TX_CTX_IDX0);
1796 	}
1797 
1798 	spin_unlock(&eth->page_lock);
1799 
1800 	return 0;
1801 
1802 unmap:
1803 	while (htxd != txd) {
1804 		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
1805 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1806 
1807 		htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1808 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1809 			struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1810 
1811 			txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1812 		}
1813 
1814 		htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1815 	}
1816 
1817 	spin_unlock(&eth->page_lock);
1818 
1819 	return err;
1820 }
1821 
1822 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1823 			struct xdp_frame **frames, u32 flags)
1824 {
1825 	struct mtk_mac *mac = netdev_priv(dev);
1826 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1827 	struct mtk_eth *eth = mac->hw;
1828 	int i, nxmit = 0;
1829 
1830 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1831 		return -EINVAL;
1832 
1833 	for (i = 0; i < num_frame; i++) {
1834 		if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1835 			break;
1836 		nxmit++;
1837 	}
1838 
1839 	u64_stats_update_begin(&hw_stats->syncp);
1840 	hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1841 	hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1842 	u64_stats_update_end(&hw_stats->syncp);
1843 
1844 	return nxmit;
1845 }
1846 
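/* Run the attached XDP program on a received buffer, account the verdict
 * in the per-MAC xdp_stats and recycle the page back to the pool on
 * XDP_DROP/XDP_ABORTED. Returns the XDP action taken.
 */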
1847 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1848 		       struct xdp_buff *xdp, struct net_device *dev)
1849 {
1850 	struct mtk_mac *mac = netdev_priv(dev);
1851 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1852 	u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1853 	struct bpf_prog *prog;
1854 	u32 act = XDP_PASS;
1855 
1856 	rcu_read_lock();
1857 
1858 	prog = rcu_dereference(eth->prog);
1859 	if (!prog)
1860 		goto out;
1861 
1862 	act = bpf_prog_run_xdp(prog, xdp);
1863 	switch (act) {
1864 	case XDP_PASS:
1865 		count = &hw_stats->xdp_stats.rx_xdp_pass;
1866 		goto update_stats;
1867 	case XDP_REDIRECT:
1868 		if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1869 			act = XDP_DROP;
1870 			break;
1871 		}
1872 
1873 		count = &hw_stats->xdp_stats.rx_xdp_redirect;
1874 		goto update_stats;
1875 	case XDP_TX: {
1876 		struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1877 
1878 		if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1879 			count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1880 			act = XDP_DROP;
1881 			break;
1882 		}
1883 
1884 		count = &hw_stats->xdp_stats.rx_xdp_tx;
1885 		goto update_stats;
1886 	}
1887 	default:
1888 		bpf_warn_invalid_xdp_action(dev, prog, act);
1889 		fallthrough;
1890 	case XDP_ABORTED:
1891 		trace_xdp_exception(dev, prog, act);
1892 		fallthrough;
1893 	case XDP_DROP:
1894 		break;
1895 	}
1896 
1897 	page_pool_put_full_page(ring->page_pool,
1898 				virt_to_head_page(xdp->data), true);
1899 
1900 update_stats:
1901 	u64_stats_update_begin(&hw_stats->syncp);
1902 	*count = *count + 1;
1903 	u64_stats_update_end(&hw_stats->syncp);
1904 out:
1905 	rcu_read_unlock();
1906 
1907 	return act;
1908 }
1909 
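/* RX NAPI poll: walk the RX descriptor rings, allocate replacement buffers,
 * run XDP when a program is attached, build skbs and pass them to GRO.
 * Processed descriptors are handed back to the hardware at the end.
 */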
1910 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1911 		       struct mtk_eth *eth)
1912 {
1913 	struct dim_sample dim_sample = {};
1914 	struct mtk_rx_ring *ring;
1915 	bool xdp_flush = false;
1916 	int idx;
1917 	struct sk_buff *skb;
1918 	u8 *data, *new_data;
1919 	struct mtk_rx_dma_v2 *rxd, trxd;
1920 	int done = 0, bytes = 0;
1921 
1922 	while (done < budget) {
1923 		unsigned int pktlen, *rxdcsum;
1924 		bool has_hwaccel_tag = false;
1925 		struct net_device *netdev;
1926 		u16 vlan_proto, vlan_tci;
1927 		dma_addr_t dma_addr;
1928 		u32 hash, reason;
1929 		int mac = 0;
1930 
1931 		ring = mtk_get_rx_ring(eth);
1932 		if (unlikely(!ring))
1933 			goto rx_done;
1934 
1935 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1936 		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1937 		data = ring->data[idx];
1938 
1939 		if (!mtk_rx_get_desc(eth, &trxd, rxd))
1940 			break;
1941 
1942 		/* find out which mac the packet comes from. values start at 1 */
1943 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1944 			mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
1945 		else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
1946 			 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
1947 			mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1948 
1949 		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1950 			     !eth->netdev[mac]))
1951 			goto release_desc;
1952 
1953 		netdev = eth->netdev[mac];
1954 
1955 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1956 			goto release_desc;
1957 
1958 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1959 
1960 		/* alloc new buffer */
1961 		if (ring->page_pool) {
1962 			struct page *page = virt_to_head_page(data);
1963 			struct xdp_buff xdp;
1964 			u32 ret;
1965 
1966 			new_data = mtk_page_pool_get_buff(ring->page_pool,
1967 							  &dma_addr,
1968 							  GFP_ATOMIC);
1969 			if (unlikely(!new_data)) {
1970 				netdev->stats.rx_dropped++;
1971 				goto release_desc;
1972 			}
1973 
1974 			dma_sync_single_for_cpu(eth->dma_dev,
1975 				page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
1976 				pktlen, page_pool_get_dma_dir(ring->page_pool));
1977 
1978 			xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
1979 			xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
1980 					 false);
1981 			xdp_buff_clear_frags_flag(&xdp);
1982 
1983 			ret = mtk_xdp_run(eth, ring, &xdp, netdev);
1984 			if (ret == XDP_REDIRECT)
1985 				xdp_flush = true;
1986 
1987 			if (ret != XDP_PASS)
1988 				goto skip_rx;
1989 
1990 			skb = build_skb(data, PAGE_SIZE);
1991 			if (unlikely(!skb)) {
1992 				page_pool_put_full_page(ring->page_pool,
1993 							page, true);
1994 				netdev->stats.rx_dropped++;
1995 				goto skip_rx;
1996 			}
1997 
1998 			skb_reserve(skb, xdp.data - xdp.data_hard_start);
1999 			skb_put(skb, xdp.data_end - xdp.data);
2000 			skb_mark_for_recycle(skb);
2001 		} else {
2002 			if (ring->frag_size <= PAGE_SIZE)
2003 				new_data = napi_alloc_frag(ring->frag_size);
2004 			else
2005 				new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
2006 
2007 			if (unlikely(!new_data)) {
2008 				netdev->stats.rx_dropped++;
2009 				goto release_desc;
2010 			}
2011 
2012 			dma_addr = dma_map_single(eth->dma_dev,
2013 				new_data + NET_SKB_PAD + eth->ip_align,
2014 				ring->buf_size, DMA_FROM_DEVICE);
2015 			if (unlikely(dma_mapping_error(eth->dma_dev,
2016 						       dma_addr))) {
2017 				skb_free_frag(new_data);
2018 				netdev->stats.rx_dropped++;
2019 				goto release_desc;
2020 			}
2021 
2022 			dma_unmap_single(eth->dma_dev, trxd.rxd1,
2023 					 ring->buf_size, DMA_FROM_DEVICE);
2024 
2025 			skb = build_skb(data, ring->frag_size);
2026 			if (unlikely(!skb)) {
2027 				netdev->stats.rx_dropped++;
2028 				skb_free_frag(data);
2029 				goto skip_rx;
2030 			}
2031 
2032 			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2033 			skb_put(skb, pktlen);
2034 		}
2035 
2036 		skb->dev = netdev;
2037 		bytes += skb->len;
2038 
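		/* The PPE reason/hash fields and the L4 checksum status live in
		 * different descriptor words on NETSYS v1 and v2 hardware.
		 */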
2039 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2040 			reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2041 			hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2042 			if (hash != MTK_RXD5_FOE_ENTRY)
2043 				skb_set_hash(skb, jhash_1word(hash, 0),
2044 					     PKT_HASH_TYPE_L4);
2045 			rxdcsum = &trxd.rxd3;
2046 		} else {
2047 			reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2048 			hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2049 			if (hash != MTK_RXD4_FOE_ENTRY)
2050 				skb_set_hash(skb, jhash_1word(hash, 0),
2051 					     PKT_HASH_TYPE_L4);
2052 			rxdcsum = &trxd.rxd4;
2053 		}
2054 
2055 		if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
2056 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2057 		else
2058 			skb_checksum_none_assert(skb);
2059 		skb->protocol = eth_type_trans(skb, netdev);
2060 
2061 		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2062 			mtk_ppe_check_skb(eth->ppe[0], skb, hash);
2063 
2064 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2065 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2066 				if (trxd.rxd3 & RX_DMA_VTAG_V2) {
2067 					vlan_proto = RX_DMA_VPID(trxd.rxd4);
2068 					vlan_tci = RX_DMA_VID(trxd.rxd4);
2069 					has_hwaccel_tag = true;
2070 				}
2071 			} else if (trxd.rxd2 & RX_DMA_VTAG) {
2072 				vlan_proto = RX_DMA_VPID(trxd.rxd3);
2073 				vlan_tci = RX_DMA_VID(trxd.rxd3);
2074 				has_hwaccel_tag = true;
2075 			}
2076 		}
2077 
2078 		/* When using VLAN untagging in combination with DSA, the
2079 		 * hardware treats the MTK special tag as a VLAN and untags it.
2080 		 */
2081 		if (has_hwaccel_tag && netdev_uses_dsa(netdev)) {
2082 			unsigned int port = vlan_proto & GENMASK(2, 0);
2083 
2084 			if (port < ARRAY_SIZE(eth->dsa_meta) &&
2085 			    eth->dsa_meta[port])
2086 				skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2087 		} else if (has_hwaccel_tag) {
2088 			__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
2089 		}
2090 
2091 		skb_record_rx_queue(skb, 0);
2092 		napi_gro_receive(napi, skb);
2093 
2094 skip_rx:
2095 		ring->data[idx] = new_data;
2096 		rxd->rxd1 = (unsigned int)dma_addr;
2097 release_desc:
2098 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2099 			rxd->rxd2 = RX_DMA_LSO;
2100 		else
2101 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2102 
2103 		ring->calc_idx = idx;
2104 		done++;
2105 	}
2106 
2107 rx_done:
2108 	if (done) {
2109 		/* make sure that all changes to the dma ring are flushed before
2110 		 * we continue
2111 		 */
2112 		wmb();
2113 		mtk_update_rx_cpu_idx(eth);
2114 	}
2115 
2116 	eth->rx_packets += done;
2117 	eth->rx_bytes += bytes;
2118 	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2119 			  &dim_sample);
2120 	net_dim(&eth->rx_dim, dim_sample);
2121 
2122 	if (xdp_flush)
2123 		xdp_do_flush_map();
2124 
2125 	return done;
2126 }
2127 
2128 struct mtk_poll_state {
2129 	struct netdev_queue *txq;
2130 	unsigned int total;
2131 	unsigned int done;
2132 	unsigned int bytes;
2133 };
2134 
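/* Account a completed TX packet and batch byte-queue-limit (BQL) completions
 * per TX queue, flushing the previous queue's totals when the queue changes.
 */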
2135 static void
2136 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2137 		 struct sk_buff *skb)
2138 {
2139 	struct netdev_queue *txq;
2140 	struct net_device *dev;
2141 	unsigned int bytes = skb->len;
2142 
2143 	state->total++;
2144 	eth->tx_packets++;
2145 	eth->tx_bytes += bytes;
2146 
2147 	dev = eth->netdev[mac];
2148 	if (!dev)
2149 		return;
2150 
2151 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2152 	if (state->txq == txq) {
2153 		state->done++;
2154 		state->bytes += bytes;
2155 		return;
2156 	}
2157 
2158 	if (state->txq)
2159 		netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2160 
2161 	state->txq = txq;
2162 	state->done = 1;
2163 	state->bytes = bytes;
2164 }
2165 
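/* Reclaim completed QDMA TX descriptors by walking the ring from the last
 * software free pointer up to the hardware's current release pointer.
 */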
2166 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2167 			    struct mtk_poll_state *state)
2168 {
2169 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2170 	struct mtk_tx_ring *ring = &eth->tx_ring;
2171 	struct mtk_tx_buf *tx_buf;
2172 	struct xdp_frame_bulk bq;
2173 	struct mtk_tx_dma *desc;
2174 	u32 cpu, dma;
2175 
2176 	cpu = ring->last_free_ptr;
2177 	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2178 
2179 	desc = mtk_qdma_phys_to_virt(ring, cpu);
2180 	xdp_frame_bulk_init(&bq);
2181 
2182 	while ((cpu != dma) && budget) {
2183 		u32 next_cpu = desc->txd2;
2184 		int mac = 0;
2185 
2186 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2187 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2188 			break;
2189 
2190 		tx_buf = mtk_desc_to_tx_buf(ring, desc,
2191 					    eth->soc->txrx.txd_size);
2192 		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
2193 			mac = 1;
2194 
2195 		if (!tx_buf->data)
2196 			break;
2197 
2198 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2199 			if (tx_buf->type == MTK_TYPE_SKB)
2200 				mtk_poll_tx_done(eth, state, mac, tx_buf->data);
2201 
2202 			budget--;
2203 		}
2204 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2205 
2206 		ring->last_free = desc;
2207 		atomic_inc(&ring->free_count);
2208 
2209 		cpu = next_cpu;
2210 	}
2211 	xdp_flush_frame_bulk(&bq);
2212 
2213 	ring->last_free_ptr = cpu;
2214 	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2215 
2216 	return budget;
2217 }
2218 
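/* PDMA (MT7628) variant of TX completion: reclaim descriptors between the
 * software CPU index and the hardware DTX index.
 */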
2219 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2220 			    struct mtk_poll_state *state)
2221 {
2222 	struct mtk_tx_ring *ring = &eth->tx_ring;
2223 	struct mtk_tx_buf *tx_buf;
2224 	struct xdp_frame_bulk bq;
2225 	struct mtk_tx_dma *desc;
2226 	u32 cpu, dma;
2227 
2228 	cpu = ring->cpu_idx;
2229 	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2230 	xdp_frame_bulk_init(&bq);
2231 
2232 	while ((cpu != dma) && budget) {
2233 		tx_buf = &ring->buf[cpu];
2234 		if (!tx_buf->data)
2235 			break;
2236 
2237 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2238 			if (tx_buf->type == MTK_TYPE_SKB)
2239 				mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2240 			budget--;
2241 		}
2242 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2243 
2244 		desc = ring->dma + cpu * eth->soc->txrx.txd_size;
2245 		ring->last_free = desc;
2246 		atomic_inc(&ring->free_count);
2247 
2248 		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2249 	}
2250 	xdp_flush_frame_bulk(&bq);
2251 
2252 	ring->cpu_idx = cpu;
2253 
2254 	return budget;
2255 }
2256 
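/* Common TX completion path: reclaim descriptors via the QDMA or PDMA helper,
 * flush pending BQL updates, feed the TX DIM sample and wake the queues once
 * enough descriptors are free again.
 */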
2257 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2258 {
2259 	struct mtk_tx_ring *ring = &eth->tx_ring;
2260 	struct dim_sample dim_sample = {};
2261 	struct mtk_poll_state state = {};
2262 
2263 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2264 		budget = mtk_poll_tx_qdma(eth, budget, &state);
2265 	else
2266 		budget = mtk_poll_tx_pdma(eth, budget, &state);
2267 
2268 	if (state.txq)
2269 		netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2270 
2271 	dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2272 			  &dim_sample);
2273 	net_dim(&eth->tx_dim, dim_sample);
2274 
2275 	if (mtk_queue_stopped(eth) &&
2276 	    (atomic_read(&ring->free_count) > ring->thresh))
2277 		mtk_wake_queue(eth);
2278 
2279 	return state.total;
2280 }
2281 
2282 static void mtk_handle_status_irq(struct mtk_eth *eth)
2283 {
2284 	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2285 
2286 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2287 		mtk_stats_update(eth);
2288 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2289 			MTK_INT_STATUS2);
2290 	}
2291 }
2292 
2293 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2294 {
2295 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2296 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2297 	int tx_done = 0;
2298 
2299 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2300 		mtk_handle_status_irq(eth);
2301 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2302 	tx_done = mtk_poll_tx(eth, budget);
2303 
2304 	if (unlikely(netif_msg_intr(eth))) {
2305 		dev_info(eth->dev,
2306 			 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2307 			 mtk_r32(eth, reg_map->tx_irq_status),
2308 			 mtk_r32(eth, reg_map->tx_irq_mask));
2309 	}
2310 
2311 	if (tx_done == budget)
2312 		return budget;
2313 
2314 	if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2315 		return budget;
2316 
2317 	if (napi_complete_done(napi, tx_done))
2318 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2319 
2320 	return tx_done;
2321 }
2322 
2323 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2324 {
2325 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2326 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2327 	int rx_done_total = 0;
2328 
2329 	mtk_handle_status_irq(eth);
2330 
2331 	do {
2332 		int rx_done;
2333 
2334 		mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
2335 			reg_map->pdma.irq_status);
2336 		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2337 		rx_done_total += rx_done;
2338 
2339 		if (unlikely(netif_msg_intr(eth))) {
2340 			dev_info(eth->dev,
2341 				 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2342 				 mtk_r32(eth, reg_map->pdma.irq_status),
2343 				 mtk_r32(eth, reg_map->pdma.irq_mask));
2344 		}
2345 
2346 		if (rx_done_total == budget)
2347 			return budget;
2348 
2349 	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
2350 		 eth->soc->txrx.rx_irq_done_mask);
2351 
2352 	if (napi_complete_done(napi, rx_done_total))
2353 		mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2354 
2355 	return rx_done_total;
2356 }
2357 
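/* Allocate and initialise the TX descriptor ring. On QDMA the descriptors are
 * chained through txd2; PDMA-only SoCs additionally get a shadow ring
 * (ring->dma_pdma) holding the descriptors the hardware actually uses.
 */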
2358 static int mtk_tx_alloc(struct mtk_eth *eth)
2359 {
2360 	const struct mtk_soc_data *soc = eth->soc;
2361 	struct mtk_tx_ring *ring = &eth->tx_ring;
2362 	int i, sz = soc->txrx.txd_size;
2363 	struct mtk_tx_dma_v2 *txd;
2364 	int ring_size;
2365 	u32 ofs, val;
2366 
2367 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2368 		ring_size = MTK_QDMA_RING_SIZE;
2369 	else
2370 		ring_size = MTK_DMA_SIZE;
2371 
2372 	ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2373 			       GFP_KERNEL);
2374 	if (!ring->buf)
2375 		goto no_tx_mem;
2376 
2377 	ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2378 				       &ring->phys, GFP_KERNEL);
2379 	if (!ring->dma)
2380 		goto no_tx_mem;
2381 
2382 	for (i = 0; i < ring_size; i++) {
2383 		int next = (i + 1) % ring_size;
2384 		u32 next_ptr = ring->phys + next * sz;
2385 
2386 		txd = ring->dma + i * sz;
2387 		txd->txd2 = next_ptr;
2388 		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2389 		txd->txd4 = 0;
2390 		if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
2391 			txd->txd5 = 0;
2392 			txd->txd6 = 0;
2393 			txd->txd7 = 0;
2394 			txd->txd8 = 0;
2395 		}
2396 	}
2397 
2398 	/* On MT7688 (PDMA only) this driver uses the ring->dma descriptors
2399 	 * only for software bookkeeping. The descriptors the hardware really
2400 	 * uses are the PDMA descriptors in ring->dma_pdma.
2401 	 */
2402 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2403 		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2404 						    &ring->phys_pdma, GFP_KERNEL);
2405 		if (!ring->dma_pdma)
2406 			goto no_tx_mem;
2407 
2408 		for (i = 0; i < ring_size; i++) {
2409 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2410 			ring->dma_pdma[i].txd4 = 0;
2411 		}
2412 	}
2413 
2414 	ring->dma_size = ring_size;
2415 	atomic_set(&ring->free_count, ring_size - 2);
2416 	ring->next_free = ring->dma;
2417 	ring->last_free = (void *)txd;
2418 	ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2419 	ring->thresh = MAX_SKB_FRAGS;
2420 
2421 	/* make sure that all changes to the dma ring are flushed before we
2422 	 * continue
2423 	 */
2424 	wmb();
2425 
2426 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2427 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2428 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2429 		mtk_w32(eth,
2430 			ring->phys + ((ring_size - 1) * sz),
2431 			soc->reg_map->qdma.crx_ptr);
2432 		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2433 
2434 		for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2435 			val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2436 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2437 
2438 			val = MTK_QTX_SCH_MIN_RATE_EN |
2439 			      /* minimum: 10 Mbps */
2440 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2441 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2442 			      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2443 			if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2444 				val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2445 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2446 			ofs += MTK_QTX_OFFSET;
2447 		}
2448 		val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2449 		mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2450 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2451 			mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2452 	} else {
2453 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2454 		mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2455 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2456 		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2457 	}
2458 
2459 	return 0;
2460 
2461 no_tx_mem:
2462 	return -ENOMEM;
2463 }
2464 
2465 static void mtk_tx_clean(struct mtk_eth *eth)
2466 {
2467 	const struct mtk_soc_data *soc = eth->soc;
2468 	struct mtk_tx_ring *ring = &eth->tx_ring;
2469 	int i;
2470 
2471 	if (ring->buf) {
2472 		for (i = 0; i < ring->dma_size; i++)
2473 			mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2474 		kfree(ring->buf);
2475 		ring->buf = NULL;
2476 	}
2477 
2478 	if (ring->dma) {
2479 		dma_free_coherent(eth->dma_dev,
2480 				  ring->dma_size * soc->txrx.txd_size,
2481 				  ring->dma, ring->phys);
2482 		ring->dma = NULL;
2483 	}
2484 
2485 	if (ring->dma_pdma) {
2486 		dma_free_coherent(eth->dma_dev,
2487 				  ring->dma_size * soc->txrx.txd_size,
2488 				  ring->dma_pdma, ring->phys_pdma);
2489 		ring->dma_pdma = NULL;
2490 	}
2491 }
2492 
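/* Allocate one RX ring (normal, HW LRO or QDMA), pre-fill it with receive
 * buffers from the page pool or the fragment allocator, and program the ring
 * base, size and CPU index registers.
 */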
2493 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2494 {
2495 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2496 	struct mtk_rx_ring *ring;
2497 	int rx_data_len, rx_dma_size;
2498 	int i;
2499 
2500 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2501 		if (ring_no)
2502 			return -EINVAL;
2503 		ring = &eth->rx_ring_qdma;
2504 	} else {
2505 		ring = &eth->rx_ring[ring_no];
2506 	}
2507 
2508 	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2509 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2510 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2511 	} else {
2512 		rx_data_len = ETH_DATA_LEN;
2513 		rx_dma_size = MTK_DMA_SIZE;
2514 	}
2515 
2516 	ring->frag_size = mtk_max_frag_size(rx_data_len);
2517 	ring->buf_size = mtk_max_buf_size(ring->frag_size);
2518 	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2519 			     GFP_KERNEL);
2520 	if (!ring->data)
2521 		return -ENOMEM;
2522 
2523 	if (mtk_page_pool_enabled(eth)) {
2524 		struct page_pool *pp;
2525 
2526 		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2527 					  rx_dma_size);
2528 		if (IS_ERR(pp))
2529 			return PTR_ERR(pp);
2530 
2531 		ring->page_pool = pp;
2532 	}
2533 
2534 	ring->dma = dma_alloc_coherent(eth->dma_dev,
2535 				       rx_dma_size * eth->soc->txrx.rxd_size,
2536 				       &ring->phys, GFP_KERNEL);
2537 	if (!ring->dma)
2538 		return -ENOMEM;
2539 
2540 	for (i = 0; i < rx_dma_size; i++) {
2541 		struct mtk_rx_dma_v2 *rxd;
2542 		dma_addr_t dma_addr;
2543 		void *data;
2544 
2545 		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2546 		if (ring->page_pool) {
2547 			data = mtk_page_pool_get_buff(ring->page_pool,
2548 						      &dma_addr, GFP_KERNEL);
2549 			if (!data)
2550 				return -ENOMEM;
2551 		} else {
2552 			if (ring->frag_size <= PAGE_SIZE)
2553 				data = netdev_alloc_frag(ring->frag_size);
2554 			else
2555 				data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2556 
2557 			if (!data)
2558 				return -ENOMEM;
2559 
2560 			dma_addr = dma_map_single(eth->dma_dev,
2561 				data + NET_SKB_PAD + eth->ip_align,
2562 				ring->buf_size, DMA_FROM_DEVICE);
2563 			if (unlikely(dma_mapping_error(eth->dma_dev,
2564 						       dma_addr))) {
2565 				skb_free_frag(data);
2566 				return -ENOMEM;
2567 			}
2568 		}
2569 		rxd->rxd1 = (unsigned int)dma_addr;
2570 		ring->data[i] = data;
2571 
2572 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2573 			rxd->rxd2 = RX_DMA_LSO;
2574 		else
2575 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2576 
2577 		rxd->rxd3 = 0;
2578 		rxd->rxd4 = 0;
2579 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2580 			rxd->rxd5 = 0;
2581 			rxd->rxd6 = 0;
2582 			rxd->rxd7 = 0;
2583 			rxd->rxd8 = 0;
2584 		}
2585 	}
2586 
2587 	ring->dma_size = rx_dma_size;
2588 	ring->calc_idx_update = false;
2589 	ring->calc_idx = rx_dma_size - 1;
2590 	if (rx_flag == MTK_RX_FLAGS_QDMA)
2591 		ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2592 				    ring_no * MTK_QRX_OFFSET;
2593 	else
2594 		ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2595 				    ring_no * MTK_QRX_OFFSET;
2596 	/* make sure that all changes to the dma ring are flushed before we
2597 	 * continue
2598 	 */
2599 	wmb();
2600 
2601 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2602 		mtk_w32(eth, ring->phys,
2603 			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2604 		mtk_w32(eth, rx_dma_size,
2605 			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2606 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2607 			reg_map->qdma.rst_idx);
2608 	} else {
2609 		mtk_w32(eth, ring->phys,
2610 			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2611 		mtk_w32(eth, rx_dma_size,
2612 			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2613 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2614 			reg_map->pdma.rst_idx);
2615 	}
2616 	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2617 
2618 	return 0;
2619 }
2620 
2621 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
2622 {
2623 	int i;
2624 
2625 	if (ring->data && ring->dma) {
2626 		for (i = 0; i < ring->dma_size; i++) {
2627 			struct mtk_rx_dma *rxd;
2628 
2629 			if (!ring->data[i])
2630 				continue;
2631 
2632 			rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2633 			if (!rxd->rxd1)
2634 				continue;
2635 
2636 			dma_unmap_single(eth->dma_dev, rxd->rxd1,
2637 					 ring->buf_size, DMA_FROM_DEVICE);
2638 			mtk_rx_put_buff(ring, ring->data[i], false);
2639 		}
2640 		kfree(ring->data);
2641 		ring->data = NULL;
2642 	}
2643 
2644 	if (ring->dma) {
2645 		dma_free_coherent(eth->dma_dev,
2646 				  ring->dma_size * eth->soc->txrx.rxd_size,
2647 				  ring->dma, ring->phys);
2648 		ring->dma = NULL;
2649 	}
2650 
2651 	if (ring->page_pool) {
2652 		if (xdp_rxq_info_is_reg(&ring->xdp_q))
2653 			xdp_rxq_info_unreg(&ring->xdp_q);
2654 		page_pool_destroy(ring->page_pool);
2655 		ring->page_pool = NULL;
2656 	}
2657 }
2658 
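/* Configure the hardware LRO rings: auto-learn mode, aggregation and age
 * timers, bandwidth threshold and per-ring aggregation limits, then enable
 * LRO globally.
 */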
2659 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2660 {
2661 	int i;
2662 	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2663 	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2664 
2665 	/* set LRO rings to auto-learn modes */
2666 	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2667 
2668 	/* validate LRO ring */
2669 	ring_ctrl_dw2 |= MTK_RING_VLD;
2670 
2671 	/* set AGE timer (unit: 20us) */
2672 	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2673 	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2674 
2675 	/* set max AGG timer (unit: 20us) */
2676 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2677 
2678 	/* set max LRO AGG count */
2679 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2680 	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2681 
2682 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2683 		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2684 		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2685 		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2686 	}
2687 
2688 	/* IPv4 checksum update enable */
2689 	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2690 
2691 	/* switch priority comparison to packet count mode */
2692 	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2693 
2694 	/* bandwidth threshold setting */
2695 	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2696 
2697 	/* auto-learn score delta setting */
2698 	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2699 
2700 	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2701 	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2702 		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2703 
2704 	/* set HW LRO mode & the max aggregation count for rx packets */
2705 	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2706 
2707 	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
2708 	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2709 
2710 	/* enable HW LRO */
2711 	lro_ctrl_dw0 |= MTK_LRO_EN;
2712 
2713 	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2714 	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2715 
2716 	return 0;
2717 }
2718 
2719 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2720 {
2721 	int i;
2722 	u32 val;
2723 
2724 	/* relinquish lro rings, flush aggregated packets */
2725 	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2726 
2727 	/* wait for the relinquish requests to complete */
2728 	for (i = 0; i < 10; i++) {
2729 		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2730 		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2731 			msleep(20);
2732 			continue;
2733 		}
2734 		break;
2735 	}
2736 
2737 	/* invalidate lro rings */
2738 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2739 		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2740 
2741 	/* disable HW LRO */
2742 	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2743 }
2744 
2745 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2746 {
2747 	u32 reg_val;
2748 
2749 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2750 
2751 	/* invalidate the IP setting */
2752 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2753 
2754 	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2755 
2756 	/* validate the IP setting */
2757 	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2758 }
2759 
2760 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2761 {
2762 	u32 reg_val;
2763 
2764 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2765 
2766 	/* invalidate the IP setting */
2767 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2768 
2769 	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2770 }
2771 
2772 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2773 {
2774 	int cnt = 0;
2775 	int i;
2776 
2777 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2778 		if (mac->hwlro_ip[i])
2779 			cnt++;
2780 	}
2781 
2782 	return cnt;
2783 }
2784 
2785 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2786 				struct ethtool_rxnfc *cmd)
2787 {
2788 	struct ethtool_rx_flow_spec *fsp =
2789 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2790 	struct mtk_mac *mac = netdev_priv(dev);
2791 	struct mtk_eth *eth = mac->hw;
2792 	int hwlro_idx;
2793 
2794 	if ((fsp->flow_type != TCP_V4_FLOW) ||
2795 	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2796 	    (fsp->location > 1))
2797 		return -EINVAL;
2798 
2799 	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2800 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2801 
2802 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2803 
2804 	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2805 
2806 	return 0;
2807 }
2808 
2809 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2810 				struct ethtool_rxnfc *cmd)
2811 {
2812 	struct ethtool_rx_flow_spec *fsp =
2813 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2814 	struct mtk_mac *mac = netdev_priv(dev);
2815 	struct mtk_eth *eth = mac->hw;
2816 	int hwlro_idx;
2817 
2818 	if (fsp->location > 1)
2819 		return -EINVAL;
2820 
2821 	mac->hwlro_ip[fsp->location] = 0;
2822 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2823 
2824 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2825 
2826 	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2827 
2828 	return 0;
2829 }
2830 
2831 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2832 {
2833 	struct mtk_mac *mac = netdev_priv(dev);
2834 	struct mtk_eth *eth = mac->hw;
2835 	int i, hwlro_idx;
2836 
2837 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2838 		mac->hwlro_ip[i] = 0;
2839 		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2840 
2841 		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2842 	}
2843 
2844 	mac->hwlro_ip_cnt = 0;
2845 }
2846 
2847 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2848 				    struct ethtool_rxnfc *cmd)
2849 {
2850 	struct mtk_mac *mac = netdev_priv(dev);
2851 	struct ethtool_rx_flow_spec *fsp =
2852 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2853 
2854 	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2855 		return -EINVAL;
2856 
2857 	/* only the TCP destination IPv4 address is meaningful; other fields are ignored */
2858 	fsp->flow_type = TCP_V4_FLOW;
2859 	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2860 	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2861 
2862 	fsp->h_u.tcp_ip4_spec.ip4src = 0;
2863 	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2864 	fsp->h_u.tcp_ip4_spec.psrc = 0;
2865 	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2866 	fsp->h_u.tcp_ip4_spec.pdst = 0;
2867 	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2868 	fsp->h_u.tcp_ip4_spec.tos = 0;
2869 	fsp->m_u.tcp_ip4_spec.tos = 0xff;
2870 
2871 	return 0;
2872 }
2873 
2874 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2875 				  struct ethtool_rxnfc *cmd,
2876 				  u32 *rule_locs)
2877 {
2878 	struct mtk_mac *mac = netdev_priv(dev);
2879 	int cnt = 0;
2880 	int i;
2881 
2882 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2883 		if (mac->hwlro_ip[i]) {
2884 			rule_locs[cnt] = i;
2885 			cnt++;
2886 		}
2887 	}
2888 
2889 	cmd->rule_cnt = cnt;
2890 
2891 	return 0;
2892 }
2893 
2894 static netdev_features_t mtk_fix_features(struct net_device *dev,
2895 					  netdev_features_t features)
2896 {
2897 	if (!(features & NETIF_F_LRO)) {
2898 		struct mtk_mac *mac = netdev_priv(dev);
2899 		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2900 
2901 		if (ip_cnt) {
2902 			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2903 
2904 			features |= NETIF_F_LRO;
2905 		}
2906 	}
2907 
2908 	return features;
2909 }
2910 
2911 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2912 {
2913 	struct mtk_mac *mac = netdev_priv(dev);
2914 	struct mtk_eth *eth = mac->hw;
2915 	netdev_features_t diff = dev->features ^ features;
2916 	int i;
2917 
2918 	if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
2919 		mtk_hwlro_netdev_disable(dev);
2920 
2921 	/* Set RX VLAN offloading */
2922 	if (!(diff & NETIF_F_HW_VLAN_CTAG_RX))
2923 		return 0;
2924 
2925 	mtk_w32(eth, !!(features & NETIF_F_HW_VLAN_CTAG_RX),
2926 		MTK_CDMP_EG_CTRL);
2927 
2928 	/* sync features with other MAC */
2929 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2930 		if (!eth->netdev[i] || eth->netdev[i] == dev)
2931 			continue;
2932 		eth->netdev[i]->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2933 		eth->netdev[i]->features |= features & NETIF_F_HW_VLAN_CTAG_RX;
2934 	}
2935 
2936 	return 0;
2937 }
2938 
2939 /* wait for DMA to finish whatever it is doing before we start using it again */
2940 static int mtk_dma_busy_wait(struct mtk_eth *eth)
2941 {
2942 	unsigned int reg;
2943 	int ret;
2944 	u32 val;
2945 
2946 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2947 		reg = eth->soc->reg_map->qdma.glo_cfg;
2948 	else
2949 		reg = eth->soc->reg_map->pdma.glo_cfg;
2950 
2951 	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
2952 					!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
2953 					5, MTK_DMA_BUSY_TIMEOUT_US);
2954 	if (ret)
2955 		dev_err(eth->dev, "DMA init timeout\n");
2956 
2957 	return ret;
2958 }
2959 
2960 static int mtk_dma_init(struct mtk_eth *eth)
2961 {
2962 	int err;
2963 	u32 i;
2964 
2965 	if (mtk_dma_busy_wait(eth))
2966 		return -EBUSY;
2967 
2968 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2969 		/* QDMA needs scratch memory for internal reordering of the
2970 		 * descriptors
2971 		 */
2972 		err = mtk_init_fq_dma(eth);
2973 		if (err)
2974 			return err;
2975 	}
2976 
2977 	err = mtk_tx_alloc(eth);
2978 	if (err)
2979 		return err;
2980 
2981 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2982 		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2983 		if (err)
2984 			return err;
2985 	}
2986 
2987 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2988 	if (err)
2989 		return err;
2990 
2991 	if (eth->hwlro) {
2992 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2993 			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2994 			if (err)
2995 				return err;
2996 		}
2997 		err = mtk_hwlro_rx_init(eth);
2998 		if (err)
2999 			return err;
3000 	}
3001 
3002 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3003 		/* Enable random early drop and set drop threshold
3004 		 * automatically
3005 		 */
3006 		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
3007 			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3008 		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
3009 	}
3010 
3011 	return 0;
3012 }
3013 
3014 static void mtk_dma_free(struct mtk_eth *eth)
3015 {
3016 	const struct mtk_soc_data *soc = eth->soc;
3017 	int i;
3018 
3019 	for (i = 0; i < MTK_MAC_COUNT; i++)
3020 		if (eth->netdev[i])
3021 			netdev_reset_queue(eth->netdev[i]);
3022 	if (eth->scratch_ring) {
3023 		dma_free_coherent(eth->dma_dev,
3024 				  MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
3025 				  eth->scratch_ring, eth->phy_scratch_ring);
3026 		eth->scratch_ring = NULL;
3027 		eth->phy_scratch_ring = 0;
3028 	}
3029 	mtk_tx_clean(eth);
3030 	mtk_rx_clean(eth, &eth->rx_ring[0]);
3031 	mtk_rx_clean(eth, &eth->rx_ring_qdma);
3032 
3033 	if (eth->hwlro) {
3034 		mtk_hwlro_rx_uninit(eth);
3035 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3036 			mtk_rx_clean(eth, &eth->rx_ring[i]);
3037 	}
3038 
3039 	kfree(eth->scratch_head);
3040 }
3041 
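/* Check the frame engine status bits that flag fatal FIFO or TSO errors
 * which warrant a full hardware reset.
 */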
3042 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3043 {
3044 	u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3045 
3046 	return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3047 	       (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3048 	       (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3049 }
3050 
3051 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3052 {
3053 	struct mtk_mac *mac = netdev_priv(dev);
3054 	struct mtk_eth *eth = mac->hw;
3055 
3056 	if (test_bit(MTK_RESETTING, &eth->state))
3057 		return;
3058 
3059 	if (!mtk_hw_reset_check(eth))
3060 		return;
3061 
3062 	eth->netdev[mac->id]->stats.tx_errors++;
3063 	netif_err(eth, tx_err, dev, "transmit timed out\n");
3064 
3065 	schedule_work(&eth->pending_work);
3066 }
3067 
3068 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3069 {
3070 	struct mtk_eth *eth = _eth;
3071 
3072 	eth->rx_events++;
3073 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
3074 		__napi_schedule(&eth->rx_napi);
3075 		mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3076 	}
3077 
3078 	return IRQ_HANDLED;
3079 }
3080 
3081 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3082 {
3083 	struct mtk_eth *eth = _eth;
3084 
3085 	eth->tx_events++;
3086 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
3087 		__napi_schedule(&eth->tx_napi);
3088 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3089 	}
3090 
3091 	return IRQ_HANDLED;
3092 }
3093 
3094 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3095 {
3096 	struct mtk_eth *eth = _eth;
3097 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3098 
3099 	if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3100 	    eth->soc->txrx.rx_irq_done_mask) {
3101 		if (mtk_r32(eth, reg_map->pdma.irq_status) &
3102 		    eth->soc->txrx.rx_irq_done_mask)
3103 			mtk_handle_irq_rx(irq, _eth);
3104 	}
3105 	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3106 		if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3107 			mtk_handle_irq_tx(irq, _eth);
3108 	}
3109 
3110 	return IRQ_HANDLED;
3111 }
3112 
3113 #ifdef CONFIG_NET_POLL_CONTROLLER
3114 static void mtk_poll_controller(struct net_device *dev)
3115 {
3116 	struct mtk_mac *mac = netdev_priv(dev);
3117 	struct mtk_eth *eth = mac->hw;
3118 
3119 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3120 	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3121 	mtk_handle_irq_rx(eth->irq[2], dev);
3122 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3123 	mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
3124 }
3125 #endif
3126 
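/* Set up the DMA rings and enable the TX/RX DMA engines with the global
 * configuration bits appropriate for this SoC generation.
 */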
3127 static int mtk_start_dma(struct mtk_eth *eth)
3128 {
3129 	u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3130 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3131 	int err;
3132 
3133 	err = mtk_dma_init(eth);
3134 	if (err) {
3135 		mtk_dma_free(eth);
3136 		return err;
3137 	}
3138 
3139 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3140 		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3141 		val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3142 		       MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3143 		       MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3144 
3145 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3146 			val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3147 			       MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3148 			       MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
3149 		else
3150 			val |= MTK_RX_BT_32DWORDS;
3151 		mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3152 
3153 		mtk_w32(eth,
3154 			MTK_RX_DMA_EN | rx_2b_offset |
3155 			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3156 			reg_map->pdma.glo_cfg);
3157 	} else {
3158 		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3159 			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3160 			reg_map->pdma.glo_cfg);
3161 	}
3162 
3163 	return 0;
3164 }
3165 
3166 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
3167 {
3168 	int i;
3169 
3170 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3171 		return;
3172 
3173 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3174 		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
3175 
3176 		/* by default, set up the forward port to send frames to the PDMA */
3177 		val &= ~0xffff;
3178 
3179 		/* Enable RX checksum */
3180 		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3181 
3182 		val |= config;
3183 
3184 		if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
3185 			val |= MTK_GDMA_SPECIAL_TAG;
3186 
3187 		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
3188 	}
3189 	/* Reset and enable PSE */
3190 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
3191 	mtk_w32(eth, 0, MTK_RST_GL);
3192 }
3193 
3194 
3195 static bool mtk_uses_dsa(struct net_device *dev)
3196 {
3197 #if IS_ENABLED(CONFIG_NET_DSA)
3198 	return netdev_uses_dsa(dev) &&
3199 	       dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3200 #else
3201 	return false;
3202 #endif
3203 }
3204 
3205 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3206 {
3207 	struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3208 	struct mtk_eth *eth = mac->hw;
3209 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3210 	struct ethtool_link_ksettings s;
3211 	struct net_device *ldev;
3212 	struct list_head *iter;
3213 	struct dsa_port *dp;
3214 
3215 	if (event != NETDEV_CHANGE)
3216 		return NOTIFY_DONE;
3217 
3218 	netdev_for_each_lower_dev(dev, ldev, iter) {
3219 		if (netdev_priv(ldev) == mac)
3220 			goto found;
3221 	}
3222 
3223 	return NOTIFY_DONE;
3224 
3225 found:
3226 	if (!dsa_slave_dev_check(dev))
3227 		return NOTIFY_DONE;
3228 
3229 	if (__ethtool_get_link_ksettings(dev, &s))
3230 		return NOTIFY_DONE;
3231 
3232 	if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3233 		return NOTIFY_DONE;
3234 
3235 	dp = dsa_port_from_netdev(dev);
3236 	if (dp->index >= MTK_QDMA_NUM_QUEUES)
3237 		return NOTIFY_DONE;
3238 
3239 	mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3240 
3241 	return NOTIFY_DONE;
3242 }
3243 
3244 static int mtk_open(struct net_device *dev)
3245 {
3246 	struct mtk_mac *mac = netdev_priv(dev);
3247 	struct mtk_eth *eth = mac->hw;
3248 	int i, err;
3249 
3250 	if (mtk_uses_dsa(dev) && !eth->prog) {
3251 		for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3252 			struct metadata_dst *md_dst = eth->dsa_meta[i];
3253 
3254 			if (md_dst)
3255 				continue;
3256 
3257 			md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3258 						    GFP_KERNEL);
3259 			if (!md_dst)
3260 				return -ENOMEM;
3261 
3262 			md_dst->u.port_info.port_id = i;
3263 			eth->dsa_meta[i] = md_dst;
3264 		}
3265 	} else {
3266 		/* Hardware special tag parsing needs to be disabled if at least
3267 		 * one MAC does not use DSA.
3268 		 */
3269 		u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3270 		val &= ~MTK_CDMP_STAG_EN;
3271 		mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3272 	}
3273 
3274 	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3275 	if (err) {
3276 		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3277 			   err);
3278 		return err;
3279 	}
3280 
3281 	/* we run 2 netdevs on the same dma ring so we only bring it up once */
3282 	if (!refcount_read(&eth->dma_refcnt)) {
3283 		const struct mtk_soc_data *soc = eth->soc;
3284 		u32 gdm_config;
3285 		int i;
3286 
3287 		err = mtk_start_dma(eth);
3288 		if (err) {
3289 			phylink_disconnect_phy(mac->phylink);
3290 			return err;
3291 		}
3292 
3293 		for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3294 			mtk_ppe_start(eth->ppe[i]);
3295 
3296 		gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
3297 						  : MTK_GDMA_TO_PDMA;
3298 		mtk_gdm_config(eth, gdm_config);
3299 
3300 		napi_enable(&eth->tx_napi);
3301 		napi_enable(&eth->rx_napi);
3302 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3303 		mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
3304 		refcount_set(&eth->dma_refcnt, 1);
3305 	} else {
3306 		refcount_inc(&eth->dma_refcnt);
3307 	}
3308 
3309 	phylink_start(mac->phylink);
3310 	netif_tx_start_all_queues(dev);
3311 
3312 	return 0;
3313 }
3314 
3315 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3316 {
3317 	u32 val;
3318 	int i;
3319 
3320 	/* stop the dma engine */
3321 	spin_lock_bh(&eth->page_lock);
3322 	val = mtk_r32(eth, glo_cfg);
3323 	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3324 		glo_cfg);
3325 	spin_unlock_bh(&eth->page_lock);
3326 
3327 	/* wait for dma stop */
3328 	for (i = 0; i < 10; i++) {
3329 		val = mtk_r32(eth, glo_cfg);
3330 		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3331 			msleep(20);
3332 			continue;
3333 		}
3334 		break;
3335 	}
3336 }
3337 
3338 static int mtk_stop(struct net_device *dev)
3339 {
3340 	struct mtk_mac *mac = netdev_priv(dev);
3341 	struct mtk_eth *eth = mac->hw;
3342 	int i;
3343 
3344 	phylink_stop(mac->phylink);
3345 
3346 	netif_tx_disable(dev);
3347 
3348 	phylink_disconnect_phy(mac->phylink);
3349 
3350 	/* only shutdown DMA if this is the last user */
3351 	if (!refcount_dec_and_test(&eth->dma_refcnt))
3352 		return 0;
3353 
3354 	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
3355 
3356 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3357 	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3358 	napi_disable(&eth->tx_napi);
3359 	napi_disable(&eth->rx_napi);
3360 
3361 	cancel_work_sync(&eth->rx_dim.work);
3362 	cancel_work_sync(&eth->tx_dim.work);
3363 
3364 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3365 		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3366 	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3367 
3368 	mtk_dma_free(eth);
3369 
3370 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3371 		mtk_ppe_stop(eth->ppe[i]);
3372 
3373 	return 0;
3374 }
3375 
3376 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3377 			 struct netlink_ext_ack *extack)
3378 {
3379 	struct mtk_mac *mac = netdev_priv(dev);
3380 	struct mtk_eth *eth = mac->hw;
3381 	struct bpf_prog *old_prog;
3382 	bool need_update;
3383 
3384 	if (eth->hwlro) {
3385 		NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3386 		return -EOPNOTSUPP;
3387 	}
3388 
3389 	if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3390 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3391 		return -EOPNOTSUPP;
3392 	}
3393 
3394 	need_update = !!eth->prog != !!prog;
3395 	if (netif_running(dev) && need_update)
3396 		mtk_stop(dev);
3397 
3398 	old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3399 	if (old_prog)
3400 		bpf_prog_put(old_prog);
3401 
3402 	if (netif_running(dev) && need_update)
3403 		return mtk_open(dev);
3404 
3405 	return 0;
3406 }
3407 
3408 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3409 {
3410 	switch (xdp->command) {
3411 	case XDP_SETUP_PROG:
3412 		return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3413 	default:
3414 		return -EINVAL;
3415 	}
3416 }
3417 
3418 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3419 {
3420 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3421 			   reset_bits,
3422 			   reset_bits);
3423 
3424 	usleep_range(1000, 1100);
3425 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3426 			   reset_bits,
3427 			   ~reset_bits);
3428 	mdelay(10);
3429 }
3430 
3431 static void mtk_clk_disable(struct mtk_eth *eth)
3432 {
3433 	int clk;
3434 
3435 	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3436 		clk_disable_unprepare(eth->clks[clk]);
3437 }
3438 
3439 static int mtk_clk_enable(struct mtk_eth *eth)
3440 {
3441 	int clk, ret;
3442 
3443 	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3444 		ret = clk_prepare_enable(eth->clks[clk]);
3445 		if (ret)
3446 			goto err_disable_clks;
3447 	}
3448 
3449 	return 0;
3450 
3451 err_disable_clks:
3452 	while (--clk >= 0)
3453 		clk_disable_unprepare(eth->clks[clk]);
3454 
3455 	return ret;
3456 }
3457 
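/* RX dynamic interrupt moderation worker: translate the current net_dim
 * profile into the PDMA (and, where present, QDMA) delay-interrupt settings.
 */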
3458 static void mtk_dim_rx(struct work_struct *work)
3459 {
3460 	struct dim *dim = container_of(work, struct dim, work);
3461 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3462 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3463 	struct dim_cq_moder cur_profile;
3464 	u32 val, cur;
3465 
3466 	cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3467 						dim->profile_ix);
3468 	spin_lock_bh(&eth->dim_lock);
3469 
3470 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3471 	val &= MTK_PDMA_DELAY_TX_MASK;
3472 	val |= MTK_PDMA_DELAY_RX_EN;
3473 
3474 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3475 	val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3476 
3477 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3478 	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3479 
3480 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3481 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3482 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3483 
3484 	spin_unlock_bh(&eth->dim_lock);
3485 
3486 	dim->state = DIM_START_MEASURE;
3487 }
3488 
3489 static void mtk_dim_tx(struct work_struct *work)
3490 {
3491 	struct dim *dim = container_of(work, struct dim, work);
3492 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3493 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3494 	struct dim_cq_moder cur_profile;
3495 	u32 val, cur;
3496 
3497 	cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3498 						dim->profile_ix);
3499 	spin_lock_bh(&eth->dim_lock);
3500 
3501 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3502 	val &= MTK_PDMA_DELAY_RX_MASK;
3503 	val |= MTK_PDMA_DELAY_TX_EN;
3504 
3505 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3506 	val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3507 
3508 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3509 	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3510 
3511 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3512 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3513 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3514 
3515 	spin_unlock_bh(&eth->dim_lock);
3516 
3517 	dim->state = DIM_START_MEASURE;
3518 }
3519 
3520 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3521 {
3522 	struct mtk_eth *eth = mac->hw;
3523 	u32 mcr_cur, mcr_new;
3524 
3525 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3526 		return;
3527 
3528 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3529 	mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3530 
3531 	if (val <= 1518)
3532 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3533 	else if (val <= 1536)
3534 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3535 	else if (val <= 1552)
3536 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3537 	else
3538 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3539 
3540 	if (mcr_new != mcr_cur)
3541 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3542 }
3543 
3544 static void mtk_hw_reset(struct mtk_eth *eth)
3545 {
3546 	u32 val;
3547 
3548 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3549 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3550 		val = RSTCTRL_PPE0_V2;
3551 	} else {
3552 		val = RSTCTRL_PPE0;
3553 	}
3554 
3555 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3556 		val |= RSTCTRL_PPE1;
3557 
3558 	ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3559 
3560 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3561 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3562 			     0x3ffffff);
3563 }
3564 
3565 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3566 {
3567 	u32 val;
3568 
3569 	regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3570 	return val;
3571 }
3572 
3573 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3574 {
3575 	u32 rst_mask, val;
3576 
3577 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3578 			   RSTCTRL_FE);
3579 	if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3580 				      val & RSTCTRL_FE, 1, 1000)) {
3581 		dev_err(eth->dev, "warm reset failed\n");
3582 		mtk_hw_reset(eth);
3583 		return;
3584 	}
3585 
3586 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3587 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3588 	else
3589 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3590 
3591 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3592 		rst_mask |= RSTCTRL_PPE1;
3593 
3594 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3595 
3596 	udelay(1);
3597 	val = mtk_hw_reset_read(eth);
3598 	if (!(val & rst_mask))
3599 		dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3600 			val, rst_mask);
3601 
3602 	rst_mask |= RSTCTRL_FE;
3603 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3604 
3605 	udelay(1);
3606 	val = mtk_hw_reset_read(eth);
3607 	if (val & rst_mask)
3608 		dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
3609 			val, rst_mask);
3610 }
3611 
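/* Heuristic DMA hang detection: sample WDMA, QDMA and ADMA state and only
 * report a hang after the same stuck condition has been seen on several
 * consecutive polls of the reset monitor.
 */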
3612 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3613 {
3614 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3615 	bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
3616 	bool oq_hang, cdm1_busy, adma_busy;
3617 	bool wtx_busy, cdm_full, oq_free;
3618 	u32 wdidx, val, gdm1_fc, gdm2_fc;
3619 	bool qfsm_hang, qfwd_hang;
3620 	bool ret = false;
3621 
3622 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3623 		return false;
3624 
3625 	/* WDMA sanity checks */
3626 	wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
3627 
3628 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
3629 	wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
3630 
3631 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
3632 	cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
3633 
3634 	oq_free  = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
3635 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
3636 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
3637 
3638 	if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
3639 		if (++eth->reset.wdma_hang_count > 2) {
3640 			eth->reset.wdma_hang_count = 0;
3641 			ret = true;
3642 		}
3643 		goto out;
3644 	}
3645 
3646 	/* QDMA sanity checks */
3647 	qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
3648 	qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
3649 
3650 	gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
3651 	gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
3652 	gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
3653 	gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
3654 	gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
3655 	gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
3656 
3657 	if (qfsm_hang && qfwd_hang &&
3658 	    ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
3659 	     (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
3660 		if (++eth->reset.qdma_hang_count > 2) {
3661 			eth->reset.qdma_hang_count = 0;
3662 			ret = true;
3663 		}
3664 		goto out;
3665 	}
3666 
3667 	/* ADMA sanity checks */
3668 	oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
3669 	cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
3670 	adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
3671 		    !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
3672 
3673 	if (oq_hang && cdm1_busy && adma_busy) {
3674 		if (++eth->reset.adma_hang_count > 2) {
3675 			eth->reset.adma_hang_count = 0;
3676 			ret = true;
3677 		}
3678 		goto out;
3679 	}
3680 
3681 	eth->reset.wdma_hang_count = 0;
3682 	eth->reset.qdma_hang_count = 0;
3683 	eth->reset.adma_hang_count = 0;
3684 out:
3685 	eth->reset.wdidx = wdidx;
3686 
3687 	return ret;
3688 }
3689 
3690 static void mtk_hw_reset_monitor_work(struct work_struct *work)
3691 {
3692 	struct delayed_work *del_work = to_delayed_work(work);
3693 	struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
3694 					   reset.monitor_work);
3695 
3696 	if (test_bit(MTK_RESETTING, &eth->state))
3697 		goto out;
3698 
3699 	/* DMA stuck checks */
3700 	if (mtk_hw_check_dma_hang(eth))
3701 		schedule_work(&eth->pending_work);
3702 
3703 out:
3704 	schedule_delayed_work(&eth->reset.monitor_work,
3705 			      MTK_DMA_MONITOR_TIMEOUT);
3706 }
3707 
3708 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
3709 {
3710 	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3711 		       ETHSYS_DMA_AG_MAP_PPE;
3712 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3713 	int i, val, ret;
3714 
3715 	if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
3716 		return 0;
3717 
3718 	if (!reset) {
3719 		pm_runtime_enable(eth->dev);
3720 		pm_runtime_get_sync(eth->dev);
3721 
3722 		ret = mtk_clk_enable(eth);
3723 		if (ret)
3724 			goto err_disable_pm;
3725 	}
3726 
3727 	if (eth->ethsys)
3728 		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3729 				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3730 
3731 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3732 		ret = device_reset(eth->dev);
3733 		if (ret) {
3734 			dev_err(eth->dev, "MAC reset failed!\n");
3735 			goto err_disable_pm;
3736 		}
3737 
3738 		/* set interrupt delays based on current Net DIM sample */
3739 		mtk_dim_rx(&eth->rx_dim.work);
3740 		mtk_dim_tx(&eth->tx_dim.work);
3741 
3742 		/* disable delay and normal interrupt */
3743 		mtk_tx_irq_disable(eth, ~0);
3744 		mtk_rx_irq_disable(eth, ~0);
3745 
3746 		return 0;
3747 	}
3748 
3749 	msleep(100);
3750 
3751 	if (reset)
3752 		mtk_hw_warm_reset(eth);
3753 	else
3754 		mtk_hw_reset(eth);
3755 
3756 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3757 		/* Set FE to PDMAv2 if necessary */
3758 		val = mtk_r32(eth, MTK_FE_GLO_MISC);
3759 		mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
3760 	}
3761 
3762 	if (eth->pctl) {
3763 		/* Set GE2 driving and slew rate */
3764 		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3765 
3766 		/* set GE2 TDSEL */
3767 		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3768 
3769 		/* set GE2 TUNE */
3770 		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3771 	}
3772 
3773 	/* Set link down as the default for each GMAC. Each GMAC's own MCR is
3774 	 * set up with a more appropriate value when mtk_mac_config() is
3775 	 * invoked.
3776 	 */
3777 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3778 		struct net_device *dev = eth->netdev[i];
3779 
3780 		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3781 		if (dev) {
3782 			struct mtk_mac *mac = netdev_priv(dev);
3783 
3784 			mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN);
3785 		}
3786 	}
3787 
3788 	/* Instruct the CDM to parse the MTK special tag on packets coming
3789 	 * from the CPU; this also works for untagged packets.
3790 	 */
3791 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3792 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3793 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3794 		val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3795 		mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3796 	}
3797 
	/* Enable RX VLAN offloading */
3799 	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3800 
3801 	/* set interrupt delays based on current Net DIM sample */
3802 	mtk_dim_rx(&eth->rx_dim.work);
3803 	mtk_dim_tx(&eth->tx_dim.work);
3804 
3805 	/* disable delay and normal interrupt */
3806 	mtk_tx_irq_disable(eth, ~0);
3807 	mtk_rx_irq_disable(eth, ~0);
3808 
3809 	/* FE int grouping */
3810 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3811 	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
3812 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3813 	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
3814 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3815 
3816 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3817 		/* PSE should not drop port8 and port9 packets from WDMA Tx */
3818 		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
3819 
3820 		/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
3821 		mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
3822 
3823 		/* PSE Free Queue Flow Control  */
3824 		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3825 
3826 		/* PSE config input queue threshold */
3827 		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3828 		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3829 		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3830 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3831 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3832 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3833 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
3834 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
3835 
3836 		/* PSE config output queue threshold */
3837 		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3838 		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3839 		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3840 		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3841 		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3842 		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3843 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3844 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
3845 
3846 		/* GDM and CDM Threshold */
3847 		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3848 		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3849 		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3850 		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3851 		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3852 		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
3853 	}
3854 
3855 	return 0;
3856 
3857 err_disable_pm:
3858 	if (!reset) {
3859 		pm_runtime_put_sync(eth->dev);
3860 		pm_runtime_disable(eth->dev);
3861 	}
3862 
3863 	return ret;
3864 }
3865 
3866 static int mtk_hw_deinit(struct mtk_eth *eth)
3867 {
3868 	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3869 		return 0;
3870 
3871 	mtk_clk_disable(eth);
3872 
3873 	pm_runtime_put_sync(eth->dev);
3874 	pm_runtime_disable(eth->dev);
3875 
3876 	return 0;
3877 }
3878 
3879 static int __init mtk_init(struct net_device *dev)
3880 {
3881 	struct mtk_mac *mac = netdev_priv(dev);
3882 	struct mtk_eth *eth = mac->hw;
3883 	int ret;
3884 
3885 	ret = of_get_ethdev_address(mac->of_node, dev);
3886 	if (ret) {
		/* If the MAC address is invalid, fall back to a random one */
3888 		eth_hw_addr_random(dev);
3889 		dev_err(eth->dev, "generated random MAC address %pM\n",
3890 			dev->dev_addr);
3891 	}
3892 
3893 	return 0;
3894 }
3895 
3896 static void mtk_uninit(struct net_device *dev)
3897 {
3898 	struct mtk_mac *mac = netdev_priv(dev);
3899 	struct mtk_eth *eth = mac->hw;
3900 
3901 	phylink_disconnect_phy(mac->phylink);
3902 	mtk_tx_irq_disable(eth, ~0);
3903 	mtk_rx_irq_disable(eth, ~0);
3904 }
3905 
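/* In XDP mode the RX buffers come from a page pool capped at
 * MTK_PP_MAX_BUF_SIZE, so reject any MTU that would not fit a full frame;
 * otherwise just update the per-MAC maximum RX length.
 */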
3906 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
3907 {
3908 	int length = new_mtu + MTK_RX_ETH_HLEN;
3909 	struct mtk_mac *mac = netdev_priv(dev);
3910 	struct mtk_eth *eth = mac->hw;
3911 
3912 	if (rcu_access_pointer(eth->prog) &&
3913 	    length > MTK_PP_MAX_BUF_SIZE) {
3914 		netdev_err(dev, "Invalid MTU for XDP mode\n");
3915 		return -EINVAL;
3916 	}
3917 
3918 	mtk_set_mcr_max_rx(mac, length);
3919 	dev->mtu = new_mtu;
3920 
3921 	return 0;
3922 }
3923 
3924 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3925 {
3926 	struct mtk_mac *mac = netdev_priv(dev);
3927 
3928 	switch (cmd) {
3929 	case SIOCGMIIPHY:
3930 	case SIOCGMIIREG:
3931 	case SIOCSMIIREG:
3932 		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
3933 	default:
3934 		break;
3935 	}
3936 
3937 	return -EOPNOTSUPP;
3938 }
3939 
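/* Quiesce the hardware before a warm reset: take the FE ports P3/P4 link
 * down, prepare the PPEs for reset, mask all NETSYS interrupts and drop
 * the forced-link bits on both GMACs.
 */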
3940 static void mtk_prepare_for_reset(struct mtk_eth *eth)
3941 {
3942 	u32 val;
3943 	int i;
3944 
	/* disable FE P3 and P4 */
3946 	val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3;
3947 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3948 		val |= MTK_FE_LINK_DOWN_P4;
3949 	mtk_w32(eth, val, MTK_FE_GLO_CFG);
3950 
3951 	/* adjust PPE configurations to prepare for reset */
3952 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3953 		mtk_ppe_prepare_reset(eth->ppe[i]);
3954 
3955 	/* disable NETSYS interrupts */
3956 	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
3957 
3958 	/* force link down GMAC */
3959 	for (i = 0; i < 2; i++) {
3960 		val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
3961 		mtk_w32(eth, val, MTK_MAC_MCR(i));
3962 	}
3963 }
3964 
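/* Recovery worker scheduled when a hardware reset is required (e.g. by the
 * DMA hang monitor above). Under the RTNL lock it stops every running
 * netdev, warm-resets and re-initialises the hardware via
 * mtk_hw_init(eth, true), then reopens the interfaces that were up and
 * re-enables the FE ports.
 */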
3965 static void mtk_pending_work(struct work_struct *work)
3966 {
3967 	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
3968 	unsigned long restart = 0;
3969 	u32 val;
3970 	int i;
3971 
3972 	rtnl_lock();
3973 	set_bit(MTK_RESETTING, &eth->state);
3974 
3975 	mtk_prepare_for_reset(eth);
3976 	mtk_wed_fe_reset();
	/* Run the reset preliminary configuration again to avoid any
	 * possible race during the FE reset, since it can run with the RTNL
	 * lock released.
	 */
3980 	mtk_prepare_for_reset(eth);
3981 
3982 	/* stop all devices to make sure that dma is properly shut down */
3983 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3984 		if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
3985 			continue;
3986 
3987 		mtk_stop(eth->netdev[i]);
3988 		__set_bit(i, &restart);
3989 	}
3990 
3991 	usleep_range(15000, 16000);
3992 
3993 	if (eth->dev->pins)
3994 		pinctrl_select_state(eth->dev->pins->p,
3995 				     eth->dev->pins->default_state);
3996 	mtk_hw_init(eth, true);
3997 
3998 	/* restart DMA and enable IRQs */
3999 	for (i = 0; i < MTK_MAC_COUNT; i++) {
4000 		if (!test_bit(i, &restart))
4001 			continue;
4002 
4003 		if (mtk_open(eth->netdev[i])) {
4004 			netif_alert(eth, ifup, eth->netdev[i],
4005 				    "Driver up/down cycle failed\n");
4006 			dev_close(eth->netdev[i]);
4007 		}
4008 	}
4009 
	/* enable FE P3 and P4 */
4011 	val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3;
4012 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4013 		val &= ~MTK_FE_LINK_DOWN_P4;
4014 	mtk_w32(eth, val, MTK_FE_GLO_CFG);
4015 
4016 	clear_bit(MTK_RESETTING, &eth->state);
4017 
4018 	mtk_wed_fe_reset_complete();
4019 
4020 	rtnl_unlock();
4021 }
4022 
4023 static int mtk_free_dev(struct mtk_eth *eth)
4024 {
4025 	int i;
4026 
4027 	for (i = 0; i < MTK_MAC_COUNT; i++) {
4028 		if (!eth->netdev[i])
4029 			continue;
4030 		free_netdev(eth->netdev[i]);
4031 	}
4032 
4033 	for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4034 		if (!eth->dsa_meta[i])
4035 			break;
4036 		metadata_dst_free(eth->dsa_meta[i]);
4037 	}
4038 
4039 	return 0;
4040 }
4041 
4042 static int mtk_unreg_dev(struct mtk_eth *eth)
4043 {
4044 	int i;
4045 
4046 	for (i = 0; i < MTK_MAC_COUNT; i++) {
		struct mtk_mac *mac;

		if (!eth->netdev[i])
4049 			continue;
4050 		mac = netdev_priv(eth->netdev[i]);
4051 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4052 			unregister_netdevice_notifier(&mac->device_notifier);
4053 		unregister_netdev(eth->netdev[i]);
4054 	}
4055 
4056 	return 0;
4057 }
4058 
4059 static int mtk_cleanup(struct mtk_eth *eth)
4060 {
4061 	mtk_unreg_dev(eth);
4062 	mtk_free_dev(eth);
4063 	cancel_work_sync(&eth->pending_work);
4064 	cancel_delayed_work_sync(&eth->reset.monitor_work);
4065 
4066 	return 0;
4067 }
4068 
4069 static int mtk_get_link_ksettings(struct net_device *ndev,
4070 				  struct ethtool_link_ksettings *cmd)
4071 {
4072 	struct mtk_mac *mac = netdev_priv(ndev);
4073 
4074 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4075 		return -EBUSY;
4076 
4077 	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4078 }
4079 
4080 static int mtk_set_link_ksettings(struct net_device *ndev,
4081 				  const struct ethtool_link_ksettings *cmd)
4082 {
4083 	struct mtk_mac *mac = netdev_priv(ndev);
4084 
4085 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4086 		return -EBUSY;
4087 
4088 	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4089 }
4090 
4091 static void mtk_get_drvinfo(struct net_device *dev,
4092 			    struct ethtool_drvinfo *info)
4093 {
4094 	struct mtk_mac *mac = netdev_priv(dev);
4095 
4096 	strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4097 	strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4098 	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4099 }
4100 
4101 static u32 mtk_get_msglevel(struct net_device *dev)
4102 {
4103 	struct mtk_mac *mac = netdev_priv(dev);
4104 
4105 	return mac->hw->msg_enable;
4106 }
4107 
4108 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4109 {
4110 	struct mtk_mac *mac = netdev_priv(dev);
4111 
4112 	mac->hw->msg_enable = value;
4113 }
4114 
4115 static int mtk_nway_reset(struct net_device *dev)
4116 {
4117 	struct mtk_mac *mac = netdev_priv(dev);
4118 
4119 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4120 		return -EBUSY;
4121 
4122 	if (!mac->phylink)
4123 		return -ENOTSUPP;
4124 
4125 	return phylink_ethtool_nway_reset(mac->phylink);
4126 }
4127 
4128 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4129 {
4130 	int i;
4131 
4132 	switch (stringset) {
4133 	case ETH_SS_STATS: {
4134 		struct mtk_mac *mac = netdev_priv(dev);
4135 
4136 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
4137 			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
4138 			data += ETH_GSTRING_LEN;
4139 		}
4140 		if (mtk_page_pool_enabled(mac->hw))
4141 			page_pool_ethtool_stats_get_strings(data);
4142 		break;
4143 	}
4144 	default:
4145 		break;
4146 	}
4147 }
4148 
4149 static int mtk_get_sset_count(struct net_device *dev, int sset)
4150 {
4151 	switch (sset) {
4152 	case ETH_SS_STATS: {
4153 		int count = ARRAY_SIZE(mtk_ethtool_stats);
4154 		struct mtk_mac *mac = netdev_priv(dev);
4155 
4156 		if (mtk_page_pool_enabled(mac->hw))
4157 			count += page_pool_ethtool_stats_get_count();
4158 		return count;
4159 	}
4160 	default:
4161 		return -EOPNOTSUPP;
4162 	}
4163 }
4164 
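/* Fold the page_pool statistics of every RX ring that uses a page pool
 * into one aggregate and append them to the ethtool data array.
 */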
4165 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4166 {
4167 	struct page_pool_stats stats = {};
4168 	int i;
4169 
4170 	for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4171 		struct mtk_rx_ring *ring = &eth->rx_ring[i];
4172 
4173 		if (!ring->page_pool)
4174 			continue;
4175 
4176 		page_pool_get_stats(ring->page_pool, &stats);
4177 	}
4178 	page_pool_ethtool_stats_get(data, &stats);
4179 }
4180 
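/* Copy the per-MAC hardware counters into the ethtool data array. The copy
 * is retried under the u64_stats seqcount until it is consistent with any
 * concurrent update; page_pool stats are appended when page pools are in
 * use.
 */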
4181 static void mtk_get_ethtool_stats(struct net_device *dev,
4182 				  struct ethtool_stats *stats, u64 *data)
4183 {
4184 	struct mtk_mac *mac = netdev_priv(dev);
4185 	struct mtk_hw_stats *hwstats = mac->hw_stats;
4186 	u64 *data_src, *data_dst;
4187 	unsigned int start;
4188 	int i;
4189 
4190 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4191 		return;
4192 
4193 	if (netif_running(dev) && netif_device_present(dev)) {
4194 		if (spin_trylock_bh(&hwstats->stats_lock)) {
4195 			mtk_stats_update_mac(mac);
4196 			spin_unlock_bh(&hwstats->stats_lock);
4197 		}
4198 	}
4199 
4200 	data_src = (u64 *)hwstats;
4201 
4202 	do {
4203 		data_dst = data;
4204 		start = u64_stats_fetch_begin(&hwstats->syncp);
4205 
4206 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4207 			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4208 		if (mtk_page_pool_enabled(mac->hw))
4209 			mtk_ethtool_pp_stats(mac->hw, data_dst);
4210 	} while (u64_stats_fetch_retry(&hwstats->syncp, start));
4211 }
4212 
4213 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4214 			 u32 *rule_locs)
4215 {
4216 	int ret = -EOPNOTSUPP;
4217 
4218 	switch (cmd->cmd) {
4219 	case ETHTOOL_GRXRINGS:
4220 		if (dev->hw_features & NETIF_F_LRO) {
4221 			cmd->data = MTK_MAX_RX_RING_NUM;
4222 			ret = 0;
4223 		}
4224 		break;
4225 	case ETHTOOL_GRXCLSRLCNT:
4226 		if (dev->hw_features & NETIF_F_LRO) {
4227 			struct mtk_mac *mac = netdev_priv(dev);
4228 
4229 			cmd->rule_cnt = mac->hwlro_ip_cnt;
4230 			ret = 0;
4231 		}
4232 		break;
4233 	case ETHTOOL_GRXCLSRULE:
4234 		if (dev->hw_features & NETIF_F_LRO)
4235 			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4236 		break;
4237 	case ETHTOOL_GRXCLSRLALL:
4238 		if (dev->hw_features & NETIF_F_LRO)
4239 			ret = mtk_hwlro_get_fdir_all(dev, cmd,
4240 						     rule_locs);
4241 		break;
4242 	default:
4243 		break;
4244 	}
4245 
4246 	return ret;
4247 }
4248 
4249 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4250 {
4251 	int ret = -EOPNOTSUPP;
4252 
4253 	switch (cmd->cmd) {
4254 	case ETHTOOL_SRXCLSRLINS:
4255 		if (dev->hw_features & NETIF_F_LRO)
4256 			ret = mtk_hwlro_add_ipaddr(dev, cmd);
4257 		break;
4258 	case ETHTOOL_SRXCLSRLDEL:
4259 		if (dev->hw_features & NETIF_F_LRO)
4260 			ret = mtk_hwlro_del_ipaddr(dev, cmd);
4261 		break;
4262 	default:
4263 		break;
4264 	}
4265 
4266 	return ret;
4267 }
4268 
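/* TX queue selection: frames from DSA user ports use their skb queue
 * mapping plus an offset of 3, everything else uses the queue matching the
 * MAC id; values beyond the number of TX queues fall back to queue 0.
 */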
4269 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4270 			    struct net_device *sb_dev)
4271 {
4272 	struct mtk_mac *mac = netdev_priv(dev);
4273 	unsigned int queue = 0;
4274 
4275 	if (netdev_uses_dsa(dev))
4276 		queue = skb_get_queue_mapping(skb) + 3;
4277 	else
4278 		queue = mac->id;
4279 
4280 	if (queue >= dev->num_tx_queues)
4281 		queue = 0;
4282 
4283 	return queue;
4284 }
4285 
4286 static const struct ethtool_ops mtk_ethtool_ops = {
4287 	.get_link_ksettings	= mtk_get_link_ksettings,
4288 	.set_link_ksettings	= mtk_set_link_ksettings,
4289 	.get_drvinfo		= mtk_get_drvinfo,
4290 	.get_msglevel		= mtk_get_msglevel,
4291 	.set_msglevel		= mtk_set_msglevel,
4292 	.nway_reset		= mtk_nway_reset,
4293 	.get_link		= ethtool_op_get_link,
4294 	.get_strings		= mtk_get_strings,
4295 	.get_sset_count		= mtk_get_sset_count,
4296 	.get_ethtool_stats	= mtk_get_ethtool_stats,
4297 	.get_rxnfc		= mtk_get_rxnfc,
4298 	.set_rxnfc              = mtk_set_rxnfc,
4299 };
4300 
4301 static const struct net_device_ops mtk_netdev_ops = {
4302 	.ndo_init		= mtk_init,
4303 	.ndo_uninit		= mtk_uninit,
4304 	.ndo_open		= mtk_open,
4305 	.ndo_stop		= mtk_stop,
4306 	.ndo_start_xmit		= mtk_start_xmit,
4307 	.ndo_set_mac_address	= mtk_set_mac_address,
4308 	.ndo_validate_addr	= eth_validate_addr,
4309 	.ndo_eth_ioctl		= mtk_do_ioctl,
4310 	.ndo_change_mtu		= mtk_change_mtu,
4311 	.ndo_tx_timeout		= mtk_tx_timeout,
4312 	.ndo_get_stats64        = mtk_get_stats64,
4313 	.ndo_fix_features	= mtk_fix_features,
4314 	.ndo_set_features	= mtk_set_features,
4315 #ifdef CONFIG_NET_POLL_CONTROLLER
4316 	.ndo_poll_controller	= mtk_poll_controller,
4317 #endif
4318 	.ndo_setup_tc		= mtk_eth_setup_tc,
4319 	.ndo_bpf		= mtk_xdp,
4320 	.ndo_xdp_xmit		= mtk_xdp_xmit,
4321 	.ndo_select_queue	= mtk_select_queue,
4322 };
4323 
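/* Instantiate one netdev per "mediatek,eth-mac" DT child: allocate the
 * net_device and per-MAC state, set up the hardware counter area, create
 * the phylink instance from the DT phy-mode and the SoC capabilities, and
 * wire up the netdev/ethtool ops, features and MTU limits.
 */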
4324 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4325 {
4326 	const __be32 *_id = of_get_property(np, "reg", NULL);
4327 	phy_interface_t phy_mode;
4328 	struct phylink *phylink;
4329 	struct mtk_mac *mac;
4330 	int id, err;
4331 	int txqs = 1;
4332 
4333 	if (!_id) {
4334 		dev_err(eth->dev, "missing mac id\n");
4335 		return -EINVAL;
4336 	}
4337 
4338 	id = be32_to_cpup(_id);
4339 	if (id >= MTK_MAC_COUNT) {
4340 		dev_err(eth->dev, "%d is not a valid mac id\n", id);
4341 		return -EINVAL;
4342 	}
4343 
4344 	if (eth->netdev[id]) {
4345 		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4346 		return -EINVAL;
4347 	}
4348 
4349 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4350 		txqs = MTK_QDMA_NUM_QUEUES;
4351 
4352 	eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4353 	if (!eth->netdev[id]) {
4354 		dev_err(eth->dev, "alloc_etherdev failed\n");
4355 		return -ENOMEM;
4356 	}
4357 	mac = netdev_priv(eth->netdev[id]);
4358 	eth->mac[id] = mac;
4359 	mac->id = id;
4360 	mac->hw = eth;
4361 	mac->of_node = np;
4362 
4363 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4364 	mac->hwlro_ip_cnt = 0;
4365 
4366 	mac->hw_stats = devm_kzalloc(eth->dev,
4367 				     sizeof(*mac->hw_stats),
4368 				     GFP_KERNEL);
4369 	if (!mac->hw_stats) {
4370 		dev_err(eth->dev, "failed to allocate counter memory\n");
4371 		err = -ENOMEM;
4372 		goto free_netdev;
4373 	}
4374 	spin_lock_init(&mac->hw_stats->stats_lock);
4375 	u64_stats_init(&mac->hw_stats->syncp);
4376 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
4377 
4378 	/* phylink create */
4379 	err = of_get_phy_mode(np, &phy_mode);
4380 	if (err) {
4381 		dev_err(eth->dev, "incorrect phy-mode\n");
4382 		goto free_netdev;
4383 	}
4384 
4385 	/* mac config is not set */
4386 	mac->interface = PHY_INTERFACE_MODE_NA;
4387 	mac->speed = SPEED_UNKNOWN;
4388 
4389 	mac->phylink_config.dev = &eth->netdev[id]->dev;
4390 	mac->phylink_config.type = PHYLINK_NETDEV;
4391 	/* This driver makes use of state->speed in mac_config */
4392 	mac->phylink_config.legacy_pre_march2020 = true;
4393 	mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4394 		MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4395 
4396 	__set_bit(PHY_INTERFACE_MODE_MII,
4397 		  mac->phylink_config.supported_interfaces);
4398 	__set_bit(PHY_INTERFACE_MODE_GMII,
4399 		  mac->phylink_config.supported_interfaces);
4400 
4401 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4402 		phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4403 
4404 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4405 		__set_bit(PHY_INTERFACE_MODE_TRGMII,
4406 			  mac->phylink_config.supported_interfaces);
4407 
4408 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4409 		__set_bit(PHY_INTERFACE_MODE_SGMII,
4410 			  mac->phylink_config.supported_interfaces);
4411 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
4412 			  mac->phylink_config.supported_interfaces);
4413 		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
4414 			  mac->phylink_config.supported_interfaces);
4415 	}
4416 
4417 	phylink = phylink_create(&mac->phylink_config,
4418 				 of_fwnode_handle(mac->of_node),
4419 				 phy_mode, &mtk_phylink_ops);
4420 	if (IS_ERR(phylink)) {
4421 		err = PTR_ERR(phylink);
4422 		goto free_netdev;
4423 	}
4424 
4425 	mac->phylink = phylink;
4426 
4427 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4428 	eth->netdev[id]->watchdog_timeo = 5 * HZ;
4429 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4430 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
4431 
4432 	eth->netdev[id]->hw_features = eth->soc->hw_features;
4433 	if (eth->hwlro)
4434 		eth->netdev[id]->hw_features |= NETIF_F_LRO;
4435 
4436 	eth->netdev[id]->vlan_features = eth->soc->hw_features &
4437 		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
4438 	eth->netdev[id]->features |= eth->soc->hw_features;
4439 	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4440 
4441 	eth->netdev[id]->irq = eth->irq[0];
4442 	eth->netdev[id]->dev.of_node = np;
4443 
4444 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4445 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4446 	else
4447 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4448 
4449 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4450 		mac->device_notifier.notifier_call = mtk_device_event;
4451 		register_netdevice_notifier(&mac->device_notifier);
4452 	}
4453 
4454 	if (mtk_page_pool_enabled(eth))
4455 		eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
4456 						NETDEV_XDP_ACT_REDIRECT |
4457 						NETDEV_XDP_ACT_NDO_XMIT |
4458 						NETDEV_XDP_ACT_NDO_XMIT_SG;
4459 
4460 	return 0;
4461 
4462 free_netdev:
4463 	free_netdev(eth->netdev[id]);
4464 	return err;
4465 }
4466 
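/* Switch the device used for DMA mapping (called, e.g., by the WED offload
 * code): close every interface that is up, swap eth->dma_dev and reopen
 * them so that their DMA rings are reallocated against the new device.
 */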
4467 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4468 {
4469 	struct net_device *dev, *tmp;
4470 	LIST_HEAD(dev_list);
4471 	int i;
4472 
4473 	rtnl_lock();
4474 
4475 	for (i = 0; i < MTK_MAC_COUNT; i++) {
4476 		dev = eth->netdev[i];
4477 
4478 		if (!dev || !(dev->flags & IFF_UP))
4479 			continue;
4480 
4481 		list_add_tail(&dev->close_list, &dev_list);
4482 	}
4483 
4484 	dev_close_many(&dev_list, false);
4485 
4486 	eth->dma_dev = dma_dev;
4487 
4488 	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4489 		list_del_init(&dev->close_list);
4490 		dev_open(dev, NULL);
4491 	}
4492 
4493 	rtnl_unlock();
4494 }
4495 
4496 static int mtk_probe(struct platform_device *pdev)
4497 {
4498 	struct resource *res = NULL;
4499 	struct device_node *mac_np;
4500 	struct mtk_eth *eth;
4501 	int err, i;
4502 
4503 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4504 	if (!eth)
4505 		return -ENOMEM;
4506 
4507 	eth->soc = of_device_get_match_data(&pdev->dev);
4508 
4509 	eth->dev = &pdev->dev;
4510 	eth->dma_dev = &pdev->dev;
4511 	eth->base = devm_platform_ioremap_resource(pdev, 0);
4512 	if (IS_ERR(eth->base))
4513 		return PTR_ERR(eth->base);
4514 
4515 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4516 		eth->ip_align = NET_IP_ALIGN;
4517 
4518 	spin_lock_init(&eth->page_lock);
4519 	spin_lock_init(&eth->tx_irq_lock);
4520 	spin_lock_init(&eth->rx_irq_lock);
4521 	spin_lock_init(&eth->dim_lock);
4522 
4523 	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4524 	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
4525 	INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
4526 
4527 	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4528 	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
4529 
4530 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4531 		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4532 							      "mediatek,ethsys");
4533 		if (IS_ERR(eth->ethsys)) {
4534 			dev_err(&pdev->dev, "no ethsys regmap found\n");
4535 			return PTR_ERR(eth->ethsys);
4536 		}
4537 	}
4538 
4539 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4540 		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4541 							     "mediatek,infracfg");
4542 		if (IS_ERR(eth->infra)) {
4543 			dev_err(&pdev->dev, "no infracfg regmap found\n");
4544 			return PTR_ERR(eth->infra);
4545 		}
4546 	}
4547 
4548 	if (of_dma_is_coherent(pdev->dev.of_node)) {
4549 		struct regmap *cci;
4550 
4551 		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4552 						      "cci-control-port");
4553 		/* enable CPU/bus coherency */
4554 		if (!IS_ERR(cci))
4555 			regmap_write(cci, 0, 3);
4556 	}
4557 
4558 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
4559 		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
4560 					  GFP_KERNEL);
4561 		if (!eth->sgmii)
4562 			return -ENOMEM;
4563 
4564 		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
4565 				     eth->soc->ana_rgc3);
4566 
4567 		if (err)
4568 			return err;
4569 	}
4570 
4571 	if (eth->soc->required_pctl) {
4572 		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4573 							    "mediatek,pctl");
4574 		if (IS_ERR(eth->pctl)) {
4575 			dev_err(&pdev->dev, "no pctl regmap found\n");
4576 			return PTR_ERR(eth->pctl);
4577 		}
4578 	}
4579 
4580 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
4581 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4582 		if (!res)
4583 			return -EINVAL;
4584 	}
4585 
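	/* When flow offload is supported, register each WDMA block referenced
	 * by a "mediatek,wed" phandle with the WED driver, passing both its
	 * MMIO mapping and its physical address.
	 */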
4586 	if (eth->soc->offload_version) {
4587 		for (i = 0;; i++) {
4588 			struct device_node *np;
4589 			phys_addr_t wdma_phy;
4590 			u32 wdma_base;
4591 
4592 			if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4593 				break;
4594 
4595 			np = of_parse_phandle(pdev->dev.of_node,
4596 					      "mediatek,wed", i);
4597 			if (!np)
4598 				break;
4599 
4600 			wdma_base = eth->soc->reg_map->wdma_base[i];
4601 			wdma_phy = res ? res->start + wdma_base : 0;
4602 			mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4603 				       wdma_phy, i);
4604 		}
4605 	}
4606 
4607 	for (i = 0; i < 3; i++) {
4608 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4609 			eth->irq[i] = eth->irq[0];
4610 		else
4611 			eth->irq[i] = platform_get_irq(pdev, i);
4612 		if (eth->irq[i] < 0) {
4613 			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4614 			err = -ENXIO;
4615 			goto err_wed_exit;
4616 		}
4617 	}
4618 	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4619 		eth->clks[i] = devm_clk_get(eth->dev,
4620 					    mtk_clks_source_name[i]);
4621 		if (IS_ERR(eth->clks[i])) {
4622 			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4623 				err = -EPROBE_DEFER;
4624 				goto err_wed_exit;
4625 			}
4626 			if (eth->soc->required_clks & BIT(i)) {
4627 				dev_err(&pdev->dev, "clock %s not found\n",
4628 					mtk_clks_source_name[i]);
4629 				err = -EINVAL;
4630 				goto err_wed_exit;
4631 			}
4632 			eth->clks[i] = NULL;
4633 		}
4634 	}
4635 
4636 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4637 	INIT_WORK(&eth->pending_work, mtk_pending_work);
4638 
4639 	err = mtk_hw_init(eth, false);
4640 	if (err)
4641 		goto err_wed_exit;
4642 
4643 	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4644 
4645 	for_each_child_of_node(pdev->dev.of_node, mac_np) {
4646 		if (!of_device_is_compatible(mac_np,
4647 					     "mediatek,eth-mac"))
4648 			continue;
4649 
4650 		if (!of_device_is_available(mac_np))
4651 			continue;
4652 
4653 		err = mtk_add_mac(eth, mac_np);
4654 		if (err) {
4655 			of_node_put(mac_np);
4656 			goto err_deinit_hw;
4657 		}
4658 	}
4659 
4660 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4661 		err = devm_request_irq(eth->dev, eth->irq[0],
4662 				       mtk_handle_irq, 0,
4663 				       dev_name(eth->dev), eth);
4664 	} else {
4665 		err = devm_request_irq(eth->dev, eth->irq[1],
4666 				       mtk_handle_irq_tx, 0,
4667 				       dev_name(eth->dev), eth);
4668 		if (err)
4669 			goto err_free_dev;
4670 
4671 		err = devm_request_irq(eth->dev, eth->irq[2],
4672 				       mtk_handle_irq_rx, 0,
4673 				       dev_name(eth->dev), eth);
4674 	}
4675 	if (err)
4676 		goto err_free_dev;
4677 
4678 	/* No MT7628/88 support yet */
4679 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4680 		err = mtk_mdio_init(eth);
4681 		if (err)
4682 			goto err_free_dev;
4683 	}
4684 
4685 	if (eth->soc->offload_version) {
4686 		u32 num_ppe;
4687 
4688 		num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
4689 		num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
4690 		for (i = 0; i < num_ppe; i++) {
4691 			u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
4692 
4693 			eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
4694 						   eth->soc->offload_version, i);
4695 			if (!eth->ppe[i]) {
4696 				err = -ENOMEM;
4697 				goto err_deinit_ppe;
4698 			}
4699 		}
4700 
4701 		err = mtk_eth_offload_init(eth);
4702 		if (err)
4703 			goto err_deinit_ppe;
4704 	}
4705 
4706 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4707 		if (!eth->netdev[i])
4708 			continue;
4709 
4710 		err = register_netdev(eth->netdev[i]);
4711 		if (err) {
4712 			dev_err(eth->dev, "error bringing up device\n");
4713 			goto err_deinit_ppe;
		}

		netif_info(eth, probe, eth->netdev[i],
			   "mediatek frame engine at 0x%08lx, irq %d\n",
			   eth->netdev[i]->base_addr, eth->irq[0]);
4718 	}
4719 
	/* We run 2 devices on the same DMA ring, so we need a dummy device
	 * for NAPI to work.
	 */
4723 	init_dummy_netdev(&eth->dummy_dev);
4724 	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
4725 	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
4726 
4727 	platform_set_drvdata(pdev, eth);
4728 	schedule_delayed_work(&eth->reset.monitor_work,
4729 			      MTK_DMA_MONITOR_TIMEOUT);
4730 
4731 	return 0;
4732 
4733 err_deinit_ppe:
4734 	mtk_ppe_deinit(eth);
4735 	mtk_mdio_cleanup(eth);
4736 err_free_dev:
4737 	mtk_free_dev(eth);
4738 err_deinit_hw:
4739 	mtk_hw_deinit(eth);
4740 err_wed_exit:
4741 	mtk_wed_exit();
4742 
4743 	return err;
4744 }
4745 
4746 static int mtk_remove(struct platform_device *pdev)
4747 {
4748 	struct mtk_eth *eth = platform_get_drvdata(pdev);
4749 	struct mtk_mac *mac;
4750 	int i;
4751 
4752 	/* stop all devices to make sure that dma is properly shut down */
4753 	for (i = 0; i < MTK_MAC_COUNT; i++) {
4754 		if (!eth->netdev[i])
4755 			continue;
4756 		mtk_stop(eth->netdev[i]);
4757 		mac = netdev_priv(eth->netdev[i]);
4758 		phylink_disconnect_phy(mac->phylink);
4759 	}
4760 
4761 	mtk_wed_exit();
4762 	mtk_hw_deinit(eth);
4763 
4764 	netif_napi_del(&eth->tx_napi);
4765 	netif_napi_del(&eth->rx_napi);
4766 	mtk_cleanup(eth);
4767 	mtk_mdio_cleanup(eth);
4768 
4769 	return 0;
4770 }
4771 
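/* Per-SoC configuration: register map, capability bits, required clocks and
 * pinctrl, flow-offload parameters and the TX/RX descriptor layout used by
 * the DMA code.
 */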
4772 static const struct mtk_soc_data mt2701_data = {
4773 	.reg_map = &mtk_reg_map,
4774 	.caps = MT7623_CAPS | MTK_HWLRO,
4775 	.hw_features = MTK_HW_FEATURES,
4776 	.required_clks = MT7623_CLKS_BITMAP,
4777 	.required_pctl = true,
4778 	.txrx = {
4779 		.txd_size = sizeof(struct mtk_tx_dma),
4780 		.rxd_size = sizeof(struct mtk_rx_dma),
4781 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4782 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4783 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4784 		.dma_len_offset = 16,
4785 	},
4786 };
4787 
4788 static const struct mtk_soc_data mt7621_data = {
4789 	.reg_map = &mtk_reg_map,
4790 	.caps = MT7621_CAPS,
4791 	.hw_features = MTK_HW_FEATURES,
4792 	.required_clks = MT7621_CLKS_BITMAP,
4793 	.required_pctl = false,
4794 	.offload_version = 1,
4795 	.hash_offset = 2,
4796 	.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4797 	.txrx = {
4798 		.txd_size = sizeof(struct mtk_tx_dma),
4799 		.rxd_size = sizeof(struct mtk_rx_dma),
4800 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4801 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4802 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4803 		.dma_len_offset = 16,
4804 	},
4805 };
4806 
4807 static const struct mtk_soc_data mt7622_data = {
4808 	.reg_map = &mtk_reg_map,
4809 	.ana_rgc3 = 0x2028,
4810 	.caps = MT7622_CAPS | MTK_HWLRO,
4811 	.hw_features = MTK_HW_FEATURES,
4812 	.required_clks = MT7622_CLKS_BITMAP,
4813 	.required_pctl = false,
4814 	.offload_version = 2,
4815 	.hash_offset = 2,
4816 	.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4817 	.txrx = {
4818 		.txd_size = sizeof(struct mtk_tx_dma),
4819 		.rxd_size = sizeof(struct mtk_rx_dma),
4820 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4821 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4822 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4823 		.dma_len_offset = 16,
4824 	},
4825 };
4826 
4827 static const struct mtk_soc_data mt7623_data = {
4828 	.reg_map = &mtk_reg_map,
4829 	.caps = MT7623_CAPS | MTK_HWLRO,
4830 	.hw_features = MTK_HW_FEATURES,
4831 	.required_clks = MT7623_CLKS_BITMAP,
4832 	.required_pctl = true,
4833 	.offload_version = 1,
4834 	.hash_offset = 2,
4835 	.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4836 	.txrx = {
4837 		.txd_size = sizeof(struct mtk_tx_dma),
4838 		.rxd_size = sizeof(struct mtk_rx_dma),
4839 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4840 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4841 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4842 		.dma_len_offset = 16,
4843 	},
4844 };
4845 
4846 static const struct mtk_soc_data mt7629_data = {
4847 	.reg_map = &mtk_reg_map,
4848 	.ana_rgc3 = 0x128,
4849 	.caps = MT7629_CAPS | MTK_HWLRO,
4850 	.hw_features = MTK_HW_FEATURES,
4851 	.required_clks = MT7629_CLKS_BITMAP,
4852 	.required_pctl = false,
4853 	.txrx = {
4854 		.txd_size = sizeof(struct mtk_tx_dma),
4855 		.rxd_size = sizeof(struct mtk_rx_dma),
4856 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4857 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
4858 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4859 		.dma_len_offset = 16,
4860 	},
4861 };
4862 
4863 static const struct mtk_soc_data mt7986_data = {
4864 	.reg_map = &mt7986_reg_map,
4865 	.ana_rgc3 = 0x128,
4866 	.caps = MT7986_CAPS,
4867 	.hw_features = MTK_HW_FEATURES,
4868 	.required_clks = MT7986_CLKS_BITMAP,
4869 	.required_pctl = false,
4870 	.offload_version = 2,
4871 	.hash_offset = 4,
4872 	.foe_entry_size = sizeof(struct mtk_foe_entry),
4873 	.txrx = {
4874 		.txd_size = sizeof(struct mtk_tx_dma_v2),
4875 		.rxd_size = sizeof(struct mtk_rx_dma_v2),
4876 		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
4877 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
4878 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4879 		.dma_len_offset = 8,
4880 	},
4881 };
4882 
4883 static const struct mtk_soc_data rt5350_data = {
4884 	.reg_map = &mt7628_reg_map,
4885 	.caps = MT7628_CAPS,
4886 	.hw_features = MTK_HW_FEATURES_MT7628,
4887 	.required_clks = MT7628_CLKS_BITMAP,
4888 	.required_pctl = false,
4889 	.txrx = {
4890 		.txd_size = sizeof(struct mtk_tx_dma),
4891 		.rxd_size = sizeof(struct mtk_rx_dma),
4892 		.rx_irq_done_mask = MTK_RX_DONE_INT,
4893 		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
4894 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
4895 		.dma_len_offset = 16,
4896 	},
4897 };
4898 
4899 const struct of_device_id of_mtk_match[] = {
4900 	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4901 	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4902 	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4903 	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4904 	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4905 	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
4906 	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
4907 	{},
4908 };
4909 MODULE_DEVICE_TABLE(of, of_mtk_match);
4910 
4911 static struct platform_driver mtk_driver = {
4912 	.probe = mtk_probe,
4913 	.remove = mtk_remove,
4914 	.driver = {
4915 		.name = "mtk_soc_eth",
4916 		.of_match_table = of_mtk_match,
4917 	},
4918 };
4919 
4920 module_platform_driver(mtk_driver);
4921 
4922 MODULE_LICENSE("GPL");
4923 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
4924 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
4925