1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5  *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6  *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7  */
8 
9 #include <linux/of.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/platform_device.h>
15 #include <linux/regmap.h>
16 #include <linux/clk.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/if_vlan.h>
19 #include <linux/reset.h>
20 #include <linux/tcp.h>
21 #include <linux/interrupt.h>
22 #include <linux/pinctrl/devinfo.h>
23 #include <linux/phylink.h>
24 #include <linux/pcs/pcs-mtk-lynxi.h>
25 #include <linux/jhash.h>
26 #include <linux/bitfield.h>
27 #include <net/dsa.h>
28 #include <net/dst_metadata.h>
29 #include <net/page_pool/helpers.h>
30 
31 #include "mtk_eth_soc.h"
32 #include "mtk_wed.h"
33 
34 static int mtk_msg_level = -1;
35 module_param_named(msg_level, mtk_msg_level, int, 0);
36 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
37 
38 #define MTK_ETHTOOL_STAT(x) { #x, \
39 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
40 
41 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
42 				  offsetof(struct mtk_hw_stats, xdp_stats.x) / \
43 				  sizeof(u64) }
44 
45 static const struct mtk_reg_map mtk_reg_map = {
46 	.tx_irq_mask		= 0x1a1c,
47 	.tx_irq_status		= 0x1a18,
48 	.pdma = {
49 		.rx_ptr		= 0x0900,
50 		.rx_cnt_cfg	= 0x0904,
51 		.pcrx_ptr	= 0x0908,
52 		.glo_cfg	= 0x0a04,
53 		.rst_idx	= 0x0a08,
54 		.delay_irq	= 0x0a0c,
55 		.irq_status	= 0x0a20,
56 		.irq_mask	= 0x0a28,
57 		.adma_rx_dbg0	= 0x0a38,
58 		.int_grp	= 0x0a50,
59 	},
60 	.qdma = {
61 		.qtx_cfg	= 0x1800,
62 		.qtx_sch	= 0x1804,
63 		.rx_ptr		= 0x1900,
64 		.rx_cnt_cfg	= 0x1904,
65 		.qcrx_ptr	= 0x1908,
66 		.glo_cfg	= 0x1a04,
67 		.rst_idx	= 0x1a08,
68 		.delay_irq	= 0x1a0c,
69 		.fc_th		= 0x1a10,
70 		.tx_sch_rate	= 0x1a14,
71 		.int_grp	= 0x1a20,
72 		.hred		= 0x1a44,
73 		.ctx_ptr	= 0x1b00,
74 		.dtx_ptr	= 0x1b04,
75 		.crx_ptr	= 0x1b10,
76 		.drx_ptr	= 0x1b14,
77 		.fq_head	= 0x1b20,
78 		.fq_tail	= 0x1b24,
79 		.fq_count	= 0x1b28,
80 		.fq_blen	= 0x1b2c,
81 	},
82 	.gdm1_cnt		= 0x2400,
83 	.gdma_to_ppe		= 0x4444,
84 	.ppe_base		= 0x0c00,
85 	.wdma_base = {
86 		[0]		= 0x2800,
87 		[1]		= 0x2c00,
88 	},
89 	.pse_iq_sta		= 0x0110,
90 	.pse_oq_sta		= 0x0118,
91 };
92 
93 static const struct mtk_reg_map mt7628_reg_map = {
94 	.tx_irq_mask		= 0x0a28,
95 	.tx_irq_status		= 0x0a20,
96 	.pdma = {
97 		.rx_ptr		= 0x0900,
98 		.rx_cnt_cfg	= 0x0904,
99 		.pcrx_ptr	= 0x0908,
100 		.glo_cfg	= 0x0a04,
101 		.rst_idx	= 0x0a08,
102 		.delay_irq	= 0x0a0c,
103 		.irq_status	= 0x0a20,
104 		.irq_mask	= 0x0a28,
105 		.int_grp	= 0x0a50,
106 	},
107 };
108 
109 static const struct mtk_reg_map mt7986_reg_map = {
110 	.tx_irq_mask		= 0x461c,
111 	.tx_irq_status		= 0x4618,
112 	.pdma = {
113 		.rx_ptr		= 0x6100,
114 		.rx_cnt_cfg	= 0x6104,
115 		.pcrx_ptr	= 0x6108,
116 		.glo_cfg	= 0x6204,
117 		.rst_idx	= 0x6208,
118 		.delay_irq	= 0x620c,
119 		.irq_status	= 0x6220,
120 		.irq_mask	= 0x6228,
121 		.adma_rx_dbg0	= 0x6238,
122 		.int_grp	= 0x6250,
123 	},
124 	.qdma = {
125 		.qtx_cfg	= 0x4400,
126 		.qtx_sch	= 0x4404,
127 		.rx_ptr		= 0x4500,
128 		.rx_cnt_cfg	= 0x4504,
129 		.qcrx_ptr	= 0x4508,
130 		.glo_cfg	= 0x4604,
131 		.rst_idx	= 0x4608,
132 		.delay_irq	= 0x460c,
133 		.fc_th		= 0x4610,
134 		.int_grp	= 0x4620,
135 		.hred		= 0x4644,
136 		.ctx_ptr	= 0x4700,
137 		.dtx_ptr	= 0x4704,
138 		.crx_ptr	= 0x4710,
139 		.drx_ptr	= 0x4714,
140 		.fq_head	= 0x4720,
141 		.fq_tail	= 0x4724,
142 		.fq_count	= 0x4728,
143 		.fq_blen	= 0x472c,
144 		.tx_sch_rate	= 0x4798,
145 	},
146 	.gdm1_cnt		= 0x1c00,
147 	.gdma_to_ppe		= 0x3333,
148 	.ppe_base		= 0x2000,
149 	.wdma_base = {
150 		[0]		= 0x4800,
151 		[1]		= 0x4c00,
152 	},
153 	.pse_iq_sta		= 0x0180,
154 	.pse_oq_sta		= 0x01a0,
155 };
156 
157 static const struct mtk_reg_map mt7988_reg_map = {
158 	.tx_irq_mask		= 0x461c,
159 	.tx_irq_status		= 0x4618,
160 	.pdma = {
161 		.rx_ptr		= 0x6900,
162 		.rx_cnt_cfg	= 0x6904,
163 		.pcrx_ptr	= 0x6908,
164 		.glo_cfg	= 0x6a04,
165 		.rst_idx	= 0x6a08,
166 		.delay_irq	= 0x6a0c,
167 		.irq_status	= 0x6a20,
168 		.irq_mask	= 0x6a28,
169 		.adma_rx_dbg0	= 0x6a38,
170 		.int_grp	= 0x6a50,
171 	},
172 	.qdma = {
173 		.qtx_cfg	= 0x4400,
174 		.qtx_sch	= 0x4404,
175 		.rx_ptr		= 0x4500,
176 		.rx_cnt_cfg	= 0x4504,
177 		.qcrx_ptr	= 0x4508,
178 		.glo_cfg	= 0x4604,
179 		.rst_idx	= 0x4608,
180 		.delay_irq	= 0x460c,
181 		.fc_th		= 0x4610,
182 		.int_grp	= 0x4620,
183 		.hred		= 0x4644,
184 		.ctx_ptr	= 0x4700,
185 		.dtx_ptr	= 0x4704,
186 		.crx_ptr	= 0x4710,
187 		.drx_ptr	= 0x4714,
188 		.fq_head	= 0x4720,
189 		.fq_tail	= 0x4724,
190 		.fq_count	= 0x4728,
191 		.fq_blen	= 0x472c,
192 		.tx_sch_rate	= 0x4798,
193 	},
194 	.gdm1_cnt		= 0x1c00,
195 	.gdma_to_ppe		= 0x3333,
196 	.ppe_base		= 0x2000,
197 	.wdma_base = {
198 		[0]		= 0x4800,
199 		[1]		= 0x4c00,
200 	},
201 	.pse_iq_sta		= 0x0180,
202 	.pse_oq_sta		= 0x01a0,
203 };
204 
205 /* strings used by ethtool */
206 static const struct mtk_ethtool_stats {
207 	char str[ETH_GSTRING_LEN];
208 	u32 offset;
209 } mtk_ethtool_stats[] = {
210 	MTK_ETHTOOL_STAT(tx_bytes),
211 	MTK_ETHTOOL_STAT(tx_packets),
212 	MTK_ETHTOOL_STAT(tx_skip),
213 	MTK_ETHTOOL_STAT(tx_collisions),
214 	MTK_ETHTOOL_STAT(rx_bytes),
215 	MTK_ETHTOOL_STAT(rx_packets),
216 	MTK_ETHTOOL_STAT(rx_overflow),
217 	MTK_ETHTOOL_STAT(rx_fcs_errors),
218 	MTK_ETHTOOL_STAT(rx_short_errors),
219 	MTK_ETHTOOL_STAT(rx_long_errors),
220 	MTK_ETHTOOL_STAT(rx_checksum_errors),
221 	MTK_ETHTOOL_STAT(rx_flow_control_packets),
222 	MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
223 	MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
224 	MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
225 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
226 	MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
227 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
228 	MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
229 };
230 
231 static const char * const mtk_clks_source_name[] = {
232 	"ethif",
233 	"sgmiitop",
234 	"esw",
235 	"gp0",
236 	"gp1",
237 	"gp2",
238 	"gp3",
239 	"xgp1",
240 	"xgp2",
241 	"xgp3",
242 	"crypto",
243 	"fe",
244 	"trgpll",
245 	"sgmii_tx250m",
246 	"sgmii_rx250m",
247 	"sgmii_cdr_ref",
248 	"sgmii_cdr_fb",
249 	"sgmii2_tx250m",
250 	"sgmii2_rx250m",
251 	"sgmii2_cdr_ref",
252 	"sgmii2_cdr_fb",
253 	"sgmii_ck",
254 	"eth2pll",
255 	"wocpu0",
256 	"wocpu1",
257 	"netsys0",
258 	"netsys1",
259 	"ethwarp_wocpu2",
260 	"ethwarp_wocpu1",
261 	"ethwarp_wocpu0",
262 	"top_usxgmii0_sel",
263 	"top_usxgmii1_sel",
264 	"top_sgm0_sel",
265 	"top_sgm1_sel",
266 	"top_xfi_phy0_xtal_sel",
267 	"top_xfi_phy1_xtal_sel",
268 	"top_eth_gmii_sel",
269 	"top_eth_refck_50m_sel",
270 	"top_eth_sys_200m_sel",
271 	"top_eth_sys_sel",
272 	"top_eth_xgmii_sel",
273 	"top_eth_mii_sel",
274 	"top_netsys_sel",
275 	"top_netsys_500m_sel",
276 	"top_netsys_pao_2x_sel",
277 	"top_netsys_sync_250m_sel",
278 	"top_netsys_ppefb_250m_sel",
279 	"top_netsys_warp_sel",
280 };
281 
282 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
283 {
284 	__raw_writel(val, eth->base + reg);
285 }
286 
287 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
288 {
289 	return __raw_readl(eth->base + reg);
290 }
291 
292 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
293 {
294 	u32 val;
295 
296 	val = mtk_r32(eth, reg);
297 	val &= ~mask;
298 	val |= set;
299 	mtk_w32(eth, val, reg);
300 	return reg;
301 }
302 
303 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
304 {
305 	unsigned long t_start = jiffies;
306 
307 	while (1) {
308 		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
309 			return 0;
310 		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
311 			break;
312 		cond_resched();
313 	}
314 
315 	dev_err(eth->dev, "mdio: MDIO timeout\n");
316 	return -ETIMEDOUT;
317 }
318 
319 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
320 			       u32 write_data)
321 {
322 	int ret;
323 
324 	ret = mtk_mdio_busy_wait(eth);
325 	if (ret < 0)
326 		return ret;
327 
328 	mtk_w32(eth, PHY_IAC_ACCESS |
329 		PHY_IAC_START_C22 |
330 		PHY_IAC_CMD_WRITE |
331 		PHY_IAC_REG(phy_reg) |
332 		PHY_IAC_ADDR(phy_addr) |
333 		PHY_IAC_DATA(write_data),
334 		MTK_PHY_IAC);
335 
336 	ret = mtk_mdio_busy_wait(eth);
337 	if (ret < 0)
338 		return ret;
339 
340 	return 0;
341 }
342 
343 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
344 			       u32 devad, u32 phy_reg, u32 write_data)
345 {
346 	int ret;
347 
348 	ret = mtk_mdio_busy_wait(eth);
349 	if (ret < 0)
350 		return ret;
351 
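	/* Clause 45 access is two-phase: latch the register address first,
	 * then issue the data write.
	 */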
352 	mtk_w32(eth, PHY_IAC_ACCESS |
353 		PHY_IAC_START_C45 |
354 		PHY_IAC_CMD_C45_ADDR |
355 		PHY_IAC_REG(devad) |
356 		PHY_IAC_ADDR(phy_addr) |
357 		PHY_IAC_DATA(phy_reg),
358 		MTK_PHY_IAC);
359 
360 	ret = mtk_mdio_busy_wait(eth);
361 	if (ret < 0)
362 		return ret;
363 
364 	mtk_w32(eth, PHY_IAC_ACCESS |
365 		PHY_IAC_START_C45 |
366 		PHY_IAC_CMD_WRITE |
367 		PHY_IAC_REG(devad) |
368 		PHY_IAC_ADDR(phy_addr) |
369 		PHY_IAC_DATA(write_data),
370 		MTK_PHY_IAC);
371 
372 	ret = mtk_mdio_busy_wait(eth);
373 	if (ret < 0)
374 		return ret;
375 
376 	return 0;
377 }
378 
379 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
380 {
381 	int ret;
382 
383 	ret = mtk_mdio_busy_wait(eth);
384 	if (ret < 0)
385 		return ret;
386 
387 	mtk_w32(eth, PHY_IAC_ACCESS |
388 		PHY_IAC_START_C22 |
389 		PHY_IAC_CMD_C22_READ |
390 		PHY_IAC_REG(phy_reg) |
391 		PHY_IAC_ADDR(phy_addr),
392 		MTK_PHY_IAC);
393 
394 	ret = mtk_mdio_busy_wait(eth);
395 	if (ret < 0)
396 		return ret;
397 
398 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
399 }
400 
401 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
402 			      u32 devad, u32 phy_reg)
403 {
404 	int ret;
405 
406 	ret = mtk_mdio_busy_wait(eth);
407 	if (ret < 0)
408 		return ret;
409 
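	/* Clause 45 access is two-phase: latch the register address first,
	 * then issue the data read.
	 */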
410 	mtk_w32(eth, PHY_IAC_ACCESS |
411 		PHY_IAC_START_C45 |
412 		PHY_IAC_CMD_C45_ADDR |
413 		PHY_IAC_REG(devad) |
414 		PHY_IAC_ADDR(phy_addr) |
415 		PHY_IAC_DATA(phy_reg),
416 		MTK_PHY_IAC);
417 
418 	ret = mtk_mdio_busy_wait(eth);
419 	if (ret < 0)
420 		return ret;
421 
422 	mtk_w32(eth, PHY_IAC_ACCESS |
423 		PHY_IAC_START_C45 |
424 		PHY_IAC_CMD_C45_READ |
425 		PHY_IAC_REG(devad) |
426 		PHY_IAC_ADDR(phy_addr),
427 		MTK_PHY_IAC);
428 
429 	ret = mtk_mdio_busy_wait(eth);
430 	if (ret < 0)
431 		return ret;
432 
433 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
434 }
435 
436 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
437 			      int phy_reg, u16 val)
438 {
439 	struct mtk_eth *eth = bus->priv;
440 
441 	return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
442 }
443 
444 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
445 			      int devad, int phy_reg, u16 val)
446 {
447 	struct mtk_eth *eth = bus->priv;
448 
449 	return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
450 }
451 
452 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
453 {
454 	struct mtk_eth *eth = bus->priv;
455 
456 	return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
457 }
458 
459 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
460 			     int phy_reg)
461 {
462 	struct mtk_eth *eth = bus->priv;
463 
464 	return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
465 }
466 
467 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
468 				     phy_interface_t interface)
469 {
470 	u32 val;
471 
472 	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
473 		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
474 
475 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
476 			   ETHSYS_TRGMII_MT7621_MASK, val);
477 
478 	return 0;
479 }
480 
481 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
482 				   phy_interface_t interface)
483 {
484 	int ret;
485 
486 	if (interface == PHY_INTERFACE_MODE_TRGMII) {
487 		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
488 		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
489 		if (ret)
490 			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
491 		return;
492 	}
493 
494 	dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
495 }
496 
497 static void mtk_setup_bridge_switch(struct mtk_eth *eth)
498 {
499 	/* Force Port1 XGMAC Link Up */
500 	mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
501 		MTK_XGMAC_STS(MTK_GMAC1_ID));
502 
503 	/* Adjust GSW bridge IPG to 11 */
504 	mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
505 		(GSW_IPG_11 << GSWTX_IPG_SHIFT) |
506 		(GSW_IPG_11 << GSWRX_IPG_SHIFT),
507 		MTK_GSW_CFG);
508 }
509 
510 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
511 					      phy_interface_t interface)
512 {
513 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
514 					   phylink_config);
515 	struct mtk_eth *eth = mac->hw;
516 	unsigned int sid;
517 
518 	if (interface == PHY_INTERFACE_MODE_SGMII ||
519 	    phy_interface_mode_is_8023z(interface)) {
520 		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
521 		       0 : mac->id;
522 
523 		return eth->sgmii_pcs[sid];
524 	}
525 
526 	return NULL;
527 }
528 
529 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
530 			   const struct phylink_link_state *state)
531 {
532 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
533 					   phylink_config);
534 	struct mtk_eth *eth = mac->hw;
535 	int val, ge_mode, err = 0;
536 	u32 i;
537 
538 	/* MT76x8 has no hardware settings for the MAC */
539 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
540 	    mac->interface != state->interface) {
541 		/* Setup soc pin functions */
542 		switch (state->interface) {
543 		case PHY_INTERFACE_MODE_TRGMII:
544 		case PHY_INTERFACE_MODE_RGMII_TXID:
545 		case PHY_INTERFACE_MODE_RGMII_RXID:
546 		case PHY_INTERFACE_MODE_RGMII_ID:
547 		case PHY_INTERFACE_MODE_RGMII:
548 		case PHY_INTERFACE_MODE_MII:
549 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
550 				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
551 				if (err)
552 					goto init_err;
553 			}
554 			break;
555 		case PHY_INTERFACE_MODE_1000BASEX:
556 		case PHY_INTERFACE_MODE_2500BASEX:
557 		case PHY_INTERFACE_MODE_SGMII:
558 			err = mtk_gmac_sgmii_path_setup(eth, mac->id);
559 			if (err)
560 				goto init_err;
561 			break;
562 		case PHY_INTERFACE_MODE_GMII:
563 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
564 				err = mtk_gmac_gephy_path_setup(eth, mac->id);
565 				if (err)
566 					goto init_err;
567 			}
568 			break;
569 		case PHY_INTERFACE_MODE_INTERNAL:
570 			break;
571 		default:
572 			goto err_phy;
573 		}
574 
575 		/* Setup clock for 1st gmac */
576 		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
577 		    !phy_interface_mode_is_8023z(state->interface) &&
578 		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
579 			if (MTK_HAS_CAPS(mac->hw->soc->caps,
580 					 MTK_TRGMII_MT7621_CLK)) {
581 				if (mt7621_gmac0_rgmii_adjust(mac->hw,
582 							      state->interface))
583 					goto err_phy;
584 			} else {
585 				mtk_gmac0_rgmii_adjust(mac->hw,
586 						       state->interface);
587 
588 				/* mt7623_pad_clk_setup */
589 				for (i = 0; i < NUM_TRGMII_CTRL; i++)
590 					mtk_w32(mac->hw,
591 						TD_DM_DRVP(8) | TD_DM_DRVN(8),
592 						TRGMII_TD_ODT(i));
593 
594 				/* Assert/release MT7623 RXC reset */
595 				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
596 					TRGMII_RCK_CTRL);
597 				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
598 			}
599 		}
600 
601 		switch (state->interface) {
602 		case PHY_INTERFACE_MODE_MII:
603 		case PHY_INTERFACE_MODE_GMII:
604 			ge_mode = 1;
605 			break;
606 		default:
607 			ge_mode = 0;
608 			break;
609 		}
610 
611 		/* put the gmac into the right mode */
612 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
613 		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
614 		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
615 		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
616 
617 		mac->interface = state->interface;
618 	}
619 
620 	/* SGMII */
621 	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
622 	    phy_interface_mode_is_8023z(state->interface)) {
623 		/* The path from GMAC to SGMII will be enabled once the
624 		 * SGMIISYS setup is done.
625 		 */
626 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
627 
628 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
629 				   SYSCFG0_SGMII_MASK,
630 				   ~(u32)SYSCFG0_SGMII_MASK);
631 
632 		/* Save the syscfg0 value for mac_finish */
633 		mac->syscfg0 = val;
634 	} else if (phylink_autoneg_inband(mode)) {
635 		dev_err(eth->dev,
636 			"In-band mode not supported in non SGMII mode!\n");
637 		return;
638 	}
639 
640 	/* Setup gmac */
641 	if (mtk_is_netsys_v3_or_greater(eth) &&
642 	    mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
643 		mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
644 		mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
645 
646 		mtk_setup_bridge_switch(eth);
647 	}
648 
649 	return;
650 
651 err_phy:
652 	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
653 		mac->id, phy_modes(state->interface));
654 	return;
655 
656 init_err:
657 	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
658 		mac->id, phy_modes(state->interface), err);
659 }
660 
661 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
662 			  phy_interface_t interface)
663 {
664 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
665 					   phylink_config);
666 	struct mtk_eth *eth = mac->hw;
667 	u32 mcr_cur, mcr_new;
668 
669 	/* Enable SGMII */
670 	if (interface == PHY_INTERFACE_MODE_SGMII ||
671 	    phy_interface_mode_is_8023z(interface))
672 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
673 				   SYSCFG0_SGMII_MASK, mac->syscfg0);
674 
675 	/* Setup gmac */
676 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
677 	mcr_new = mcr_cur;
678 	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
679 		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
680 		   MAC_MCR_RX_FIFO_CLR_DIS;
681 
682 	/* Only update control register when needed! */
683 	if (mcr_new != mcr_cur)
684 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
685 
686 	return 0;
687 }
688 
689 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
690 			      phy_interface_t interface)
691 {
692 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
693 					   phylink_config);
694 	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
695 
696 	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
697 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
698 }
699 
700 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
701 				int speed)
702 {
703 	const struct mtk_soc_data *soc = eth->soc;
704 	u32 ofs, val;
705 
706 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
707 		return;
708 
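	/* The MIN/MAX rate fields appear to encode rates as
	 * mantissa * 10^exponent kbps, e.g. MAN=1/EXP=4 below gives the
	 * 10 Mbps minimum.
	 */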
709 	val = MTK_QTX_SCH_MIN_RATE_EN |
710 	      /* minimum: 10 Mbps */
711 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
712 	      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
713 	      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
714 	if (mtk_is_netsys_v1(eth))
715 		val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
716 
717 	if (IS_ENABLED(CONFIG_SOC_MT7621)) {
718 		switch (speed) {
719 		case SPEED_10:
720 			val |= MTK_QTX_SCH_MAX_RATE_EN |
721 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
722 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
723 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
724 			break;
725 		case SPEED_100:
726 			val |= MTK_QTX_SCH_MAX_RATE_EN |
727 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
728 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
729 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
730 			break;
731 		case SPEED_1000:
732 			val |= MTK_QTX_SCH_MAX_RATE_EN |
733 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
734 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
735 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
736 			break;
737 		default:
738 			break;
739 		}
740 	} else {
741 		switch (speed) {
742 		case SPEED_10:
743 			val |= MTK_QTX_SCH_MAX_RATE_EN |
744 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
745 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
746 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
747 			break;
748 		case SPEED_100:
749 			val |= MTK_QTX_SCH_MAX_RATE_EN |
750 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
751 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
752 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
753 			break;
754 		case SPEED_1000:
755 			val |= MTK_QTX_SCH_MAX_RATE_EN |
756 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
757 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
758 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
759 			break;
760 		default:
761 			break;
762 		}
763 	}
764 
765 	ofs = MTK_QTX_OFFSET * idx;
766 	mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
767 }
768 
769 static void mtk_mac_link_up(struct phylink_config *config,
770 			    struct phy_device *phy,
771 			    unsigned int mode, phy_interface_t interface,
772 			    int speed, int duplex, bool tx_pause, bool rx_pause)
773 {
774 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
775 					   phylink_config);
776 	u32 mcr;
777 
778 	mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
779 	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
780 		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
781 		 MAC_MCR_FORCE_RX_FC);
782 
783 	/* Configure speed */
784 	mac->speed = speed;
785 	switch (speed) {
786 	case SPEED_2500:
787 	case SPEED_1000:
788 		mcr |= MAC_MCR_SPEED_1000;
789 		break;
790 	case SPEED_100:
791 		mcr |= MAC_MCR_SPEED_100;
792 		break;
793 	}
794 
795 	/* Configure duplex */
796 	if (duplex == DUPLEX_FULL)
797 		mcr |= MAC_MCR_FORCE_DPX;
798 
799 	/* Configure pause modes - phylink will avoid these for half duplex */
800 	if (tx_pause)
801 		mcr |= MAC_MCR_FORCE_TX_FC;
802 	if (rx_pause)
803 		mcr |= MAC_MCR_FORCE_RX_FC;
804 
805 	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
806 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
807 }
808 
809 static const struct phylink_mac_ops mtk_phylink_ops = {
810 	.mac_select_pcs = mtk_mac_select_pcs,
811 	.mac_config = mtk_mac_config,
812 	.mac_finish = mtk_mac_finish,
813 	.mac_link_down = mtk_mac_link_down,
814 	.mac_link_up = mtk_mac_link_up,
815 };
816 
817 static int mtk_mdio_init(struct mtk_eth *eth)
818 {
819 	unsigned int max_clk = 2500000, divider;
820 	struct device_node *mii_np;
821 	int ret;
822 	u32 val;
823 
824 	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
825 	if (!mii_np) {
826 		dev_err(eth->dev, "no %s child node found", "mdio-bus");
827 		return -ENODEV;
828 	}
829 
830 	if (!of_device_is_available(mii_np)) {
831 		ret = -ENODEV;
832 		goto err_put_node;
833 	}
834 
835 	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
836 	if (!eth->mii_bus) {
837 		ret = -ENOMEM;
838 		goto err_put_node;
839 	}
840 
841 	eth->mii_bus->name = "mdio";
842 	eth->mii_bus->read = mtk_mdio_read_c22;
843 	eth->mii_bus->write = mtk_mdio_write_c22;
844 	eth->mii_bus->read_c45 = mtk_mdio_read_c45;
845 	eth->mii_bus->write_c45 = mtk_mdio_write_c45;
846 	eth->mii_bus->priv = eth;
847 	eth->mii_bus->parent = eth->dev;
848 
849 	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
850 
851 	if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
852 		if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
853 			dev_err(eth->dev, "MDIO clock frequency out of range");
854 			ret = -EINVAL;
855 			goto err_put_node;
856 		}
857 		max_clk = val;
858 	}
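	/* Choose the smallest divider that keeps the MDC frequency at or
	 * below the requested rate, capped at 63.
	 */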
859 	divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
860 
861 	/* Configure MDC Turbo Mode */
862 	if (mtk_is_netsys_v3_or_greater(eth))
863 		mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
864 
865 	/* Configure MDC Divider */
866 	val = FIELD_PREP(PPSC_MDC_CFG, divider);
867 	if (!mtk_is_netsys_v3_or_greater(eth))
868 		val |= PPSC_MDC_TURBO;
869 	mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
870 
871 	dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
872 
873 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
874 
875 err_put_node:
876 	of_node_put(mii_np);
877 	return ret;
878 }
879 
880 static void mtk_mdio_cleanup(struct mtk_eth *eth)
881 {
882 	if (!eth->mii_bus)
883 		return;
884 
885 	mdiobus_unregister(eth->mii_bus);
886 }
887 
888 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
889 {
890 	unsigned long flags;
891 	u32 val;
892 
893 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
894 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
895 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
896 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
897 }
898 
899 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
900 {
901 	unsigned long flags;
902 	u32 val;
903 
904 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
905 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
906 	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
907 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
908 }
909 
910 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
911 {
912 	unsigned long flags;
913 	u32 val;
914 
915 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
916 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
917 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
918 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
919 }
920 
921 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
922 {
923 	unsigned long flags;
924 	u32 val;
925 
926 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
927 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
928 	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
929 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
930 }
931 
932 static int mtk_set_mac_address(struct net_device *dev, void *p)
933 {
934 	int ret = eth_mac_addr(dev, p);
935 	struct mtk_mac *mac = netdev_priv(dev);
936 	struct mtk_eth *eth = mac->hw;
937 	const char *macaddr = dev->dev_addr;
938 
939 	if (ret)
940 		return ret;
941 
942 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
943 		return -EBUSY;
944 
945 	spin_lock_bh(&mac->hw->page_lock);
946 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
947 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
948 			MT7628_SDM_MAC_ADRH);
949 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
950 			(macaddr[4] << 8) | macaddr[5],
951 			MT7628_SDM_MAC_ADRL);
952 	} else {
953 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
954 			MTK_GDMA_MAC_ADRH(mac->id));
955 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
956 			(macaddr[4] << 8) | macaddr[5],
957 			MTK_GDMA_MAC_ADRL(mac->id));
958 	}
959 	spin_unlock_bh(&mac->hw->page_lock);
960 
961 	return 0;
962 }
963 
964 void mtk_stats_update_mac(struct mtk_mac *mac)
965 {
966 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
967 	struct mtk_eth *eth = mac->hw;
968 
969 	u64_stats_update_begin(&hw_stats->syncp);
970 
971 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
972 		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
973 		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
974 		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
975 		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
976 		hw_stats->rx_checksum_errors +=
977 			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
978 	} else {
979 		const struct mtk_reg_map *reg_map = eth->soc->reg_map;
980 		unsigned int offs = hw_stats->reg_offset;
981 		u64 stats;
982 
983 		hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
984 		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
985 		if (stats)
986 			hw_stats->rx_bytes += (stats << 32);
987 		hw_stats->rx_packets +=
988 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
989 		hw_stats->rx_overflow +=
990 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
991 		hw_stats->rx_fcs_errors +=
992 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
993 		hw_stats->rx_short_errors +=
994 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
995 		hw_stats->rx_long_errors +=
996 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
997 		hw_stats->rx_checksum_errors +=
998 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
999 		hw_stats->rx_flow_control_packets +=
1000 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
1001 
1002 		if (mtk_is_netsys_v3_or_greater(eth)) {
1003 			hw_stats->tx_skip +=
1004 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
1005 			hw_stats->tx_collisions +=
1006 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
1007 			hw_stats->tx_bytes +=
1008 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
1009 			stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
1010 			if (stats)
1011 				hw_stats->tx_bytes += (stats << 32);
1012 			hw_stats->tx_packets +=
1013 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
1014 		} else {
1015 			hw_stats->tx_skip +=
1016 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
1017 			hw_stats->tx_collisions +=
1018 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
1019 			hw_stats->tx_bytes +=
1020 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
1021 			stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
1022 			if (stats)
1023 				hw_stats->tx_bytes += (stats << 32);
1024 			hw_stats->tx_packets +=
1025 				mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
1026 		}
1027 	}
1028 
1029 	u64_stats_update_end(&hw_stats->syncp);
1030 }
1031 
1032 static void mtk_stats_update(struct mtk_eth *eth)
1033 {
1034 	int i;
1035 
1036 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1037 		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1038 			continue;
1039 		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1040 			mtk_stats_update_mac(eth->mac[i]);
1041 			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
1042 		}
1043 	}
1044 }
1045 
1046 static void mtk_get_stats64(struct net_device *dev,
1047 			    struct rtnl_link_stats64 *storage)
1048 {
1049 	struct mtk_mac *mac = netdev_priv(dev);
1050 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1051 	unsigned int start;
1052 
1053 	if (netif_running(dev) && netif_device_present(dev)) {
1054 		if (spin_trylock_bh(&hw_stats->stats_lock)) {
1055 			mtk_stats_update_mac(mac);
1056 			spin_unlock_bh(&hw_stats->stats_lock);
1057 		}
1058 	}
1059 
1060 	do {
1061 		start = u64_stats_fetch_begin(&hw_stats->syncp);
1062 		storage->rx_packets = hw_stats->rx_packets;
1063 		storage->tx_packets = hw_stats->tx_packets;
1064 		storage->rx_bytes = hw_stats->rx_bytes;
1065 		storage->tx_bytes = hw_stats->tx_bytes;
1066 		storage->collisions = hw_stats->tx_collisions;
1067 		storage->rx_length_errors = hw_stats->rx_short_errors +
1068 			hw_stats->rx_long_errors;
1069 		storage->rx_over_errors = hw_stats->rx_overflow;
1070 		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1071 		storage->rx_errors = hw_stats->rx_checksum_errors;
1072 		storage->tx_aborted_errors = hw_stats->tx_skip;
1073 	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
1074 
1075 	storage->tx_errors = dev->stats.tx_errors;
1076 	storage->rx_dropped = dev->stats.rx_dropped;
1077 	storage->tx_dropped = dev->stats.tx_dropped;
1078 }
1079 
1080 static inline int mtk_max_frag_size(int mtu)
1081 {
1082 	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1083 	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1084 		mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1085 
1086 	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1087 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1088 }
1089 
1090 static inline int mtk_max_buf_size(int frag_size)
1091 {
1092 	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1093 		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1094 
1095 	WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1096 
1097 	return buf_size;
1098 }
1099 
1100 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1101 			    struct mtk_rx_dma_v2 *dma_rxd)
1102 {
1103 	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1104 	if (!(rxd->rxd2 & RX_DMA_DONE))
1105 		return false;
1106 
1107 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1108 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1109 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1110 	if (mtk_is_netsys_v2_or_greater(eth)) {
1111 		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1112 		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1113 	}
1114 
1115 	return true;
1116 }
1117 
1118 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1119 {
1120 	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1121 	unsigned long data;
1122 
1123 	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1124 				get_order(size));
1125 
1126 	return (void *)data;
1127 }
1128 
1129 /* the qdma core needs scratch memory to be set up */
1130 static int mtk_init_fq_dma(struct mtk_eth *eth)
1131 {
1132 	const struct mtk_soc_data *soc = eth->soc;
1133 	dma_addr_t phy_ring_tail;
1134 	int cnt = MTK_QDMA_RING_SIZE;
1135 	dma_addr_t dma_addr;
1136 	int i;
1137 
1138 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
1139 		eth->scratch_ring = eth->sram_base;
1140 	else
1141 		eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1142 						       cnt * soc->txrx.txd_size,
1143 						       &eth->phy_scratch_ring,
1144 						       GFP_KERNEL);
1145 	if (unlikely(!eth->scratch_ring))
1146 		return -ENOMEM;
1147 
1148 	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1149 	if (unlikely(!eth->scratch_head))
1150 		return -ENOMEM;
1151 
1152 	dma_addr = dma_map_single(eth->dma_dev,
1153 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
1154 				  DMA_FROM_DEVICE);
1155 	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1156 		return -ENOMEM;
1157 
1158 	phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
1159 
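	/* Chain the scratch descriptors together and point each one at its
	 * own MTK_QDMA_PAGE_SIZE slice of the scratch buffer.
	 */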
1160 	for (i = 0; i < cnt; i++) {
1161 		struct mtk_tx_dma_v2 *txd;
1162 
1163 		txd = eth->scratch_ring + i * soc->txrx.txd_size;
1164 		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1165 		if (i < cnt - 1)
1166 			txd->txd2 = eth->phy_scratch_ring +
1167 				    (i + 1) * soc->txrx.txd_size;
1168 
1169 		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1170 		txd->txd4 = 0;
1171 		if (mtk_is_netsys_v2_or_greater(eth)) {
1172 			txd->txd5 = 0;
1173 			txd->txd6 = 0;
1174 			txd->txd7 = 0;
1175 			txd->txd8 = 0;
1176 		}
1177 	}
1178 
1179 	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1180 	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1181 	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1182 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1183 
1184 	return 0;
1185 }
1186 
1187 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1188 {
1189 	return ring->dma + (desc - ring->phys);
1190 }
1191 
1192 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1193 					     void *txd, u32 txd_size)
1194 {
1195 	int idx = (txd - ring->dma) / txd_size;
1196 
1197 	return &ring->buf[idx];
1198 }
1199 
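/* Return the PDMA descriptor sitting at the same ring index as a QDMA one */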
1200 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1201 				       struct mtk_tx_dma *dma)
1202 {
1203 	return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1204 }
1205 
1206 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1207 {
1208 	return (dma - ring->dma) / txd_size;
1209 }
1210 
1211 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1212 			 struct xdp_frame_bulk *bq, bool napi)
1213 {
1214 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1215 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1216 			dma_unmap_single(eth->dma_dev,
1217 					 dma_unmap_addr(tx_buf, dma_addr0),
1218 					 dma_unmap_len(tx_buf, dma_len0),
1219 					 DMA_TO_DEVICE);
1220 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1221 			dma_unmap_page(eth->dma_dev,
1222 				       dma_unmap_addr(tx_buf, dma_addr0),
1223 				       dma_unmap_len(tx_buf, dma_len0),
1224 				       DMA_TO_DEVICE);
1225 		}
1226 	} else {
1227 		if (dma_unmap_len(tx_buf, dma_len0)) {
1228 			dma_unmap_page(eth->dma_dev,
1229 				       dma_unmap_addr(tx_buf, dma_addr0),
1230 				       dma_unmap_len(tx_buf, dma_len0),
1231 				       DMA_TO_DEVICE);
1232 		}
1233 
1234 		if (dma_unmap_len(tx_buf, dma_len1)) {
1235 			dma_unmap_page(eth->dma_dev,
1236 				       dma_unmap_addr(tx_buf, dma_addr1),
1237 				       dma_unmap_len(tx_buf, dma_len1),
1238 				       DMA_TO_DEVICE);
1239 		}
1240 	}
1241 
1242 	if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1243 		if (tx_buf->type == MTK_TYPE_SKB) {
1244 			struct sk_buff *skb = tx_buf->data;
1245 
1246 			if (napi)
1247 				napi_consume_skb(skb, napi);
1248 			else
1249 				dev_kfree_skb_any(skb);
1250 		} else {
1251 			struct xdp_frame *xdpf = tx_buf->data;
1252 
1253 			if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1254 				xdp_return_frame_rx_napi(xdpf);
1255 			else if (bq)
1256 				xdp_return_frame_bulk(xdpf, bq);
1257 			else
1258 				xdp_return_frame(xdpf);
1259 		}
1260 	}
1261 	tx_buf->flags = 0;
1262 	tx_buf->data = NULL;
1263 }
1264 
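/* On PDMA, each descriptor carries up to two buffers: even entries use
 * txd1/PLEN0, odd entries use txd3/PLEN1.
 */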
1265 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1266 			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1267 			 size_t size, int idx)
1268 {
1269 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1270 		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1271 		dma_unmap_len_set(tx_buf, dma_len0, size);
1272 	} else {
1273 		if (idx & 1) {
1274 			txd->txd3 = mapped_addr;
1275 			txd->txd2 |= TX_DMA_PLEN1(size);
1276 			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1277 			dma_unmap_len_set(tx_buf, dma_len1, size);
1278 		} else {
1279 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1280 			txd->txd1 = mapped_addr;
1281 			txd->txd2 = TX_DMA_PLEN0(size);
1282 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1283 			dma_unmap_len_set(tx_buf, dma_len0, size);
1284 		}
1285 	}
1286 }
1287 
1288 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1289 				   struct mtk_tx_dma_desc_info *info)
1290 {
1291 	struct mtk_mac *mac = netdev_priv(dev);
1292 	struct mtk_eth *eth = mac->hw;
1293 	struct mtk_tx_dma *desc = txd;
1294 	u32 data;
1295 
1296 	WRITE_ONCE(desc->txd1, info->addr);
1297 
1298 	data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1299 	       FIELD_PREP(TX_DMA_PQID, info->qid);
1300 	if (info->last)
1301 		data |= TX_DMA_LS0;
1302 	WRITE_ONCE(desc->txd3, data);
1303 
1304 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1305 	if (info->first) {
1306 		if (info->gso)
1307 			data |= TX_DMA_TSO;
1308 		/* tx checksum offload */
1309 		if (info->csum)
1310 			data |= TX_DMA_CHKSUM;
1311 		/* vlan header offload */
1312 		if (info->vlan)
1313 			data |= TX_DMA_INS_VLAN | info->vlan_tci;
1314 	}
1315 	WRITE_ONCE(desc->txd4, data);
1316 }
1317 
1318 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1319 				   struct mtk_tx_dma_desc_info *info)
1320 {
1321 	struct mtk_mac *mac = netdev_priv(dev);
1322 	struct mtk_tx_dma_v2 *desc = txd;
1323 	struct mtk_eth *eth = mac->hw;
1324 	u32 data;
1325 
1326 	WRITE_ONCE(desc->txd1, info->addr);
1327 
1328 	data = TX_DMA_PLEN0(info->size);
1329 	if (info->last)
1330 		data |= TX_DMA_LS0;
1331 
1332 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
1333 		data |= TX_DMA_PREP_ADDR64(info->addr);
1334 
1335 	WRITE_ONCE(desc->txd3, data);
1336 
1337 	 /* set forward port */
1338 	switch (mac->id) {
1339 	case MTK_GMAC1_ID:
1340 		data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
1341 		break;
1342 	case MTK_GMAC2_ID:
1343 		data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
1344 		break;
1345 	case MTK_GMAC3_ID:
1346 		data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
1347 		break;
1348 	}
1349 
1350 	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1351 	WRITE_ONCE(desc->txd4, data);
1352 
1353 	data = 0;
1354 	if (info->first) {
1355 		if (info->gso)
1356 			data |= TX_DMA_TSO_V2;
1357 		/* tx checksum offload */
1358 		if (info->csum)
1359 			data |= TX_DMA_CHKSUM_V2;
1360 		if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
1361 			data |= TX_DMA_SPTAG_V3;
1362 	}
1363 	WRITE_ONCE(desc->txd5, data);
1364 
1365 	data = 0;
1366 	if (info->first && info->vlan)
1367 		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1368 	WRITE_ONCE(desc->txd6, data);
1369 
1370 	WRITE_ONCE(desc->txd7, 0);
1371 	WRITE_ONCE(desc->txd8, 0);
1372 }
1373 
1374 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1375 				struct mtk_tx_dma_desc_info *info)
1376 {
1377 	struct mtk_mac *mac = netdev_priv(dev);
1378 	struct mtk_eth *eth = mac->hw;
1379 
1380 	if (mtk_is_netsys_v2_or_greater(eth))
1381 		mtk_tx_set_dma_desc_v2(dev, txd, info);
1382 	else
1383 		mtk_tx_set_dma_desc_v1(dev, txd, info);
1384 }
1385 
1386 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1387 		      int tx_num, struct mtk_tx_ring *ring, bool gso)
1388 {
1389 	struct mtk_tx_dma_desc_info txd_info = {
1390 		.size = skb_headlen(skb),
1391 		.gso = gso,
1392 		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
1393 		.vlan = skb_vlan_tag_present(skb),
1394 		.qid = skb_get_queue_mapping(skb),
1395 		.vlan_tci = skb_vlan_tag_get(skb),
1396 		.first = true,
1397 		.last = !skb_is_nonlinear(skb),
1398 	};
1399 	struct netdev_queue *txq;
1400 	struct mtk_mac *mac = netdev_priv(dev);
1401 	struct mtk_eth *eth = mac->hw;
1402 	const struct mtk_soc_data *soc = eth->soc;
1403 	struct mtk_tx_dma *itxd, *txd;
1404 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1405 	struct mtk_tx_buf *itx_buf, *tx_buf;
1406 	int i, n_desc = 1;
1407 	int queue = skb_get_queue_mapping(skb);
1408 	int k = 0;
1409 
1410 	txq = netdev_get_tx_queue(dev, queue);
1411 	itxd = ring->next_free;
1412 	itxd_pdma = qdma_to_pdma(ring, itxd);
1413 	if (itxd == ring->last_free)
1414 		return -ENOMEM;
1415 
1416 	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1417 	memset(itx_buf, 0, sizeof(*itx_buf));
1418 
1419 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1420 				       DMA_TO_DEVICE);
1421 	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1422 		return -ENOMEM;
1423 
1424 	mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1425 
1426 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1427 	itx_buf->mac_id = mac->id;
1428 	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1429 		     k++);
1430 
1431 	/* TX SG offload */
1432 	txd = itxd;
1433 	txd_pdma = qdma_to_pdma(ring, txd);
1434 
1435 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1436 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1437 		unsigned int offset = 0;
1438 		int frag_size = skb_frag_size(frag);
1439 
1440 		while (frag_size) {
1441 			bool new_desc = true;
1442 
1443 			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1444 			    (i & 0x1)) {
1445 				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1446 				txd_pdma = qdma_to_pdma(ring, txd);
1447 				if (txd == ring->last_free)
1448 					goto err_dma;
1449 
1450 				n_desc++;
1451 			} else {
1452 				new_desc = false;
1453 			}
1454 
1455 			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1456 			txd_info.size = min_t(unsigned int, frag_size,
1457 					      soc->txrx.dma_max_len);
1458 			txd_info.qid = queue;
1459 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1460 					!(frag_size - txd_info.size);
1461 			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1462 							 offset, txd_info.size,
1463 							 DMA_TO_DEVICE);
1464 			if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1465 				goto err_dma;
1466 
1467 			mtk_tx_set_dma_desc(dev, txd, &txd_info);
1468 
1469 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1470 						    soc->txrx.txd_size);
1471 			if (new_desc)
1472 				memset(tx_buf, 0, sizeof(*tx_buf));
1473 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1474 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1475 			tx_buf->mac_id = mac->id;
1476 
1477 			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1478 				     txd_info.size, k++);
1479 
1480 			frag_size -= txd_info.size;
1481 			offset += txd_info.size;
1482 		}
1483 	}
1484 
1485 	/* store skb to cleanup */
1486 	itx_buf->type = MTK_TYPE_SKB;
1487 	itx_buf->data = skb;
1488 
1489 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1490 		if (k & 0x1)
1491 			txd_pdma->txd2 |= TX_DMA_LS0;
1492 		else
1493 			txd_pdma->txd2 |= TX_DMA_LS1;
1494 	}
1495 
1496 	netdev_tx_sent_queue(txq, skb->len);
1497 	skb_tx_timestamp(skb);
1498 
1499 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1500 	atomic_sub(n_desc, &ring->free_count);
1501 
1502 	/* make sure that all changes to the dma ring are flushed before we
1503 	 * continue
1504 	 */
1505 	wmb();
1506 
1507 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1508 		if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1509 			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1510 	} else {
1511 		int next_idx;
1512 
1513 		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
1514 					 ring->dma_size);
1515 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1516 	}
1517 
1518 	return 0;
1519 
1520 err_dma:
1521 	do {
1522 		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1523 
1524 		/* unmap dma */
1525 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1526 
1527 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1528 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1529 			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1530 
1531 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1532 		itxd_pdma = qdma_to_pdma(ring, itxd);
1533 	} while (itxd != txd);
1534 
1535 	return -ENOMEM;
1536 }
1537 
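/* Count the TX descriptors an skb needs; with GSO a fragment may need several
 * because each descriptor carries at most dma_max_len bytes.
 */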
1538 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1539 {
1540 	int i, nfrags = 1;
1541 	skb_frag_t *frag;
1542 
1543 	if (skb_is_gso(skb)) {
1544 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1545 			frag = &skb_shinfo(skb)->frags[i];
1546 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1547 					       eth->soc->txrx.dma_max_len);
1548 		}
1549 	} else {
1550 		nfrags += skb_shinfo(skb)->nr_frags;
1551 	}
1552 
1553 	return nfrags;
1554 }
1555 
1556 static int mtk_queue_stopped(struct mtk_eth *eth)
1557 {
1558 	int i;
1559 
1560 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1561 		if (!eth->netdev[i])
1562 			continue;
1563 		if (netif_queue_stopped(eth->netdev[i]))
1564 			return 1;
1565 	}
1566 
1567 	return 0;
1568 }
1569 
1570 static void mtk_wake_queue(struct mtk_eth *eth)
1571 {
1572 	int i;
1573 
1574 	for (i = 0; i < MTK_MAX_DEVS; i++) {
1575 		if (!eth->netdev[i])
1576 			continue;
1577 		netif_tx_wake_all_queues(eth->netdev[i]);
1578 	}
1579 }
1580 
1581 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1582 {
1583 	struct mtk_mac *mac = netdev_priv(dev);
1584 	struct mtk_eth *eth = mac->hw;
1585 	struct mtk_tx_ring *ring = &eth->tx_ring;
1586 	struct net_device_stats *stats = &dev->stats;
1587 	bool gso = false;
1588 	int tx_num;
1589 
1590 	/* normally we can rely on the stack not calling this more than once,
1591 	 * however we have 2 queues running on the same ring so we need to lock
1592 	 * the ring access
1593 	 */
1594 	spin_lock(&eth->page_lock);
1595 
1596 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1597 		goto drop;
1598 
1599 	tx_num = mtk_cal_txd_req(eth, skb);
1600 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1601 		netif_tx_stop_all_queues(dev);
1602 		netif_err(eth, tx_queued, dev,
1603 			  "Tx Ring full when queue awake!\n");
1604 		spin_unlock(&eth->page_lock);
1605 		return NETDEV_TX_BUSY;
1606 	}
1607 
1608 	/* TSO: fill MSS info in tcp checksum field */
1609 	if (skb_is_gso(skb)) {
1610 		if (skb_cow_head(skb, 0)) {
1611 			netif_warn(eth, tx_err, dev,
1612 				   "GSO expand head fail.\n");
1613 			goto drop;
1614 		}
1615 
1616 		if (skb_shinfo(skb)->gso_type &
1617 				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1618 			gso = true;
1619 			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1620 		}
1621 	}
1622 
1623 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1624 		goto drop;
1625 
1626 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1627 		netif_tx_stop_all_queues(dev);
1628 
1629 	spin_unlock(&eth->page_lock);
1630 
1631 	return NETDEV_TX_OK;
1632 
1633 drop:
1634 	spin_unlock(&eth->page_lock);
1635 	stats->tx_dropped++;
1636 	dev_kfree_skb_any(skb);
1637 	return NETDEV_TX_OK;
1638 }
1639 
1640 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1641 {
1642 	int i;
1643 	struct mtk_rx_ring *ring;
1644 	int idx;
1645 
1646 	if (!eth->hwlro)
1647 		return &eth->rx_ring[0];
1648 
1649 	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1650 		struct mtk_rx_dma *rxd;
1651 
1652 		ring = &eth->rx_ring[i];
1653 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1654 		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1655 		if (rxd->rxd2 & RX_DMA_DONE) {
1656 			ring->calc_idx_update = true;
1657 			return ring;
1658 		}
1659 	}
1660 
1661 	return NULL;
1662 }
1663 
1664 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1665 {
1666 	struct mtk_rx_ring *ring;
1667 	int i;
1668 
1669 	if (!eth->hwlro) {
1670 		ring = &eth->rx_ring[0];
1671 		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1672 	} else {
1673 		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1674 			ring = &eth->rx_ring[i];
1675 			if (ring->calc_idx_update) {
1676 				ring->calc_idx_update = false;
1677 				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1678 			}
1679 		}
1680 	}
1681 }
1682 
1683 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1684 {
1685 	return mtk_is_netsys_v2_or_greater(eth);
1686 }
1687 
1688 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1689 					      struct xdp_rxq_info *xdp_q,
1690 					      int id, int size)
1691 {
1692 	struct page_pool_params pp_params = {
1693 		.order = 0,
1694 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1695 		.pool_size = size,
1696 		.nid = NUMA_NO_NODE,
1697 		.dev = eth->dma_dev,
1698 		.offset = MTK_PP_HEADROOM,
1699 		.max_len = MTK_PP_MAX_BUF_SIZE,
1700 	};
1701 	struct page_pool *pp;
1702 	int err;
1703 
1704 	pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1705 							  : DMA_FROM_DEVICE;
1706 	pp = page_pool_create(&pp_params);
1707 	if (IS_ERR(pp))
1708 		return pp;
1709 
1710 	err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
1711 				 eth->rx_napi.napi_id, PAGE_SIZE);
1712 	if (err < 0)
1713 		goto err_free_pp;
1714 
1715 	err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1716 	if (err)
1717 		goto err_unregister_rxq;
1718 
1719 	return pp;
1720 
1721 err_unregister_rxq:
1722 	xdp_rxq_info_unreg(xdp_q);
1723 err_free_pp:
1724 	page_pool_destroy(pp);
1725 
1726 	return ERR_PTR(err);
1727 }
1728 
1729 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1730 				    gfp_t gfp_mask)
1731 {
1732 	struct page *page;
1733 
1734 	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1735 	if (!page)
1736 		return NULL;
1737 
1738 	*dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1739 	return page_address(page);
1740 }
1741 
1742 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1743 {
1744 	if (ring->page_pool)
1745 		page_pool_put_full_page(ring->page_pool,
1746 					virt_to_head_page(data), napi);
1747 	else
1748 		skb_free_frag(data);
1749 }
1750 
1751 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1752 			     struct mtk_tx_dma_desc_info *txd_info,
1753 			     struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1754 			     void *data, u16 headroom, int index, bool dma_map)
1755 {
1756 	struct mtk_tx_ring *ring = &eth->tx_ring;
1757 	struct mtk_mac *mac = netdev_priv(dev);
1758 	struct mtk_tx_dma *txd_pdma;
1759 
1760 	if (dma_map) {  /* ndo_xdp_xmit */
1761 		txd_info->addr = dma_map_single(eth->dma_dev, data,
1762 						txd_info->size, DMA_TO_DEVICE);
1763 		if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1764 			return -ENOMEM;
1765 
1766 		tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1767 	} else {
1768 		struct page *page = virt_to_head_page(data);
1769 
1770 		txd_info->addr = page_pool_get_dma_addr(page) +
1771 				 sizeof(struct xdp_frame) + headroom;
1772 		dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1773 					   txd_info->size, DMA_BIDIRECTIONAL);
1774 	}
1775 	mtk_tx_set_dma_desc(dev, txd, txd_info);
1776 
1777 	tx_buf->mac_id = mac->id;
1778 	tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1779 	tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1780 
1781 	txd_pdma = qdma_to_pdma(ring, txd);
1782 	setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1783 		     index);
1784 
1785 	return 0;
1786 }
1787 
1788 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1789 				struct net_device *dev, bool dma_map)
1790 {
1791 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1792 	const struct mtk_soc_data *soc = eth->soc;
1793 	struct mtk_tx_ring *ring = &eth->tx_ring;
1794 	struct mtk_mac *mac = netdev_priv(dev);
1795 	struct mtk_tx_dma_desc_info txd_info = {
1796 		.size	= xdpf->len,
1797 		.first	= true,
1798 		.last	= !xdp_frame_has_frags(xdpf),
1799 		.qid	= mac->id,
1800 	};
1801 	int err, index = 0, n_desc = 1, nr_frags;
1802 	struct mtk_tx_buf *htx_buf, *tx_buf;
1803 	struct mtk_tx_dma *htxd, *txd;
1804 	void *data = xdpf->data;
1805 
1806 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1807 		return -EBUSY;
1808 
1809 	nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1810 	if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1811 		return -EBUSY;
1812 
1813 	spin_lock(&eth->page_lock);
1814 
1815 	txd = ring->next_free;
1816 	if (txd == ring->last_free) {
1817 		spin_unlock(&eth->page_lock);
1818 		return -ENOMEM;
1819 	}
1820 	htxd = txd;
1821 
1822 	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
1823 	memset(tx_buf, 0, sizeof(*tx_buf));
1824 	htx_buf = tx_buf;
1825 
1826 	for (;;) {
1827 		err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1828 					data, xdpf->headroom, index, dma_map);
1829 		if (err < 0)
1830 			goto unmap;
1831 
1832 		if (txd_info.last)
1833 			break;
1834 
1835 		if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1836 			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1837 			if (txd == ring->last_free)
1838 				goto unmap;
1839 
1840 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1841 						    soc->txrx.txd_size);
1842 			memset(tx_buf, 0, sizeof(*tx_buf));
1843 			n_desc++;
1844 		}
1845 
1846 		memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1847 		txd_info.size = skb_frag_size(&sinfo->frags[index]);
1848 		txd_info.last = index + 1 == nr_frags;
1849 		txd_info.qid = mac->id;
1850 		data = skb_frag_address(&sinfo->frags[index]);
1851 
1852 		index++;
1853 	}
1854 	/* store xdpf for cleanup */
1855 	htx_buf->data = xdpf;
1856 
1857 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1858 		struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1859 
1860 		if (index & 1)
1861 			txd_pdma->txd2 |= TX_DMA_LS0;
1862 		else
1863 			txd_pdma->txd2 |= TX_DMA_LS1;
1864 	}
1865 
1866 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1867 	atomic_sub(n_desc, &ring->free_count);
1868 
1869 	/* make sure that all changes to the dma ring are flushed before we
1870 	 * continue
1871 	 */
1872 	wmb();
1873 
1874 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1875 		mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1876 	} else {
1877 		int idx;
1878 
1879 		idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
1880 		mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1881 			MT7628_TX_CTX_IDX0);
1882 	}
1883 
1884 	spin_unlock(&eth->page_lock);
1885 
1886 	return 0;
1887 
1888 unmap:
1889 	while (htxd != txd) {
1890 		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
1891 		mtk_tx_unmap(eth, tx_buf, NULL, false);
1892 
1893 		htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1894 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1895 			struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1896 
1897 			txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1898 		}
1899 
1900 		htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1901 	}
1902 
1903 	spin_unlock(&eth->page_lock);
1904 
1905 	return err;
1906 }
1907 
1908 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1909 			struct xdp_frame **frames, u32 flags)
1910 {
1911 	struct mtk_mac *mac = netdev_priv(dev);
1912 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1913 	struct mtk_eth *eth = mac->hw;
1914 	int i, nxmit = 0;
1915 
1916 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1917 		return -EINVAL;
1918 
1919 	for (i = 0; i < num_frame; i++) {
1920 		if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1921 			break;
1922 		nxmit++;
1923 	}
1924 
1925 	u64_stats_update_begin(&hw_stats->syncp);
1926 	hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1927 	hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1928 	u64_stats_update_end(&hw_stats->syncp);
1929 
1930 	return nxmit;
1931 }
1932 
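/* Run the attached XDP program (if any) on a received buffer and act on the
 * verdict: recycle the page on drop/abort, transmit on XDP_TX, hand the
 * buffer off on XDP_REDIRECT, and bump the matching XDP statistic.
 */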
1933 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1934 		       struct xdp_buff *xdp, struct net_device *dev)
1935 {
1936 	struct mtk_mac *mac = netdev_priv(dev);
1937 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
1938 	u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1939 	struct bpf_prog *prog;
1940 	u32 act = XDP_PASS;
1941 
1942 	rcu_read_lock();
1943 
1944 	prog = rcu_dereference(eth->prog);
1945 	if (!prog)
1946 		goto out;
1947 
1948 	act = bpf_prog_run_xdp(prog, xdp);
1949 	switch (act) {
1950 	case XDP_PASS:
1951 		count = &hw_stats->xdp_stats.rx_xdp_pass;
1952 		goto update_stats;
1953 	case XDP_REDIRECT:
1954 		if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1955 			act = XDP_DROP;
1956 			break;
1957 		}
1958 
1959 		count = &hw_stats->xdp_stats.rx_xdp_redirect;
1960 		goto update_stats;
1961 	case XDP_TX: {
1962 		struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1963 
1964 		if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1965 			count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1966 			act = XDP_DROP;
1967 			break;
1968 		}
1969 
1970 		count = &hw_stats->xdp_stats.rx_xdp_tx;
1971 		goto update_stats;
1972 	}
1973 	default:
1974 		bpf_warn_invalid_xdp_action(dev, prog, act);
1975 		fallthrough;
1976 	case XDP_ABORTED:
1977 		trace_xdp_exception(dev, prog, act);
1978 		fallthrough;
1979 	case XDP_DROP:
1980 		break;
1981 	}
1982 
1983 	page_pool_put_full_page(ring->page_pool,
1984 				virt_to_head_page(xdp->data), true);
1985 
1986 update_stats:
1987 	u64_stats_update_begin(&hw_stats->syncp);
1988 	*count = *count + 1;
1989 	u64_stats_update_end(&hw_stats->syncp);
1990 out:
1991 	rcu_read_unlock();
1992 
1993 	return act;
1994 }
1995 
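/* RX NAPI poll: reap up to @budget descriptors from the hardware RX rings,
 * run XDP where a program is attached, deliver the resulting skbs via GRO
 * and refill the ring with freshly mapped buffers.
 */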
1996 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1997 		       struct mtk_eth *eth)
1998 {
1999 	struct dim_sample dim_sample = {};
2000 	struct mtk_rx_ring *ring;
2001 	bool xdp_flush = false;
2002 	int idx;
2003 	struct sk_buff *skb;
2004 	u64 addr64 = 0;
2005 	u8 *data, *new_data;
2006 	struct mtk_rx_dma_v2 *rxd, trxd;
2007 	int done = 0, bytes = 0;
2008 	dma_addr_t dma_addr = DMA_MAPPING_ERROR;
2009 
2010 	while (done < budget) {
2011 		unsigned int pktlen, *rxdcsum;
2012 		struct net_device *netdev;
2013 		u32 hash, reason;
2014 		int mac = 0;
2015 
2016 		ring = mtk_get_rx_ring(eth);
2017 		if (unlikely(!ring))
2018 			goto rx_done;
2019 
2020 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
2021 		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
2022 		data = ring->data[idx];
2023 
2024 		if (!mtk_rx_get_desc(eth, &trxd, rxd))
2025 			break;
2026 
2027 		/* find out which mac the packet comes from. values start at 1 */
2028 		if (mtk_is_netsys_v2_or_greater(eth)) {
2029 			u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
2030 
2031 			switch (val) {
2032 			case PSE_GDM1_PORT:
2033 			case PSE_GDM2_PORT:
2034 				mac = val - 1;
2035 				break;
2036 			case PSE_GDM3_PORT:
2037 				mac = MTK_GMAC3_ID;
2038 				break;
2039 			default:
2040 				break;
2041 			}
2042 		} else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
2043 			   !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
2044 			mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
2045 		}
2046 
2047 		if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
2048 			     !eth->netdev[mac]))
2049 			goto release_desc;
2050 
2051 		netdev = eth->netdev[mac];
2052 
2053 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
2054 			goto release_desc;
2055 
2056 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
2057 
2058 		/* alloc new buffer */
2059 		if (ring->page_pool) {
2060 			struct page *page = virt_to_head_page(data);
2061 			struct xdp_buff xdp;
2062 			u32 ret;
2063 
2064 			new_data = mtk_page_pool_get_buff(ring->page_pool,
2065 							  &dma_addr,
2066 							  GFP_ATOMIC);
2067 			if (unlikely(!new_data)) {
2068 				netdev->stats.rx_dropped++;
2069 				goto release_desc;
2070 			}
2071 
2072 			dma_sync_single_for_cpu(eth->dma_dev,
2073 				page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
2074 				pktlen, page_pool_get_dma_dir(ring->page_pool));
2075 
2076 			xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
2077 			xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
2078 					 false);
2079 			xdp_buff_clear_frags_flag(&xdp);
2080 
2081 			ret = mtk_xdp_run(eth, ring, &xdp, netdev);
2082 			if (ret == XDP_REDIRECT)
2083 				xdp_flush = true;
2084 
2085 			if (ret != XDP_PASS)
2086 				goto skip_rx;
2087 
2088 			skb = build_skb(data, PAGE_SIZE);
2089 			if (unlikely(!skb)) {
2090 				page_pool_put_full_page(ring->page_pool,
2091 							page, true);
2092 				netdev->stats.rx_dropped++;
2093 				goto skip_rx;
2094 			}
2095 
2096 			skb_reserve(skb, xdp.data - xdp.data_hard_start);
2097 			skb_put(skb, xdp.data_end - xdp.data);
2098 			skb_mark_for_recycle(skb);
2099 		} else {
2100 			if (ring->frag_size <= PAGE_SIZE)
2101 				new_data = napi_alloc_frag(ring->frag_size);
2102 			else
2103 				new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
2104 
2105 			if (unlikely(!new_data)) {
2106 				netdev->stats.rx_dropped++;
2107 				goto release_desc;
2108 			}
2109 
2110 			dma_addr = dma_map_single(eth->dma_dev,
2111 				new_data + NET_SKB_PAD + eth->ip_align,
2112 				ring->buf_size, DMA_FROM_DEVICE);
2113 			if (unlikely(dma_mapping_error(eth->dma_dev,
2114 						       dma_addr))) {
2115 				skb_free_frag(new_data);
2116 				netdev->stats.rx_dropped++;
2117 				goto release_desc;
2118 			}
2119 
2120 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2121 				addr64 = RX_DMA_GET_ADDR64(trxd.rxd2);
2122 
2123 			dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
2124 					 ring->buf_size, DMA_FROM_DEVICE);
2125 
2126 			skb = build_skb(data, ring->frag_size);
2127 			if (unlikely(!skb)) {
2128 				netdev->stats.rx_dropped++;
2129 				skb_free_frag(data);
2130 				goto skip_rx;
2131 			}
2132 
2133 			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2134 			skb_put(skb, pktlen);
2135 		}
2136 
2137 		skb->dev = netdev;
2138 		bytes += skb->len;
2139 
2140 		if (mtk_is_netsys_v2_or_greater(eth)) {
2141 			reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2142 			hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2143 			if (hash != MTK_RXD5_FOE_ENTRY)
2144 				skb_set_hash(skb, jhash_1word(hash, 0),
2145 					     PKT_HASH_TYPE_L4);
2146 			rxdcsum = &trxd.rxd3;
2147 		} else {
2148 			reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2149 			hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2150 			if (hash != MTK_RXD4_FOE_ENTRY)
2151 				skb_set_hash(skb, jhash_1word(hash, 0),
2152 					     PKT_HASH_TYPE_L4);
2153 			rxdcsum = &trxd.rxd4;
2154 		}
2155 
2156 		if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
2157 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2158 		else
2159 			skb_checksum_none_assert(skb);
2160 		skb->protocol = eth_type_trans(skb, netdev);
2161 
2162 		/* When using VLAN untagging in combination with DSA, the
2163 		 * hardware treats the MTK special tag as a VLAN and untags it.
2164 		 */
2165 		if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
2166 		    netdev_uses_dsa(netdev)) {
2167 			unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
2168 
2169 			if (port < ARRAY_SIZE(eth->dsa_meta) &&
2170 			    eth->dsa_meta[port])
2171 				skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2172 		}
2173 
2174 		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2175 			mtk_ppe_check_skb(eth->ppe[0], skb, hash);
2176 
2177 		skb_record_rx_queue(skb, 0);
2178 		napi_gro_receive(napi, skb);
2179 
2180 skip_rx:
2181 		ring->data[idx] = new_data;
2182 		rxd->rxd1 = (unsigned int)dma_addr;
2183 release_desc:
2184 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2185 			rxd->rxd2 = RX_DMA_LSO;
2186 		else
2187 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2188 
2189 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
2190 		    likely(dma_addr != DMA_MAPPING_ERROR))
2191 			rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2192 
2193 		ring->calc_idx = idx;
2194 		done++;
2195 	}
2196 
2197 rx_done:
2198 	if (done) {
2199 		/* make sure that all changes to the dma ring are flushed before
2200 		 * we continue
2201 		 */
2202 		wmb();
2203 		mtk_update_rx_cpu_idx(eth);
2204 	}
2205 
2206 	eth->rx_packets += done;
2207 	eth->rx_bytes += bytes;
2208 	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2209 			  &dim_sample);
2210 	net_dim(&eth->rx_dim, dim_sample);
2211 
2212 	if (xdp_flush)
2213 		xdp_do_flush_map();
2214 
2215 	return done;
2216 }
2217 
2218 struct mtk_poll_state {
2219 	struct netdev_queue *txq;
2220 	unsigned int total;
2221 	unsigned int done;
2222 	unsigned int bytes;
2223 };
2224 
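/* Account one completed TX buffer and batch the BQL updates per TX queue so
 * netdev_tx_completed_queue() is only called when the queue changes.
 */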
2225 static void
2226 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2227 		 struct sk_buff *skb)
2228 {
2229 	struct netdev_queue *txq;
2230 	struct net_device *dev;
2231 	unsigned int bytes = skb->len;
2232 
2233 	state->total++;
2234 	eth->tx_packets++;
2235 	eth->tx_bytes += bytes;
2236 
2237 	dev = eth->netdev[mac];
2238 	if (!dev)
2239 		return;
2240 
2241 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2242 	if (state->txq == txq) {
2243 		state->done++;
2244 		state->bytes += bytes;
2245 		return;
2246 	}
2247 
2248 	if (state->txq)
2249 		netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2250 
2251 	state->txq = txq;
2252 	state->done = 1;
2253 	state->bytes = bytes;
2254 }
2255 
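/* Reclaim completed QDMA TX descriptors between the last CPU release pointer
 * and the current hardware DMA pointer, unmapping the buffers and returning
 * the descriptors to the free pool.
 */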
2256 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2257 			    struct mtk_poll_state *state)
2258 {
2259 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2260 	struct mtk_tx_ring *ring = &eth->tx_ring;
2261 	struct mtk_tx_buf *tx_buf;
2262 	struct xdp_frame_bulk bq;
2263 	struct mtk_tx_dma *desc;
2264 	u32 cpu, dma;
2265 
2266 	cpu = ring->last_free_ptr;
2267 	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2268 
2269 	desc = mtk_qdma_phys_to_virt(ring, cpu);
2270 	xdp_frame_bulk_init(&bq);
2271 
2272 	while ((cpu != dma) && budget) {
2273 		u32 next_cpu = desc->txd2;
2274 
2275 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2276 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2277 			break;
2278 
2279 		tx_buf = mtk_desc_to_tx_buf(ring, desc,
2280 					    eth->soc->txrx.txd_size);
2281 		if (!tx_buf->data)
2282 			break;
2283 
2284 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2285 			if (tx_buf->type == MTK_TYPE_SKB)
2286 				mtk_poll_tx_done(eth, state, tx_buf->mac_id,
2287 						 tx_buf->data);
2288 
2289 			budget--;
2290 		}
2291 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2292 
2293 		ring->last_free = desc;
2294 		atomic_inc(&ring->free_count);
2295 
2296 		cpu = next_cpu;
2297 	}
2298 	xdp_flush_frame_bulk(&bq);
2299 
2300 	ring->last_free_ptr = cpu;
2301 	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2302 
2303 	return budget;
2304 }
2305 
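/* PDMA variant of the TX reclaim loop (MT7628/88): walk from the software CPU
 * index to the hardware DTX index and release completed buffers.
 */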
2306 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2307 			    struct mtk_poll_state *state)
2308 {
2309 	struct mtk_tx_ring *ring = &eth->tx_ring;
2310 	struct mtk_tx_buf *tx_buf;
2311 	struct xdp_frame_bulk bq;
2312 	struct mtk_tx_dma *desc;
2313 	u32 cpu, dma;
2314 
2315 	cpu = ring->cpu_idx;
2316 	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2317 	xdp_frame_bulk_init(&bq);
2318 
2319 	while ((cpu != dma) && budget) {
2320 		tx_buf = &ring->buf[cpu];
2321 		if (!tx_buf->data)
2322 			break;
2323 
2324 		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2325 			if (tx_buf->type == MTK_TYPE_SKB)
2326 				mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2327 			budget--;
2328 		}
2329 		mtk_tx_unmap(eth, tx_buf, &bq, true);
2330 
2331 		desc = ring->dma + cpu * eth->soc->txrx.txd_size;
2332 		ring->last_free = desc;
2333 		atomic_inc(&ring->free_count);
2334 
2335 		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2336 	}
2337 	xdp_flush_frame_bulk(&bq);
2338 
2339 	ring->cpu_idx = cpu;
2340 
2341 	return budget;
2342 }
2343 
2344 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2345 {
2346 	struct mtk_tx_ring *ring = &eth->tx_ring;
2347 	struct dim_sample dim_sample = {};
2348 	struct mtk_poll_state state = {};
2349 
2350 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2351 		budget = mtk_poll_tx_qdma(eth, budget, &state);
2352 	else
2353 		budget = mtk_poll_tx_pdma(eth, budget, &state);
2354 
2355 	if (state.txq)
2356 		netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2357 
2358 	dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2359 			  &dim_sample);
2360 	net_dim(&eth->tx_dim, dim_sample);
2361 
2362 	if (mtk_queue_stopped(eth) &&
2363 	    (atomic_read(&ring->free_count) > ring->thresh))
2364 		mtk_wake_queue(eth);
2365 
2366 	return state.total;
2367 }
2368 
2369 static void mtk_handle_status_irq(struct mtk_eth *eth)
2370 {
2371 	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2372 
2373 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2374 		mtk_stats_update(eth);
2375 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2376 			MTK_INT_STATUS2);
2377 	}
2378 }
2379 
2380 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2381 {
2382 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2383 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2384 	int tx_done = 0;
2385 
2386 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2387 		mtk_handle_status_irq(eth);
2388 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2389 	tx_done = mtk_poll_tx(eth, budget);
2390 
2391 	if (unlikely(netif_msg_intr(eth))) {
2392 		dev_info(eth->dev,
2393 			 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2394 			 mtk_r32(eth, reg_map->tx_irq_status),
2395 			 mtk_r32(eth, reg_map->tx_irq_mask));
2396 	}
2397 
2398 	if (tx_done == budget)
2399 		return budget;
2400 
2401 	if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2402 		return budget;
2403 
2404 	if (napi_complete_done(napi, tx_done))
2405 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2406 
2407 	return tx_done;
2408 }
2409 
2410 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2411 {
2412 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2413 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2414 	int rx_done_total = 0;
2415 
2416 	mtk_handle_status_irq(eth);
2417 
2418 	do {
2419 		int rx_done;
2420 
2421 		mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
2422 			reg_map->pdma.irq_status);
2423 		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2424 		rx_done_total += rx_done;
2425 
2426 		if (unlikely(netif_msg_intr(eth))) {
2427 			dev_info(eth->dev,
2428 				 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2429 				 mtk_r32(eth, reg_map->pdma.irq_status),
2430 				 mtk_r32(eth, reg_map->pdma.irq_mask));
2431 		}
2432 
2433 		if (rx_done_total == budget)
2434 			return budget;
2435 
2436 	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
2437 		 eth->soc->txrx.rx_irq_done_mask);
2438 
2439 	if (napi_complete_done(napi, rx_done_total))
2440 		mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2441 
2442 	return rx_done_total;
2443 }
2444 
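/* Allocate and initialise the TX descriptor ring. Descriptors live in SRAM on
 * SoCs with the MTK_SRAM capability, otherwise in coherent DMA memory. On
 * PDMA-only SoCs an additional ring of real hardware descriptors
 * (ring->dma_pdma) is allocated next to the software ring.
 */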
2445 static int mtk_tx_alloc(struct mtk_eth *eth)
2446 {
2447 	const struct mtk_soc_data *soc = eth->soc;
2448 	struct mtk_tx_ring *ring = &eth->tx_ring;
2449 	int i, sz = soc->txrx.txd_size;
2450 	struct mtk_tx_dma_v2 *txd;
2451 	int ring_size;
2452 	u32 ofs, val;
2453 
2454 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2455 		ring_size = MTK_QDMA_RING_SIZE;
2456 	else
2457 		ring_size = MTK_DMA_SIZE;
2458 
2459 	ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2460 			    GFP_KERNEL);
2461 	if (!ring->buf)
2462 		goto no_tx_mem;
2463 
2464 	if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
2465 		ring->dma = eth->sram_base + ring_size * sz;
2466 		ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
2467 	} else {
2468 		ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2469 					       &ring->phys, GFP_KERNEL);
2470 	}
2471 
2472 	if (!ring->dma)
2473 		goto no_tx_mem;
2474 
2475 	for (i = 0; i < ring_size; i++) {
2476 		int next = (i + 1) % ring_size;
2477 		u32 next_ptr = ring->phys + next * sz;
2478 
2479 		txd = ring->dma + i * sz;
2480 		txd->txd2 = next_ptr;
2481 		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2482 		txd->txd4 = 0;
2483 		if (mtk_is_netsys_v2_or_greater(eth)) {
2484 			txd->txd5 = 0;
2485 			txd->txd6 = 0;
2486 			txd->txd7 = 0;
2487 			txd->txd8 = 0;
2488 		}
2489 	}
2490 
2491 	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
2492 	 * only as a software shadow ring. The real HW descriptors are the
2493 	 * PDMA descriptors in ring->dma_pdma.
2494 	 */
2495 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2496 		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2497 						    &ring->phys_pdma, GFP_KERNEL);
2498 		if (!ring->dma_pdma)
2499 			goto no_tx_mem;
2500 
2501 		for (i = 0; i < ring_size; i++) {
2502 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2503 			ring->dma_pdma[i].txd4 = 0;
2504 		}
2505 	}
2506 
2507 	ring->dma_size = ring_size;
2508 	atomic_set(&ring->free_count, ring_size - 2);
2509 	ring->next_free = ring->dma;
2510 	ring->last_free = (void *)txd;
2511 	ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2512 	ring->thresh = MAX_SKB_FRAGS;
2513 
2514 	/* make sure that all changes to the dma ring are flushed before we
2515 	 * continue
2516 	 */
2517 	wmb();
2518 
2519 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2520 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2521 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2522 		mtk_w32(eth,
2523 			ring->phys + ((ring_size - 1) * sz),
2524 			soc->reg_map->qdma.crx_ptr);
2525 		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2526 
2527 		for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2528 			val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2529 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2530 
2531 			val = MTK_QTX_SCH_MIN_RATE_EN |
2532 			      /* minimum: 10 Mbps */
2533 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2534 			      FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2535 			      MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2536 			if (mtk_is_netsys_v1(eth))
2537 				val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2538 			mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2539 			ofs += MTK_QTX_OFFSET;
2540 		}
2541 		val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2542 		mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2543 		if (mtk_is_netsys_v2_or_greater(eth))
2544 			mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2545 	} else {
2546 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2547 		mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2548 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2549 		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2550 	}
2551 
2552 	return 0;
2553 
2554 no_tx_mem:
2555 	return -ENOMEM;
2556 }
2557 
2558 static void mtk_tx_clean(struct mtk_eth *eth)
2559 {
2560 	const struct mtk_soc_data *soc = eth->soc;
2561 	struct mtk_tx_ring *ring = &eth->tx_ring;
2562 	int i;
2563 
2564 	if (ring->buf) {
2565 		for (i = 0; i < ring->dma_size; i++)
2566 			mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2567 		kfree(ring->buf);
2568 		ring->buf = NULL;
2569 	}
2570 	if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
2571 		dma_free_coherent(eth->dma_dev,
2572 				  ring->dma_size * soc->txrx.txd_size,
2573 				  ring->dma, ring->phys);
2574 		ring->dma = NULL;
2575 	}
2576 
2577 	if (ring->dma_pdma) {
2578 		dma_free_coherent(eth->dma_dev,
2579 				  ring->dma_size * soc->txrx.txd_size,
2580 				  ring->dma_pdma, ring->phys_pdma);
2581 		ring->dma_pdma = NULL;
2582 	}
2583 }
2584 
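/* Allocate one RX ring (normal, HW-LRO or QDMA), back it with page pool or
 * fragment buffers, initialise its descriptors and program the ring base,
 * size and CPU index registers.
 */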
2585 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2586 {
2587 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2588 	struct mtk_rx_ring *ring;
2589 	int rx_data_len, rx_dma_size, tx_ring_size;
2590 	int i;
2591 
2592 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2593 		tx_ring_size = MTK_QDMA_RING_SIZE;
2594 	else
2595 		tx_ring_size = MTK_DMA_SIZE;
2596 
2597 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2598 		if (ring_no)
2599 			return -EINVAL;
2600 		ring = &eth->rx_ring_qdma;
2601 	} else {
2602 		ring = &eth->rx_ring[ring_no];
2603 	}
2604 
2605 	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2606 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2607 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2608 	} else {
2609 		rx_data_len = ETH_DATA_LEN;
2610 		rx_dma_size = MTK_DMA_SIZE;
2611 	}
2612 
2613 	ring->frag_size = mtk_max_frag_size(rx_data_len);
2614 	ring->buf_size = mtk_max_buf_size(ring->frag_size);
2615 	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2616 			     GFP_KERNEL);
2617 	if (!ring->data)
2618 		return -ENOMEM;
2619 
2620 	if (mtk_page_pool_enabled(eth)) {
2621 		struct page_pool *pp;
2622 
2623 		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2624 					  rx_dma_size);
2625 		if (IS_ERR(pp))
2626 			return PTR_ERR(pp);
2627 
2628 		ring->page_pool = pp;
2629 	}
2630 
2631 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
2632 	    rx_flag != MTK_RX_FLAGS_NORMAL) {
2633 		ring->dma = dma_alloc_coherent(eth->dma_dev,
2634 					       rx_dma_size * eth->soc->txrx.rxd_size,
2635 					       &ring->phys, GFP_KERNEL);
2636 	} else {
2637 		struct mtk_tx_ring *tx_ring = &eth->tx_ring;
2638 
2639 		ring->dma = tx_ring->dma + tx_ring_size *
2640 			    eth->soc->txrx.txd_size * (ring_no + 1);
2641 		ring->phys = tx_ring->phys + tx_ring_size *
2642 			     eth->soc->txrx.txd_size * (ring_no + 1);
2643 	}
2644 
2645 	if (!ring->dma)
2646 		return -ENOMEM;
2647 
2648 	for (i = 0; i < rx_dma_size; i++) {
2649 		struct mtk_rx_dma_v2 *rxd;
2650 		dma_addr_t dma_addr;
2651 		void *data;
2652 
2653 		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2654 		if (ring->page_pool) {
2655 			data = mtk_page_pool_get_buff(ring->page_pool,
2656 						      &dma_addr, GFP_KERNEL);
2657 			if (!data)
2658 				return -ENOMEM;
2659 		} else {
2660 			if (ring->frag_size <= PAGE_SIZE)
2661 				data = netdev_alloc_frag(ring->frag_size);
2662 			else
2663 				data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2664 
2665 			if (!data)
2666 				return -ENOMEM;
2667 
2668 			dma_addr = dma_map_single(eth->dma_dev,
2669 				data + NET_SKB_PAD + eth->ip_align,
2670 				ring->buf_size, DMA_FROM_DEVICE);
2671 			if (unlikely(dma_mapping_error(eth->dma_dev,
2672 						       dma_addr))) {
2673 				skb_free_frag(data);
2674 				return -ENOMEM;
2675 			}
2676 		}
2677 		rxd->rxd1 = (unsigned int)dma_addr;
2678 		ring->data[i] = data;
2679 
2680 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2681 			rxd->rxd2 = RX_DMA_LSO;
2682 		else
2683 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2684 
2685 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2686 			rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2687 
2688 		rxd->rxd3 = 0;
2689 		rxd->rxd4 = 0;
2690 		if (mtk_is_netsys_v2_or_greater(eth)) {
2691 			rxd->rxd5 = 0;
2692 			rxd->rxd6 = 0;
2693 			rxd->rxd7 = 0;
2694 			rxd->rxd8 = 0;
2695 		}
2696 	}
2697 
2698 	ring->dma_size = rx_dma_size;
2699 	ring->calc_idx_update = false;
2700 	ring->calc_idx = rx_dma_size - 1;
2701 	if (rx_flag == MTK_RX_FLAGS_QDMA)
2702 		ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2703 				    ring_no * MTK_QRX_OFFSET;
2704 	else
2705 		ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2706 				    ring_no * MTK_QRX_OFFSET;
2707 	/* make sure that all changes to the dma ring are flushed before we
2708 	 * continue
2709 	 */
2710 	wmb();
2711 
2712 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
2713 		mtk_w32(eth, ring->phys,
2714 			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2715 		mtk_w32(eth, rx_dma_size,
2716 			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2717 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2718 			reg_map->qdma.rst_idx);
2719 	} else {
2720 		mtk_w32(eth, ring->phys,
2721 			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2722 		mtk_w32(eth, rx_dma_size,
2723 			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2724 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2725 			reg_map->pdma.rst_idx);
2726 	}
2727 	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2728 
2729 	return 0;
2730 }
2731 
2732 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
2733 {
2734 	u64 addr64 = 0;
2735 	int i;
2736 
2737 	if (ring->data && ring->dma) {
2738 		for (i = 0; i < ring->dma_size; i++) {
2739 			struct mtk_rx_dma *rxd;
2740 
2741 			if (!ring->data[i])
2742 				continue;
2743 
2744 			rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2745 			if (!rxd->rxd1)
2746 				continue;
2747 
2748 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2749 				addr64 = RX_DMA_GET_ADDR64(rxd->rxd2);
2750 
2751 			dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64),
2752 					 ring->buf_size, DMA_FROM_DEVICE);
2753 			mtk_rx_put_buff(ring, ring->data[i], false);
2754 		}
2755 		kfree(ring->data);
2756 		ring->data = NULL;
2757 	}
2758 
2759 	if (!in_sram && ring->dma) {
2760 		dma_free_coherent(eth->dma_dev,
2761 				  ring->dma_size * eth->soc->txrx.rxd_size,
2762 				  ring->dma, ring->phys);
2763 		ring->dma = NULL;
2764 	}
2765 
2766 	if (ring->page_pool) {
2767 		if (xdp_rxq_info_is_reg(&ring->xdp_q))
2768 			xdp_rxq_info_unreg(&ring->xdp_q);
2769 		page_pool_destroy(ring->page_pool);
2770 		ring->page_pool = NULL;
2771 	}
2772 }
2773 
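/* Configure the hardware LRO engine: put the LRO rings into auto-learn mode,
 * set the aggregation timers, counters and thresholds, and finally enable
 * LRO.
 */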
2774 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2775 {
2776 	int i;
2777 	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2778 	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2779 
2780 	/* set LRO rings to auto-learn modes */
2781 	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2782 
2783 	/* validate LRO ring */
2784 	ring_ctrl_dw2 |= MTK_RING_VLD;
2785 
2786 	/* set AGE timer (unit: 20us) */
2787 	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2788 	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2789 
2790 	/* set max AGG timer (unit: 20us) */
2791 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2792 
2793 	/* set max LRO AGG count */
2794 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2795 	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2796 
2797 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2798 		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2799 		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2800 		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2801 	}
2802 
2803 	/* IPv4 checksum update enable */
2804 	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2805 
2806 	/* switch priority comparison to packet count mode */
2807 	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2808 
2809 	/* bandwidth threshold setting */
2810 	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2811 
2812 	/* auto-learn score delta setting */
2813 	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2814 
2815 	/* set refresh timer for alternate (ALT) flows to 1 sec. (unit: 20us) */
2816 	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2817 		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2818 
2819 	/* set HW LRO mode & the max aggregation count for rx packets */
2820 	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2821 
2822 	/* the minimum remaining room of SDL0 in the RXD for LRO aggregation */
2823 	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2824 
2825 	/* enable HW LRO */
2826 	lro_ctrl_dw0 |= MTK_LRO_EN;
2827 
2828 	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2829 	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2830 
2831 	return 0;
2832 }
2833 
2834 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2835 {
2836 	int i;
2837 	u32 val;
2838 
2839 	/* relinquish lro rings, flush aggregated packets */
2840 	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2841 
2842 	/* wait for the relinquish requests to complete */
2843 	for (i = 0; i < 10; i++) {
2844 		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2845 		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2846 			msleep(20);
2847 			continue;
2848 		}
2849 		break;
2850 	}
2851 
2852 	/* invalidate lro rings */
2853 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2854 		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2855 
2856 	/* disable HW LRO */
2857 	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2858 }
2859 
2860 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2861 {
2862 	u32 reg_val;
2863 
2864 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2865 
2866 	/* invalidate the IP setting */
2867 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2868 
2869 	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2870 
2871 	/* validate the IP setting */
2872 	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2873 }
2874 
2875 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2876 {
2877 	u32 reg_val;
2878 
2879 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2880 
2881 	/* invalidate the IP setting */
2882 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2883 
2884 	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2885 }
2886 
2887 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2888 {
2889 	int cnt = 0;
2890 	int i;
2891 
2892 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2893 		if (mac->hwlro_ip[i])
2894 			cnt++;
2895 	}
2896 
2897 	return cnt;
2898 }
2899 
2900 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2901 				struct ethtool_rxnfc *cmd)
2902 {
2903 	struct ethtool_rx_flow_spec *fsp =
2904 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2905 	struct mtk_mac *mac = netdev_priv(dev);
2906 	struct mtk_eth *eth = mac->hw;
2907 	int hwlro_idx;
2908 
2909 	if ((fsp->flow_type != TCP_V4_FLOW) ||
2910 	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2911 	    (fsp->location > 1))
2912 		return -EINVAL;
2913 
2914 	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2915 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2916 
2917 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2918 
2919 	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2920 
2921 	return 0;
2922 }
2923 
2924 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2925 				struct ethtool_rxnfc *cmd)
2926 {
2927 	struct ethtool_rx_flow_spec *fsp =
2928 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2929 	struct mtk_mac *mac = netdev_priv(dev);
2930 	struct mtk_eth *eth = mac->hw;
2931 	int hwlro_idx;
2932 
2933 	if (fsp->location > 1)
2934 		return -EINVAL;
2935 
2936 	mac->hwlro_ip[fsp->location] = 0;
2937 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2938 
2939 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2940 
2941 	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2942 
2943 	return 0;
2944 }
2945 
2946 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2947 {
2948 	struct mtk_mac *mac = netdev_priv(dev);
2949 	struct mtk_eth *eth = mac->hw;
2950 	int i, hwlro_idx;
2951 
2952 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2953 		mac->hwlro_ip[i] = 0;
2954 		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2955 
2956 		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2957 	}
2958 
2959 	mac->hwlro_ip_cnt = 0;
2960 }
2961 
2962 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2963 				    struct ethtool_rxnfc *cmd)
2964 {
2965 	struct mtk_mac *mac = netdev_priv(dev);
2966 	struct ethtool_rx_flow_spec *fsp =
2967 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2968 
2969 	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2970 		return -EINVAL;
2971 
2972 	/* only tcp dst ipv4 is meaningful; the other fields are ignored */
2973 	fsp->flow_type = TCP_V4_FLOW;
2974 	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2975 	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2976 
2977 	fsp->h_u.tcp_ip4_spec.ip4src = 0;
2978 	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2979 	fsp->h_u.tcp_ip4_spec.psrc = 0;
2980 	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2981 	fsp->h_u.tcp_ip4_spec.pdst = 0;
2982 	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2983 	fsp->h_u.tcp_ip4_spec.tos = 0;
2984 	fsp->m_u.tcp_ip4_spec.tos = 0xff;
2985 
2986 	return 0;
2987 }
2988 
2989 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2990 				  struct ethtool_rxnfc *cmd,
2991 				  u32 *rule_locs)
2992 {
2993 	struct mtk_mac *mac = netdev_priv(dev);
2994 	int cnt = 0;
2995 	int i;
2996 
2997 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2998 		if (cnt == cmd->rule_cnt)
2999 			return -EMSGSIZE;
3000 
3001 		if (mac->hwlro_ip[i]) {
3002 			rule_locs[cnt] = i;
3003 			cnt++;
3004 		}
3005 	}
3006 
3007 	cmd->rule_cnt = cnt;
3008 
3009 	return 0;
3010 }
3011 
3012 static netdev_features_t mtk_fix_features(struct net_device *dev,
3013 					  netdev_features_t features)
3014 {
3015 	if (!(features & NETIF_F_LRO)) {
3016 		struct mtk_mac *mac = netdev_priv(dev);
3017 		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3018 
3019 		if (ip_cnt) {
3020 			netdev_info(dev, "RX flow rules are programmed, keeping LRO enabled\n");
3021 
3022 			features |= NETIF_F_LRO;
3023 		}
3024 	}
3025 
3026 	return features;
3027 }
3028 
3029 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
3030 {
3031 	netdev_features_t diff = dev->features ^ features;
3032 
3033 	if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
3034 		mtk_hwlro_netdev_disable(dev);
3035 
3036 	return 0;
3037 }
3038 
3039 /* wait for DMA to finish whatever it is doing before we start using it again */
3040 static int mtk_dma_busy_wait(struct mtk_eth *eth)
3041 {
3042 	unsigned int reg;
3043 	int ret;
3044 	u32 val;
3045 
3046 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3047 		reg = eth->soc->reg_map->qdma.glo_cfg;
3048 	else
3049 		reg = eth->soc->reg_map->pdma.glo_cfg;
3050 
3051 	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
3052 					!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
3053 					5, MTK_DMA_BUSY_TIMEOUT_US);
3054 	if (ret)
3055 		dev_err(eth->dev, "DMA init timeout\n");
3056 
3057 	return ret;
3058 }
3059 
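/* Bring up all DMA resources: the QDMA scratch ring where applicable, the TX
 * ring and every RX ring (QDMA, normal and HW-LRO), and program the QDMA
 * flow-control thresholds.
 */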
3060 static int mtk_dma_init(struct mtk_eth *eth)
3061 {
3062 	int err;
3063 	u32 i;
3064 
3065 	if (mtk_dma_busy_wait(eth))
3066 		return -EBUSY;
3067 
3068 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3069 		/* QDMA needs scratch memory for internal reordering of the
3070 		 * descriptors
3071 		 */
3072 		err = mtk_init_fq_dma(eth);
3073 		if (err)
3074 			return err;
3075 	}
3076 
3077 	err = mtk_tx_alloc(eth);
3078 	if (err)
3079 		return err;
3080 
3081 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3082 		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
3083 		if (err)
3084 			return err;
3085 	}
3086 
3087 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
3088 	if (err)
3089 		return err;
3090 
3091 	if (eth->hwlro) {
3092 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
3093 			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
3094 			if (err)
3095 				return err;
3096 		}
3097 		err = mtk_hwlro_rx_init(eth);
3098 		if (err)
3099 			return err;
3100 	}
3101 
3102 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3103 		/* Enable random early drop and set drop threshold
3104 		 * automatically
3105 		 */
3106 		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
3107 			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3108 		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
3109 	}
3110 
3111 	return 0;
3112 }
3113 
3114 static void mtk_dma_free(struct mtk_eth *eth)
3115 {
3116 	const struct mtk_soc_data *soc = eth->soc;
3117 	int i;
3118 
3119 	for (i = 0; i < MTK_MAX_DEVS; i++)
3120 		if (eth->netdev[i])
3121 			netdev_reset_queue(eth->netdev[i]);
3122 	if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
3123 		dma_free_coherent(eth->dma_dev,
3124 				  MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
3125 				  eth->scratch_ring, eth->phy_scratch_ring);
3126 		eth->scratch_ring = NULL;
3127 		eth->phy_scratch_ring = 0;
3128 	}
3129 	mtk_tx_clean(eth);
3130 	mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
3131 	mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
3132 
3133 	if (eth->hwlro) {
3134 		mtk_hwlro_rx_uninit(eth);
3135 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3136 			mtk_rx_clean(eth, &eth->rx_ring[i], false);
3137 	}
3138 
3139 	kfree(eth->scratch_head);
3140 }
3141 
3142 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3143 {
3144 	u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3145 
3146 	return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3147 	       (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3148 	       (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3149 }
3150 
3151 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3152 {
3153 	struct mtk_mac *mac = netdev_priv(dev);
3154 	struct mtk_eth *eth = mac->hw;
3155 
3156 	if (test_bit(MTK_RESETTING, &eth->state))
3157 		return;
3158 
3159 	if (!mtk_hw_reset_check(eth))
3160 		return;
3161 
3162 	eth->netdev[mac->id]->stats.tx_errors++;
3163 	netif_err(eth, tx_err, dev, "transmit timed out\n");
3164 
3165 	schedule_work(&eth->pending_work);
3166 }
3167 
3168 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3169 {
3170 	struct mtk_eth *eth = _eth;
3171 
3172 	eth->rx_events++;
3173 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
3174 		mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3175 		__napi_schedule(&eth->rx_napi);
3176 	}
3177 
3178 	return IRQ_HANDLED;
3179 }
3180 
3181 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3182 {
3183 	struct mtk_eth *eth = _eth;
3184 
3185 	eth->tx_events++;
3186 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
3187 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3188 		__napi_schedule(&eth->tx_napi);
3189 	}
3190 
3191 	return IRQ_HANDLED;
3192 }
3193 
3194 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3195 {
3196 	struct mtk_eth *eth = _eth;
3197 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3198 
3199 	if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3200 	    eth->soc->txrx.rx_irq_done_mask) {
3201 		if (mtk_r32(eth, reg_map->pdma.irq_status) &
3202 		    eth->soc->txrx.rx_irq_done_mask)
3203 			mtk_handle_irq_rx(irq, _eth);
3204 	}
3205 	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3206 		if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3207 			mtk_handle_irq_tx(irq, _eth);
3208 	}
3209 
3210 	return IRQ_HANDLED;
3211 }
3212 
3213 #ifdef CONFIG_NET_POLL_CONTROLLER
3214 static void mtk_poll_controller(struct net_device *dev)
3215 {
3216 	struct mtk_mac *mac = netdev_priv(dev);
3217 	struct mtk_eth *eth = mac->hw;
3218 
3219 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3220 	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3221 	mtk_handle_irq_rx(eth->irq[2], dev);
3222 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3223 	mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
3224 }
3225 #endif
3226 
3227 static int mtk_start_dma(struct mtk_eth *eth)
3228 {
3229 	u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3230 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3231 	int err;
3232 
3233 	err = mtk_dma_init(eth);
3234 	if (err) {
3235 		mtk_dma_free(eth);
3236 		return err;
3237 	}
3238 
3239 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3240 		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3241 		val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3242 		       MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3243 		       MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3244 
3245 		if (mtk_is_netsys_v2_or_greater(eth))
3246 			val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3247 			       MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3248 			       MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
3249 		else
3250 			val |= MTK_RX_BT_32DWORDS;
3251 		mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3252 
3253 		mtk_w32(eth,
3254 			MTK_RX_DMA_EN | rx_2b_offset |
3255 			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3256 			reg_map->pdma.glo_cfg);
3257 	} else {
3258 		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3259 			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3260 			reg_map->pdma.glo_cfg);
3261 	}
3262 
3263 	return 0;
3264 }
3265 
3266 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
3267 {
3268 	int i;
3269 
3270 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3271 		return;
3272 
3273 	for (i = 0; i < MTK_MAX_DEVS; i++) {
3274 		u32 val;
3275 
3276 		if (!eth->netdev[i])
3277 			continue;
3278 
3279 		val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
3280 
3281 		/* by default, set up the forward port to send frames to PDMA */
3282 		val &= ~0xffff;
3283 
3284 		/* Enable RX checksum */
3285 		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3286 
3287 		val |= config;
3288 
3289 		if (netdev_uses_dsa(eth->netdev[i]))
3290 			val |= MTK_GDMA_SPECIAL_TAG;
3291 
3292 		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
3293 	}
3294 	/* Reset and enable PSE */
3295 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
3296 	mtk_w32(eth, 0, MTK_RST_GL);
3297 }
3298 
3300 static bool mtk_uses_dsa(struct net_device *dev)
3301 {
3302 #if IS_ENABLED(CONFIG_NET_DSA)
3303 	return netdev_uses_dsa(dev) &&
3304 	       dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3305 #else
3306 	return false;
3307 #endif
3308 }
3309 
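/* Netdev notifier: when the link of a DSA user port stacked on this MAC
 * changes, adjust the rate of the matching QDMA TX queue via
 * mtk_set_queue_speed() to the port's negotiated speed.
 */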
3310 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3311 {
3312 	struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3313 	struct mtk_eth *eth = mac->hw;
3314 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3315 	struct ethtool_link_ksettings s;
3316 	struct net_device *ldev;
3317 	struct list_head *iter;
3318 	struct dsa_port *dp;
3319 
3320 	if (event != NETDEV_CHANGE)
3321 		return NOTIFY_DONE;
3322 
3323 	netdev_for_each_lower_dev(dev, ldev, iter) {
3324 		if (netdev_priv(ldev) == mac)
3325 			goto found;
3326 	}
3327 
3328 	return NOTIFY_DONE;
3329 
3330 found:
3331 	if (!dsa_slave_dev_check(dev))
3332 		return NOTIFY_DONE;
3333 
3334 	if (__ethtool_get_link_ksettings(dev, &s))
3335 		return NOTIFY_DONE;
3336 
3337 	if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3338 		return NOTIFY_DONE;
3339 
3340 	dp = dsa_port_from_netdev(dev);
3341 	if (dp->index >= MTK_QDMA_NUM_QUEUES)
3342 		return NOTIFY_DONE;
3343 
3344 	if (mac->speed > 0 && mac->speed <= s.base.speed)
3345 		s.base.speed = 0;
3346 
3347 	mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3348 
3349 	return NOTIFY_DONE;
3350 }
3351 
3352 static int mtk_open(struct net_device *dev)
3353 {
3354 	struct mtk_mac *mac = netdev_priv(dev);
3355 	struct mtk_eth *eth = mac->hw;
3356 	int i, err;
3357 
3358 	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3359 	if (err) {
3360 		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3361 			   err);
3362 		return err;
3363 	}
3364 
3365 	/* all netdevs share the same DMA rings, so we only bring them up once */
3366 	if (!refcount_read(&eth->dma_refcnt)) {
3367 		const struct mtk_soc_data *soc = eth->soc;
3368 		u32 gdm_config;
3369 		int i;
3370 
3371 		err = mtk_start_dma(eth);
3372 		if (err) {
3373 			phylink_disconnect_phy(mac->phylink);
3374 			return err;
3375 		}
3376 
3377 		for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3378 			mtk_ppe_start(eth->ppe[i]);
3379 
3380 		gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
3381 						  : MTK_GDMA_TO_PDMA;
3382 		mtk_gdm_config(eth, gdm_config);
3383 
3384 		napi_enable(&eth->tx_napi);
3385 		napi_enable(&eth->rx_napi);
3386 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3387 		mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
3388 		refcount_set(&eth->dma_refcnt, 1);
3389 	} else {
3390 		refcount_inc(&eth->dma_refcnt);
3391 	}
3392 
3393 	phylink_start(mac->phylink);
3394 	netif_tx_start_all_queues(dev);
3395 
3396 	if (mtk_is_netsys_v2_or_greater(eth))
3397 		return 0;
3398 
3399 	if (mtk_uses_dsa(dev) && !eth->prog) {
3400 		for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3401 			struct metadata_dst *md_dst = eth->dsa_meta[i];
3402 
3403 			if (md_dst)
3404 				continue;
3405 
3406 			md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3407 						    GFP_KERNEL);
3408 			if (!md_dst)
3409 				return -ENOMEM;
3410 
3411 			md_dst->u.port_info.port_id = i;
3412 			eth->dsa_meta[i] = md_dst;
3413 		}
3414 	} else {
3415 		/* Hardware DSA untagging and VLAN RX offloading need to be
3416 		 * disabled if at least one MAC does not use DSA.
3417 		 */
3418 		u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3419 
3420 		val &= ~MTK_CDMP_STAG_EN;
3421 		mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3422 
3423 		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3424 	}
3425 
3426 	return 0;
3427 }
3428 
3429 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3430 {
3431 	u32 val;
3432 	int i;
3433 
3434 	/* stop the dma engine */
3435 	spin_lock_bh(&eth->page_lock);
3436 	val = mtk_r32(eth, glo_cfg);
3437 	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3438 		glo_cfg);
3439 	spin_unlock_bh(&eth->page_lock);
3440 
3441 	/* wait for dma stop */
3442 	for (i = 0; i < 10; i++) {
3443 		val = mtk_r32(eth, glo_cfg);
3444 		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3445 			msleep(20);
3446 			continue;
3447 		}
3448 		break;
3449 	}
3450 }
3451 
3452 static int mtk_stop(struct net_device *dev)
3453 {
3454 	struct mtk_mac *mac = netdev_priv(dev);
3455 	struct mtk_eth *eth = mac->hw;
3456 	int i;
3457 
3458 	phylink_stop(mac->phylink);
3459 
3460 	netif_tx_disable(dev);
3461 
3462 	phylink_disconnect_phy(mac->phylink);
3463 
3464 	/* only shut down DMA if this is the last user */
3465 	if (!refcount_dec_and_test(&eth->dma_refcnt))
3466 		return 0;
3467 
3468 	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
3469 
3470 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3471 	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3472 	napi_disable(&eth->tx_napi);
3473 	napi_disable(&eth->rx_napi);
3474 
3475 	cancel_work_sync(&eth->rx_dim.work);
3476 	cancel_work_sync(&eth->tx_dim.work);
3477 
3478 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3479 		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3480 	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3481 
3482 	mtk_dma_free(eth);
3483 
3484 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3485 		mtk_ppe_stop(eth->ppe[i]);
3486 
3487 	return 0;
3488 }
3489 
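/* Attach or detach an XDP program. If the XDP enable state changes while the
 * device is running, the DMA rings are torn down and re-created around the
 * program swap.
 */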
3490 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3491 			 struct netlink_ext_ack *extack)
3492 {
3493 	struct mtk_mac *mac = netdev_priv(dev);
3494 	struct mtk_eth *eth = mac->hw;
3495 	struct bpf_prog *old_prog;
3496 	bool need_update;
3497 
3498 	if (eth->hwlro) {
3499 		NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3500 		return -EOPNOTSUPP;
3501 	}
3502 
3503 	if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3504 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3505 		return -EOPNOTSUPP;
3506 	}
3507 
3508 	need_update = !!eth->prog != !!prog;
3509 	if (netif_running(dev) && need_update)
3510 		mtk_stop(dev);
3511 
3512 	old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3513 	if (old_prog)
3514 		bpf_prog_put(old_prog);
3515 
3516 	if (netif_running(dev) && need_update)
3517 		return mtk_open(dev);
3518 
3519 	return 0;
3520 }
3521 
3522 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3523 {
3524 	switch (xdp->command) {
3525 	case XDP_SETUP_PROG:
3526 		return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3527 	default:
3528 		return -EINVAL;
3529 	}
3530 }
3531 
3532 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3533 {
3534 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3535 			   reset_bits,
3536 			   reset_bits);
3537 
3538 	usleep_range(1000, 1100);
3539 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3540 			   reset_bits,
3541 			   ~reset_bits);
3542 	mdelay(10);
3543 }
3544 
3545 static void mtk_clk_disable(struct mtk_eth *eth)
3546 {
3547 	int clk;
3548 
3549 	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3550 		clk_disable_unprepare(eth->clks[clk]);
3551 }
3552 
3553 static int mtk_clk_enable(struct mtk_eth *eth)
3554 {
3555 	int clk, ret;
3556 
3557 	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3558 		ret = clk_prepare_enable(eth->clks[clk]);
3559 		if (ret)
3560 			goto err_disable_clks;
3561 	}
3562 
3563 	return 0;
3564 
3565 err_disable_clks:
3566 	while (--clk >= 0)
3567 		clk_disable_unprepare(eth->clks[clk]);
3568 
3569 	return ret;
3570 }
3571 
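/* Net DIM worker: translate the current RX moderation profile into the PDMA
 * (and, where present, QDMA) delayed-interrupt time and packet thresholds.
 */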
3572 static void mtk_dim_rx(struct work_struct *work)
3573 {
3574 	struct dim *dim = container_of(work, struct dim, work);
3575 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3576 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3577 	struct dim_cq_moder cur_profile;
3578 	u32 val, cur;
3579 
3580 	cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3581 						dim->profile_ix);
3582 	spin_lock_bh(&eth->dim_lock);
3583 
3584 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3585 	val &= MTK_PDMA_DELAY_TX_MASK;
3586 	val |= MTK_PDMA_DELAY_RX_EN;
3587 
3588 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3589 	val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3590 
3591 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3592 	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3593 
3594 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3595 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3596 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3597 
3598 	spin_unlock_bh(&eth->dim_lock);
3599 
3600 	dim->state = DIM_START_MEASURE;
3601 }
3602 
3603 static void mtk_dim_tx(struct work_struct *work)
3604 {
3605 	struct dim *dim = container_of(work, struct dim, work);
3606 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3607 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3608 	struct dim_cq_moder cur_profile;
3609 	u32 val, cur;
3610 
3611 	cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3612 						dim->profile_ix);
3613 	spin_lock_bh(&eth->dim_lock);
3614 
3615 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
3616 	val &= MTK_PDMA_DELAY_RX_MASK;
3617 	val |= MTK_PDMA_DELAY_TX_EN;
3618 
3619 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3620 	val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3621 
3622 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3623 	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3624 
3625 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
3626 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3627 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
3628 
3629 	spin_unlock_bh(&eth->dim_lock);
3630 
3631 	dim->state = DIM_START_MEASURE;
3632 }
3633 
3634 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3635 {
3636 	struct mtk_eth *eth = mac->hw;
3637 	u32 mcr_cur, mcr_new;
3638 
3639 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3640 		return;
3641 
3642 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3643 	mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3644 
3645 	if (val <= 1518)
3646 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3647 	else if (val <= 1536)
3648 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3649 	else if (val <= 1552)
3650 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3651 	else
3652 		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3653 
3654 	if (mcr_new != mcr_cur)
3655 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3656 }
3657 
3658 static void mtk_hw_reset(struct mtk_eth *eth)
3659 {
3660 	u32 val;
3661 
3662 	if (mtk_is_netsys_v2_or_greater(eth))
3663 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3664 
3665 	if (mtk_is_netsys_v3_or_greater(eth)) {
3666 		val = RSTCTRL_PPE0_V3;
3667 
3668 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3669 			val |= RSTCTRL_PPE1_V3;
3670 
3671 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3672 			val |= RSTCTRL_PPE2;
3673 
3674 		val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3675 	} else if (mtk_is_netsys_v2_or_greater(eth)) {
3676 		val = RSTCTRL_PPE0_V2;
3677 
3678 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3679 			val |= RSTCTRL_PPE1;
3680 	} else {
3681 		val = RSTCTRL_PPE0;
3682 	}
3683 
3684 	ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3685 
3686 	if (mtk_is_netsys_v3_or_greater(eth))
3687 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3688 			     0x6f8ff);
3689 	else if (mtk_is_netsys_v2_or_greater(eth))
3690 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3691 			     0x3ffffff);
3692 }
3693 
3694 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3695 {
3696 	u32 val;
3697 
3698 	regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3699 	return val;
3700 }
3701 
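/* Staged warm reset of the frame engine: assert RSTCTRL_FE first, then the
 * block-specific reset bits, verifying each stage; fall back to a full cold
 * reset if the first stage does not take effect.
 */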
3702 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3703 {
3704 	u32 rst_mask, val;
3705 
3706 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3707 			   RSTCTRL_FE);
3708 	if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3709 				      val & RSTCTRL_FE, 1, 1000)) {
3710 		dev_err(eth->dev, "warm reset failed\n");
3711 		mtk_hw_reset(eth);
3712 		return;
3713 	}
3714 
3715 	if (mtk_is_netsys_v3_or_greater(eth)) {
3716 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3;
3717 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3718 			rst_mask |= RSTCTRL_PPE1_V3;
3719 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3720 			rst_mask |= RSTCTRL_PPE2;
3721 
3722 		rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3723 	} else if (mtk_is_netsys_v2_or_greater(eth)) {
3724 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3725 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3726 			rst_mask |= RSTCTRL_PPE1;
3727 	} else {
3728 		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3729 	}
3730 
3731 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3732 
3733 	udelay(1);
3734 	val = mtk_hw_reset_read(eth);
3735 	if (!(val & rst_mask))
3736 		dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3737 			val, rst_mask);
3738 
3739 	rst_mask |= RSTCTRL_FE;
3740 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3741 
3742 	udelay(1);
3743 	val = mtk_hw_reset_read(eth);
3744 	if (val & rst_mask)
3745 		dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
3746 			val, rst_mask);
3747 }
3748 
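/* Heuristic DMA hang detection used by the reset monitor: sample WDMA, QDMA
 * and ADMA state and only report a hang after the same condition has been
 * seen on three consecutive polls.
 */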
3749 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3750 {
3751 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3752 	bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
3753 	bool oq_hang, cdm1_busy, adma_busy;
3754 	bool wtx_busy, cdm_full, oq_free;
3755 	u32 wdidx, val, gdm1_fc, gdm2_fc;
3756 	bool qfsm_hang, qfwd_hang;
3757 	bool ret = false;
3758 
3759 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3760 		return false;
3761 
3762 	/* WDMA sanity checks */
3763 	wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
3764 
3765 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
3766 	wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
3767 
3768 	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
3769 	cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
3770 
3771 	oq_free  = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
3772 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
3773 		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
3774 
3775 	if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
3776 		if (++eth->reset.wdma_hang_count > 2) {
3777 			eth->reset.wdma_hang_count = 0;
3778 			ret = true;
3779 		}
3780 		goto out;
3781 	}
3782 
3783 	/* QDMA sanity checks */
3784 	qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
3785 	qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
3786 
3787 	gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
3788 	gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
3789 	gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
3790 	gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
3791 	gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
3792 	gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
3793 
3794 	if (qfsm_hang && qfwd_hang &&
3795 	    ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
3796 	     (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
3797 		if (++eth->reset.qdma_hang_count > 2) {
3798 			eth->reset.qdma_hang_count = 0;
3799 			ret = true;
3800 		}
3801 		goto out;
3802 	}
3803 
3804 	/* ADMA sanity checks */
3805 	oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
3806 	cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
3807 	adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
3808 		    !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
3809 
3810 	if (oq_hang && cdm1_busy && adma_busy) {
3811 		if (++eth->reset.adma_hang_count > 2) {
3812 			eth->reset.adma_hang_count = 0;
3813 			ret = true;
3814 		}
3815 		goto out;
3816 	}
3817 
3818 	eth->reset.wdma_hang_count = 0;
3819 	eth->reset.qdma_hang_count = 0;
3820 	eth->reset.adma_hang_count = 0;
3821 out:
3822 	eth->reset.wdidx = wdidx;
3823 
3824 	return ret;
3825 }
3826 
3827 static void mtk_hw_reset_monitor_work(struct work_struct *work)
3828 {
3829 	struct delayed_work *del_work = to_delayed_work(work);
3830 	struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
3831 					   reset.monitor_work);
3832 
3833 	if (test_bit(MTK_RESETTING, &eth->state))
3834 		goto out;
3835 
3836 	/* DMA stuck checks */
3837 	if (mtk_hw_check_dma_hang(eth))
3838 		schedule_work(&eth->pending_work);
3839 
3840 out:
3841 	schedule_delayed_work(&eth->reset.monitor_work,
3842 			      MTK_DMA_MONITOR_TIMEOUT);
3843 }
3844 
3845 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
3846 {
3847 	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3848 		       ETHSYS_DMA_AG_MAP_PPE;
3849 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3850 	int i, val, ret;
3851 
3852 	if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
3853 		return 0;
3854 
3855 	if (!reset) {
3856 		pm_runtime_enable(eth->dev);
3857 		pm_runtime_get_sync(eth->dev);
3858 
3859 		ret = mtk_clk_enable(eth);
3860 		if (ret)
3861 			goto err_disable_pm;
3862 	}
3863 
3864 	if (eth->ethsys)
3865 		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3866 				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3867 
3868 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3869 		ret = device_reset(eth->dev);
3870 		if (ret) {
3871 			dev_err(eth->dev, "MAC reset failed!\n");
3872 			goto err_disable_pm;
3873 		}
3874 
3875 		/* set interrupt delays based on current Net DIM sample */
3876 		mtk_dim_rx(&eth->rx_dim.work);
3877 		mtk_dim_tx(&eth->tx_dim.work);
3878 
3879 		/* disable delay and normal interrupt */
3880 		mtk_tx_irq_disable(eth, ~0);
3881 		mtk_rx_irq_disable(eth, ~0);
3882 
3883 		return 0;
3884 	}
3885 
3886 	msleep(100);
3887 
3888 	if (reset)
3889 		mtk_hw_warm_reset(eth);
3890 	else
3891 		mtk_hw_reset(eth);
3892 
3893 	if (mtk_is_netsys_v2_or_greater(eth)) {
3894 		/* Set FE to PDMAv2 if necessary */
3895 		val = mtk_r32(eth, MTK_FE_GLO_MISC);
3896 		mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
3897 	}
3898 
3899 	if (eth->pctl) {
3900 		/* Set GE2 driving and slew rate */
3901 		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3902 
3903 		/* set GE2 TDSEL */
3904 		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3905 
3906 		/* set GE2 TUNE */
3907 		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3908 	}
3909 
	/* Set each GMAC to link-down by default. The per-MAC MCR is set up
	 * with the appropriate value once mtk_mac_config() is invoked.
	 */
3914 	for (i = 0; i < MTK_MAX_DEVS; i++) {
3915 		struct net_device *dev = eth->netdev[i];
3916 
3917 		if (!dev)
3918 			continue;
3919 
3920 		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3921 		mtk_set_mcr_max_rx(netdev_priv(dev),
3922 				   dev->mtu + MTK_RX_ETH_HLEN);
3923 	}
3924 
	/* Instruct the CDM to parse the MTK special tag from CPU-originated
	 * packets; this also works for untagged packets.
	 */
3928 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3929 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3930 	if (mtk_is_netsys_v1(eth)) {
3931 		val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3932 		mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3933 
3934 		mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3935 	}
3936 
3937 	/* set interrupt delays based on current Net DIM sample */
3938 	mtk_dim_rx(&eth->rx_dim.work);
3939 	mtk_dim_tx(&eth->tx_dim.work);
3940 
3941 	/* disable delay and normal interrupt */
3942 	mtk_tx_irq_disable(eth, ~0);
3943 	mtk_rx_irq_disable(eth, ~0);
3944 
3945 	/* FE int grouping */
3946 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3947 	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
3948 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3949 	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
3950 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3951 
3952 	if (mtk_is_netsys_v3_or_greater(eth)) {
3953 		/* PSE should not drop port1, port8 and port9 packets */
3954 		mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
3955 
3956 		/* GDM and CDM Threshold */
3957 		mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
3958 		mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
3959 
3960 		/* Disable GDM1 RX CRC stripping */
3961 		mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
3962 
		/* The PSE GDM3 MIB counters have incorrect hardware default
		 * values, so read-clear them here to prevent ethtool from
		 * retrieving wrong MIB values.
		 */
3967 		for (i = 0; i < 0x80; i += 0x4)
3968 			mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
3969 	} else if (!mtk_is_netsys_v1(eth)) {
3970 		/* PSE should not drop port8 and port9 packets from WDMA Tx */
3971 		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
3972 
3973 		/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
3974 		mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
3975 
3976 		/* PSE Free Queue Flow Control  */
3977 		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3978 
3979 		/* PSE config input queue threshold */
3980 		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3981 		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3982 		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3983 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3984 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3985 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3986 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
3987 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
3988 
3989 		/* PSE config output queue threshold */
3990 		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3991 		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3992 		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3993 		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3994 		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3995 		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3996 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3997 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
3998 
3999 		/* GDM and CDM Threshold */
4000 		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
4001 		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
4002 		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
4003 		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
4004 		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
4005 		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
4006 	}
4007 
4008 	return 0;
4009 
4010 err_disable_pm:
4011 	if (!reset) {
4012 		pm_runtime_put_sync(eth->dev);
4013 		pm_runtime_disable(eth->dev);
4014 	}
4015 
4016 	return ret;
4017 }
4018 
4019 static int mtk_hw_deinit(struct mtk_eth *eth)
4020 {
4021 	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
4022 		return 0;
4023 
4024 	mtk_clk_disable(eth);
4025 
4026 	pm_runtime_put_sync(eth->dev);
4027 	pm_runtime_disable(eth->dev);
4028 
4029 	return 0;
4030 }
4031 
4032 static void mtk_uninit(struct net_device *dev)
4033 {
4034 	struct mtk_mac *mac = netdev_priv(dev);
4035 	struct mtk_eth *eth = mac->hw;
4036 
4037 	phylink_disconnect_phy(mac->phylink);
4038 	mtk_tx_irq_disable(eth, ~0);
4039 	mtk_rx_irq_disable(eth, ~0);
4040 }
4041 
4042 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
4043 {
4044 	int length = new_mtu + MTK_RX_ETH_HLEN;
4045 	struct mtk_mac *mac = netdev_priv(dev);
4046 	struct mtk_eth *eth = mac->hw;
4047 
4048 	if (rcu_access_pointer(eth->prog) &&
4049 	    length > MTK_PP_MAX_BUF_SIZE) {
4050 		netdev_err(dev, "Invalid MTU for XDP mode\n");
4051 		return -EINVAL;
4052 	}
4053 
4054 	mtk_set_mcr_max_rx(mac, length);
4055 	dev->mtu = new_mtu;
4056 
4057 	return 0;
4058 }
4059 
4060 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4061 {
4062 	struct mtk_mac *mac = netdev_priv(dev);
4063 
4064 	switch (cmd) {
4065 	case SIOCGMIIPHY:
4066 	case SIOCGMIIREG:
4067 	case SIOCSMIIREG:
4068 		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4069 	default:
4070 		break;
4071 	}
4072 
4073 	return -EOPNOTSUPP;
4074 }
4075 
4076 static void mtk_prepare_for_reset(struct mtk_eth *eth)
4077 {
4078 	u32 val;
4079 	int i;
4080 
4081 	/* set FE PPE ports link down */
4082 	for (i = MTK_GMAC1_ID;
4083 	     i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4084 	     i += 2) {
4085 		val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4086 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4087 			val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4088 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4089 			val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4090 		mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4091 	}
4092 
4093 	/* adjust PPE configurations to prepare for reset */
4094 	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
4095 		mtk_ppe_prepare_reset(eth->ppe[i]);
4096 
4097 	/* disable NETSYS interrupts */
4098 	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
4099 
4100 	/* force link down GMAC */
4101 	for (i = 0; i < 2; i++) {
4102 		val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
4103 		mtk_w32(eth, val, MTK_MAC_MCR(i));
4104 	}
4105 }
4106 
4107 static void mtk_pending_work(struct work_struct *work)
4108 {
4109 	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
4110 	unsigned long restart = 0;
4111 	u32 val;
4112 	int i;
4113 
4114 	rtnl_lock();
4115 	set_bit(MTK_RESETTING, &eth->state);
4116 
4117 	mtk_prepare_for_reset(eth);
4118 	mtk_wed_fe_reset();
	/* Run the reset preliminary configuration again to avoid any possible
	 * race during the FE reset, since it can run with the RTNL lock
	 * released.
	 */
4122 	mtk_prepare_for_reset(eth);
4123 
4124 	/* stop all devices to make sure that dma is properly shut down */
4125 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4126 		if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
4127 			continue;
4128 
4129 		mtk_stop(eth->netdev[i]);
4130 		__set_bit(i, &restart);
4131 	}
4132 
4133 	usleep_range(15000, 16000);
4134 
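	/* restore the default pin state before re-initialising the hardware */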
4135 	if (eth->dev->pins)
4136 		pinctrl_select_state(eth->dev->pins->p,
4137 				     eth->dev->pins->default_state);
4138 	mtk_hw_init(eth, true);
4139 
4140 	/* restart DMA and enable IRQs */
4141 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4142 		if (!eth->netdev[i] || !test_bit(i, &restart))
4143 			continue;
4144 
4145 		if (mtk_open(eth->netdev[i])) {
4146 			netif_alert(eth, ifup, eth->netdev[i],
4147 				    "Driver up/down cycle failed\n");
4148 			dev_close(eth->netdev[i]);
4149 		}
4150 	}
4151 
4152 	/* set FE PPE ports link up */
4153 	for (i = MTK_GMAC1_ID;
4154 	     i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4155 	     i += 2) {
4156 		val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4157 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4158 			val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4159 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4160 			val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4161 
4162 		mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4163 	}
4164 
4165 	clear_bit(MTK_RESETTING, &eth->state);
4166 
4167 	mtk_wed_fe_reset_complete();
4168 
4169 	rtnl_unlock();
4170 }
4171 
4172 static int mtk_free_dev(struct mtk_eth *eth)
4173 {
4174 	int i;
4175 
4176 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4177 		if (!eth->netdev[i])
4178 			continue;
4179 		free_netdev(eth->netdev[i]);
4180 	}
4181 
4182 	for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4183 		if (!eth->dsa_meta[i])
4184 			break;
4185 		metadata_dst_free(eth->dsa_meta[i]);
4186 	}
4187 
4188 	return 0;
4189 }
4190 
4191 static int mtk_unreg_dev(struct mtk_eth *eth)
4192 {
4193 	int i;
4194 
4195 	for (i = 0; i < MTK_MAX_DEVS; i++) {
		struct mtk_mac *mac;

		if (!eth->netdev[i])
4198 			continue;
4199 		mac = netdev_priv(eth->netdev[i]);
4200 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4201 			unregister_netdevice_notifier(&mac->device_notifier);
4202 		unregister_netdev(eth->netdev[i]);
4203 	}
4204 
4205 	return 0;
4206 }
4207 
4208 static void mtk_sgmii_destroy(struct mtk_eth *eth)
4209 {
4210 	int i;
4211 
4212 	for (i = 0; i < MTK_MAX_DEVS; i++)
4213 		mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
4214 }
4215 
4216 static int mtk_cleanup(struct mtk_eth *eth)
4217 {
4218 	mtk_sgmii_destroy(eth);
4219 	mtk_unreg_dev(eth);
4220 	mtk_free_dev(eth);
4221 	cancel_work_sync(&eth->pending_work);
4222 	cancel_delayed_work_sync(&eth->reset.monitor_work);
4223 
4224 	return 0;
4225 }
4226 
4227 static int mtk_get_link_ksettings(struct net_device *ndev,
4228 				  struct ethtool_link_ksettings *cmd)
4229 {
4230 	struct mtk_mac *mac = netdev_priv(ndev);
4231 
4232 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4233 		return -EBUSY;
4234 
4235 	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4236 }
4237 
4238 static int mtk_set_link_ksettings(struct net_device *ndev,
4239 				  const struct ethtool_link_ksettings *cmd)
4240 {
4241 	struct mtk_mac *mac = netdev_priv(ndev);
4242 
4243 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4244 		return -EBUSY;
4245 
4246 	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4247 }
4248 
4249 static void mtk_get_drvinfo(struct net_device *dev,
4250 			    struct ethtool_drvinfo *info)
4251 {
4252 	struct mtk_mac *mac = netdev_priv(dev);
4253 
4254 	strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4255 	strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4256 	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4257 }
4258 
4259 static u32 mtk_get_msglevel(struct net_device *dev)
4260 {
4261 	struct mtk_mac *mac = netdev_priv(dev);
4262 
4263 	return mac->hw->msg_enable;
4264 }
4265 
4266 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4267 {
4268 	struct mtk_mac *mac = netdev_priv(dev);
4269 
4270 	mac->hw->msg_enable = value;
4271 }
4272 
4273 static int mtk_nway_reset(struct net_device *dev)
4274 {
4275 	struct mtk_mac *mac = netdev_priv(dev);
4276 
4277 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4278 		return -EBUSY;
4279 
4280 	if (!mac->phylink)
		return -EOPNOTSUPP;
4282 
4283 	return phylink_ethtool_nway_reset(mac->phylink);
4284 }
4285 
4286 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4287 {
4288 	int i;
4289 
4290 	switch (stringset) {
4291 	case ETH_SS_STATS: {
4292 		struct mtk_mac *mac = netdev_priv(dev);
4293 
4294 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
4295 			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
4296 			data += ETH_GSTRING_LEN;
4297 		}
4298 		if (mtk_page_pool_enabled(mac->hw))
4299 			page_pool_ethtool_stats_get_strings(data);
4300 		break;
4301 	}
4302 	default:
4303 		break;
4304 	}
4305 }
4306 
4307 static int mtk_get_sset_count(struct net_device *dev, int sset)
4308 {
4309 	switch (sset) {
4310 	case ETH_SS_STATS: {
4311 		int count = ARRAY_SIZE(mtk_ethtool_stats);
4312 		struct mtk_mac *mac = netdev_priv(dev);
4313 
4314 		if (mtk_page_pool_enabled(mac->hw))
4315 			count += page_pool_ethtool_stats_get_count();
4316 		return count;
4317 	}
4318 	default:
4319 		return -EOPNOTSUPP;
4320 	}
4321 }
4322 
4323 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4324 {
4325 	struct page_pool_stats stats = {};
4326 	int i;
4327 
4328 	for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4329 		struct mtk_rx_ring *ring = &eth->rx_ring[i];
4330 
4331 		if (!ring->page_pool)
4332 			continue;
4333 
4334 		page_pool_get_stats(ring->page_pool, &stats);
4335 	}
4336 	page_pool_ethtool_stats_get(data, &stats);
4337 }
4338 
4339 static void mtk_get_ethtool_stats(struct net_device *dev,
4340 				  struct ethtool_stats *stats, u64 *data)
4341 {
4342 	struct mtk_mac *mac = netdev_priv(dev);
4343 	struct mtk_hw_stats *hwstats = mac->hw_stats;
4344 	u64 *data_src, *data_dst;
4345 	unsigned int start;
4346 	int i;
4347 
4348 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4349 		return;
4350 
4351 	if (netif_running(dev) && netif_device_present(dev)) {
4352 		if (spin_trylock_bh(&hwstats->stats_lock)) {
4353 			mtk_stats_update_mac(mac);
4354 			spin_unlock_bh(&hwstats->stats_lock);
4355 		}
4356 	}
4357 
4358 	data_src = (u64 *)hwstats;
4359 
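	/* snapshot the counters consistently under the u64_stats sequence */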
4360 	do {
4361 		data_dst = data;
4362 		start = u64_stats_fetch_begin(&hwstats->syncp);
4363 
4364 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4365 			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4366 		if (mtk_page_pool_enabled(mac->hw))
4367 			mtk_ethtool_pp_stats(mac->hw, data_dst);
4368 	} while (u64_stats_fetch_retry(&hwstats->syncp, start));
4369 }
4370 
4371 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4372 			 u32 *rule_locs)
4373 {
4374 	int ret = -EOPNOTSUPP;
4375 
4376 	switch (cmd->cmd) {
4377 	case ETHTOOL_GRXRINGS:
4378 		if (dev->hw_features & NETIF_F_LRO) {
4379 			cmd->data = MTK_MAX_RX_RING_NUM;
4380 			ret = 0;
4381 		}
4382 		break;
4383 	case ETHTOOL_GRXCLSRLCNT:
4384 		if (dev->hw_features & NETIF_F_LRO) {
4385 			struct mtk_mac *mac = netdev_priv(dev);
4386 
4387 			cmd->rule_cnt = mac->hwlro_ip_cnt;
4388 			ret = 0;
4389 		}
4390 		break;
4391 	case ETHTOOL_GRXCLSRULE:
4392 		if (dev->hw_features & NETIF_F_LRO)
4393 			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4394 		break;
4395 	case ETHTOOL_GRXCLSRLALL:
4396 		if (dev->hw_features & NETIF_F_LRO)
4397 			ret = mtk_hwlro_get_fdir_all(dev, cmd,
4398 						     rule_locs);
4399 		break;
4400 	default:
4401 		break;
4402 	}
4403 
4404 	return ret;
4405 }
4406 
4407 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4408 {
4409 	int ret = -EOPNOTSUPP;
4410 
4411 	switch (cmd->cmd) {
4412 	case ETHTOOL_SRXCLSRLINS:
4413 		if (dev->hw_features & NETIF_F_LRO)
4414 			ret = mtk_hwlro_add_ipaddr(dev, cmd);
4415 		break;
4416 	case ETHTOOL_SRXCLSRLDEL:
4417 		if (dev->hw_features & NETIF_F_LRO)
4418 			ret = mtk_hwlro_del_ipaddr(dev, cmd);
4419 		break;
4420 	default:
4421 		break;
4422 	}
4423 
4424 	return ret;
4425 }
4426 
4427 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4428 			    struct net_device *sb_dev)
4429 {
4430 	struct mtk_mac *mac = netdev_priv(dev);
4431 	unsigned int queue = 0;
4432 
4433 	if (netdev_uses_dsa(dev))
4434 		queue = skb_get_queue_mapping(skb) + 3;
4435 	else
4436 		queue = mac->id;
4437 
4438 	if (queue >= dev->num_tx_queues)
4439 		queue = 0;
4440 
4441 	return queue;
4442 }
4443 
4444 static const struct ethtool_ops mtk_ethtool_ops = {
4445 	.get_link_ksettings	= mtk_get_link_ksettings,
4446 	.set_link_ksettings	= mtk_set_link_ksettings,
4447 	.get_drvinfo		= mtk_get_drvinfo,
4448 	.get_msglevel		= mtk_get_msglevel,
4449 	.set_msglevel		= mtk_set_msglevel,
4450 	.nway_reset		= mtk_nway_reset,
4451 	.get_link		= ethtool_op_get_link,
4452 	.get_strings		= mtk_get_strings,
4453 	.get_sset_count		= mtk_get_sset_count,
4454 	.get_ethtool_stats	= mtk_get_ethtool_stats,
4455 	.get_rxnfc		= mtk_get_rxnfc,
4456 	.set_rxnfc              = mtk_set_rxnfc,
4457 };
4458 
4459 static const struct net_device_ops mtk_netdev_ops = {
4460 	.ndo_uninit		= mtk_uninit,
4461 	.ndo_open		= mtk_open,
4462 	.ndo_stop		= mtk_stop,
4463 	.ndo_start_xmit		= mtk_start_xmit,
4464 	.ndo_set_mac_address	= mtk_set_mac_address,
4465 	.ndo_validate_addr	= eth_validate_addr,
4466 	.ndo_eth_ioctl		= mtk_do_ioctl,
4467 	.ndo_change_mtu		= mtk_change_mtu,
4468 	.ndo_tx_timeout		= mtk_tx_timeout,
4469 	.ndo_get_stats64        = mtk_get_stats64,
4470 	.ndo_fix_features	= mtk_fix_features,
4471 	.ndo_set_features	= mtk_set_features,
4472 #ifdef CONFIG_NET_POLL_CONTROLLER
4473 	.ndo_poll_controller	= mtk_poll_controller,
4474 #endif
4475 	.ndo_setup_tc		= mtk_eth_setup_tc,
4476 	.ndo_bpf		= mtk_xdp,
4477 	.ndo_xdp_xmit		= mtk_xdp_xmit,
4478 	.ndo_select_queue	= mtk_select_queue,
4479 };
4480 
4481 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4482 {
4483 	const __be32 *_id = of_get_property(np, "reg", NULL);
4484 	phy_interface_t phy_mode;
4485 	struct phylink *phylink;
4486 	struct mtk_mac *mac;
4487 	int id, err;
4488 	int txqs = 1;
4489 	u32 val;
4490 
4491 	if (!_id) {
4492 		dev_err(eth->dev, "missing mac id\n");
4493 		return -EINVAL;
4494 	}
4495 
4496 	id = be32_to_cpup(_id);
4497 	if (id >= MTK_MAX_DEVS) {
4498 		dev_err(eth->dev, "%d is not a valid mac id\n", id);
4499 		return -EINVAL;
4500 	}
4501 
4502 	if (eth->netdev[id]) {
4503 		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4504 		return -EINVAL;
4505 	}
4506 
4507 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4508 		txqs = MTK_QDMA_NUM_QUEUES;
4509 
4510 	eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4511 	if (!eth->netdev[id]) {
4512 		dev_err(eth->dev, "alloc_etherdev failed\n");
4513 		return -ENOMEM;
4514 	}
4515 	mac = netdev_priv(eth->netdev[id]);
4516 	eth->mac[id] = mac;
4517 	mac->id = id;
4518 	mac->hw = eth;
4519 	mac->of_node = np;
4520 
4521 	err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
4522 	if (err == -EPROBE_DEFER)
4523 		return err;
4524 
4525 	if (err) {
		/* If the MAC address is invalid, use a random MAC address */
4527 		eth_hw_addr_random(eth->netdev[id]);
4528 		dev_err(eth->dev, "generated random MAC address %pM\n",
4529 			eth->netdev[id]->dev_addr);
4530 	}
4531 
4532 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4533 	mac->hwlro_ip_cnt = 0;
4534 
4535 	mac->hw_stats = devm_kzalloc(eth->dev,
4536 				     sizeof(*mac->hw_stats),
4537 				     GFP_KERNEL);
4538 	if (!mac->hw_stats) {
4539 		dev_err(eth->dev, "failed to allocate counter memory\n");
4540 		err = -ENOMEM;
4541 		goto free_netdev;
4542 	}
4543 	spin_lock_init(&mac->hw_stats->stats_lock);
4544 	u64_stats_init(&mac->hw_stats->syncp);
4545 
4546 	if (mtk_is_netsys_v3_or_greater(eth))
4547 		mac->hw_stats->reg_offset = id * 0x80;
4548 	else
4549 		mac->hw_stats->reg_offset = id * 0x40;
4550 
4551 	/* phylink create */
4552 	err = of_get_phy_mode(np, &phy_mode);
4553 	if (err) {
4554 		dev_err(eth->dev, "incorrect phy-mode\n");
4555 		goto free_netdev;
4556 	}
4557 
4558 	/* mac config is not set */
4559 	mac->interface = PHY_INTERFACE_MODE_NA;
4560 	mac->speed = SPEED_UNKNOWN;
4561 
4562 	mac->phylink_config.dev = &eth->netdev[id]->dev;
4563 	mac->phylink_config.type = PHYLINK_NETDEV;
4564 	mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4565 		MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4566 
	/* MT7623 gmac0 is now missing its speed-specific PLL configuration
	 * in its .mac_config method (since state->speed is not valid there).
	 * Disable support for MII, GMII and RGMII.
	 */
4571 	if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
4572 		__set_bit(PHY_INTERFACE_MODE_MII,
4573 			  mac->phylink_config.supported_interfaces);
4574 		__set_bit(PHY_INTERFACE_MODE_GMII,
4575 			  mac->phylink_config.supported_interfaces);
4576 
4577 		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4578 			phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4579 	}
4580 
4581 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4582 		__set_bit(PHY_INTERFACE_MODE_TRGMII,
4583 			  mac->phylink_config.supported_interfaces);
4584 
4585 	/* TRGMII is not permitted on MT7621 if using DDR2 */
4586 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
4587 	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
4588 		regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
4589 		if (val & SYSCFG_DRAM_TYPE_DDR2)
4590 			__clear_bit(PHY_INTERFACE_MODE_TRGMII,
4591 				    mac->phylink_config.supported_interfaces);
4592 	}
4593 
4594 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4595 		__set_bit(PHY_INTERFACE_MODE_SGMII,
4596 			  mac->phylink_config.supported_interfaces);
4597 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
4598 			  mac->phylink_config.supported_interfaces);
4599 		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
4600 			  mac->phylink_config.supported_interfaces);
4601 	}
4602 
4603 	if (mtk_is_netsys_v3_or_greater(mac->hw) &&
4604 	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
4605 	    id == MTK_GMAC1_ID) {
4606 		mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
4607 						       MAC_SYM_PAUSE |
4608 						       MAC_10000FD;
4609 		phy_interface_zero(mac->phylink_config.supported_interfaces);
4610 		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
4611 			  mac->phylink_config.supported_interfaces);
4612 	}
4613 
4614 	phylink = phylink_create(&mac->phylink_config,
4615 				 of_fwnode_handle(mac->of_node),
4616 				 phy_mode, &mtk_phylink_ops);
4617 	if (IS_ERR(phylink)) {
4618 		err = PTR_ERR(phylink);
4619 		goto free_netdev;
4620 	}
4621 
4622 	mac->phylink = phylink;
4623 
4624 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4625 	eth->netdev[id]->watchdog_timeo = 5 * HZ;
4626 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4627 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
4628 
4629 	eth->netdev[id]->hw_features = eth->soc->hw_features;
4630 	if (eth->hwlro)
4631 		eth->netdev[id]->hw_features |= NETIF_F_LRO;
4632 
4633 	eth->netdev[id]->vlan_features = eth->soc->hw_features &
4634 		~NETIF_F_HW_VLAN_CTAG_TX;
4635 	eth->netdev[id]->features |= eth->soc->hw_features;
4636 	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4637 
4638 	eth->netdev[id]->irq = eth->irq[0];
4639 	eth->netdev[id]->dev.of_node = np;
4640 
4641 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4642 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4643 	else
4644 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4645 
4646 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4647 		mac->device_notifier.notifier_call = mtk_device_event;
4648 		register_netdevice_notifier(&mac->device_notifier);
4649 	}
4650 
4651 	if (mtk_page_pool_enabled(eth))
4652 		eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
4653 						NETDEV_XDP_ACT_REDIRECT |
4654 						NETDEV_XDP_ACT_NDO_XMIT |
4655 						NETDEV_XDP_ACT_NDO_XMIT_SG;
4656 
4657 	return 0;
4658 
4659 free_netdev:
4660 	free_netdev(eth->netdev[id]);
4661 	return err;
4662 }
4663 
4664 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4665 {
4666 	struct net_device *dev, *tmp;
4667 	LIST_HEAD(dev_list);
4668 	int i;
4669 
4670 	rtnl_lock();
4671 
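	/* close all running netdevs, switch the DMA device, then reopen them */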
4672 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4673 		dev = eth->netdev[i];
4674 
4675 		if (!dev || !(dev->flags & IFF_UP))
4676 			continue;
4677 
4678 		list_add_tail(&dev->close_list, &dev_list);
4679 	}
4680 
4681 	dev_close_many(&dev_list, false);
4682 
4683 	eth->dma_dev = dma_dev;
4684 
4685 	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4686 		list_del_init(&dev->close_list);
4687 		dev_open(dev, NULL);
4688 	}
4689 
4690 	rtnl_unlock();
4691 }
4692 
4693 static int mtk_sgmii_init(struct mtk_eth *eth)
4694 {
4695 	struct device_node *np;
4696 	struct regmap *regmap;
4697 	u32 flags;
4698 	int i;
4699 
4700 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4701 		np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
4702 		if (!np)
4703 			break;
4704 
4705 		regmap = syscon_node_to_regmap(np);
4706 		flags = 0;
4707 		if (of_property_read_bool(np, "mediatek,pnswap"))
4708 			flags |= MTK_SGMII_FLAG_PN_SWAP;
4709 
4710 		of_node_put(np);
4711 
4712 		if (IS_ERR(regmap))
4713 			return PTR_ERR(regmap);
4714 
4715 		eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
4716 							 eth->soc->ana_rgc3,
4717 							 flags);
4718 	}
4719 
4720 	return 0;
4721 }
4722 
4723 static int mtk_probe(struct platform_device *pdev)
4724 {
4725 	struct resource *res = NULL, *res_sram;
4726 	struct device_node *mac_np;
4727 	struct mtk_eth *eth;
4728 	int err, i;
4729 
4730 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4731 	if (!eth)
4732 		return -ENOMEM;
4733 
4734 	eth->soc = of_device_get_match_data(&pdev->dev);
4735 
4736 	eth->dev = &pdev->dev;
4737 	eth->dma_dev = &pdev->dev;
4738 	eth->base = devm_platform_ioremap_resource(pdev, 0);
4739 	if (IS_ERR(eth->base))
4740 		return PTR_ERR(eth->base);
4741 
4742 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4743 		eth->ip_align = NET_IP_ALIGN;
4744 
4745 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
		/* SRAM is actual memory and supports transparent access just like DRAM.
		 * Hence we don't need the __iomem annotation and don't need to use
		 * accessor functions to read from or write to SRAM.
		 */
4750 		if (mtk_is_netsys_v3_or_greater(eth)) {
4751 			eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
4752 			if (IS_ERR(eth->sram_base))
4753 				return PTR_ERR(eth->sram_base);
4754 		} else {
4755 			eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
4756 		}
4757 	}
4758 
4759 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
4760 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
4761 		if (!err)
4762 			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
4763 
4764 		if (err) {
4765 			dev_err(&pdev->dev, "Wrong DMA config\n");
4766 			return -EINVAL;
4767 		}
4768 	}
4769 
4770 	spin_lock_init(&eth->page_lock);
4771 	spin_lock_init(&eth->tx_irq_lock);
4772 	spin_lock_init(&eth->rx_irq_lock);
4773 	spin_lock_init(&eth->dim_lock);
4774 
4775 	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4776 	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
4777 	INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
4778 
4779 	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4780 	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
4781 
4782 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4783 		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4784 							      "mediatek,ethsys");
4785 		if (IS_ERR(eth->ethsys)) {
4786 			dev_err(&pdev->dev, "no ethsys regmap found\n");
4787 			return PTR_ERR(eth->ethsys);
4788 		}
4789 	}
4790 
4791 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4792 		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4793 							     "mediatek,infracfg");
4794 		if (IS_ERR(eth->infra)) {
4795 			dev_err(&pdev->dev, "no infracfg regmap found\n");
4796 			return PTR_ERR(eth->infra);
4797 		}
4798 	}
4799 
4800 	if (of_dma_is_coherent(pdev->dev.of_node)) {
4801 		struct regmap *cci;
4802 
4803 		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4804 						      "cci-control-port");
4805 		/* enable CPU/bus coherency */
4806 		if (!IS_ERR(cci))
4807 			regmap_write(cci, 0, 3);
4808 	}
4809 
4810 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
4811 		err = mtk_sgmii_init(eth);
4812 
4813 		if (err)
4814 			return err;
4815 	}
4816 
4817 	if (eth->soc->required_pctl) {
4818 		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4819 							    "mediatek,pctl");
4820 		if (IS_ERR(eth->pctl)) {
4821 			dev_err(&pdev->dev, "no pctl regmap found\n");
4822 			err = PTR_ERR(eth->pctl);
4823 			goto err_destroy_sgmii;
4824 		}
4825 	}
4826 
4827 	if (mtk_is_netsys_v2_or_greater(eth)) {
4828 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4829 		if (!res) {
4830 			err = -EINVAL;
4831 			goto err_destroy_sgmii;
4832 		}
4833 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
4834 			if (mtk_is_netsys_v3_or_greater(eth)) {
4835 				res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4836 				if (!res_sram) {
4837 					err = -EINVAL;
4838 					goto err_destroy_sgmii;
4839 				}
4840 				eth->phy_scratch_ring = res_sram->start;
4841 			} else {
4842 				eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
4843 			}
4844 		}
4845 	}
4846 
4847 	if (eth->soc->offload_version) {
4848 		for (i = 0;; i++) {
4849 			struct device_node *np;
4850 			phys_addr_t wdma_phy;
4851 			u32 wdma_base;
4852 
4853 			if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4854 				break;
4855 
4856 			np = of_parse_phandle(pdev->dev.of_node,
4857 					      "mediatek,wed", i);
4858 			if (!np)
4859 				break;
4860 
4861 			wdma_base = eth->soc->reg_map->wdma_base[i];
4862 			wdma_phy = res ? res->start + wdma_base : 0;
4863 			mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4864 				       wdma_phy, i);
4865 		}
4866 	}
4867 
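	/* fetch the three FE interrupt lines; SoCs with a shared interrupt
	 * reuse line 0 for all of them
	 */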
4868 	for (i = 0; i < 3; i++) {
4869 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4870 			eth->irq[i] = eth->irq[0];
4871 		else
4872 			eth->irq[i] = platform_get_irq(pdev, i);
4873 		if (eth->irq[i] < 0) {
4874 			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4875 			err = -ENXIO;
4876 			goto err_wed_exit;
4877 		}
4878 	}
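
	/* look up all known clocks; only those the SoC marks as required
	 * are mandatory
	 */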
4879 	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4880 		eth->clks[i] = devm_clk_get(eth->dev,
4881 					    mtk_clks_source_name[i]);
4882 		if (IS_ERR(eth->clks[i])) {
4883 			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4884 				err = -EPROBE_DEFER;
4885 				goto err_wed_exit;
4886 			}
4887 			if (eth->soc->required_clks & BIT(i)) {
4888 				dev_err(&pdev->dev, "clock %s not found\n",
4889 					mtk_clks_source_name[i]);
4890 				err = -EINVAL;
4891 				goto err_wed_exit;
4892 			}
4893 			eth->clks[i] = NULL;
4894 		}
4895 	}
4896 
4897 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4898 	INIT_WORK(&eth->pending_work, mtk_pending_work);
4899 
4900 	err = mtk_hw_init(eth, false);
4901 	if (err)
4902 		goto err_wed_exit;
4903 
4904 	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4905 
4906 	for_each_child_of_node(pdev->dev.of_node, mac_np) {
4907 		if (!of_device_is_compatible(mac_np,
4908 					     "mediatek,eth-mac"))
4909 			continue;
4910 
4911 		if (!of_device_is_available(mac_np))
4912 			continue;
4913 
4914 		err = mtk_add_mac(eth, mac_np);
4915 		if (err) {
4916 			of_node_put(mac_np);
4917 			goto err_deinit_hw;
4918 		}
4919 	}
4920 
4921 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4922 		err = devm_request_irq(eth->dev, eth->irq[0],
4923 				       mtk_handle_irq, 0,
4924 				       dev_name(eth->dev), eth);
4925 	} else {
4926 		err = devm_request_irq(eth->dev, eth->irq[1],
4927 				       mtk_handle_irq_tx, 0,
4928 				       dev_name(eth->dev), eth);
4929 		if (err)
4930 			goto err_free_dev;
4931 
4932 		err = devm_request_irq(eth->dev, eth->irq[2],
4933 				       mtk_handle_irq_rx, 0,
4934 				       dev_name(eth->dev), eth);
4935 	}
4936 	if (err)
4937 		goto err_free_dev;
4938 
4939 	/* No MT7628/88 support yet */
4940 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4941 		err = mtk_mdio_init(eth);
4942 		if (err)
4943 			goto err_free_dev;
4944 	}
4945 
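	/* initialise the PPE instances (two on NETSYS v2 or newer, one
	 * otherwise) and register flow offloading support
	 */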
4946 	if (eth->soc->offload_version) {
4947 		u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
4948 
4949 		num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
4950 		for (i = 0; i < num_ppe; i++) {
4951 			u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
4952 
4953 			eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
4954 
4955 			if (!eth->ppe[i]) {
4956 				err = -ENOMEM;
4957 				goto err_deinit_ppe;
4958 			}
4959 		}
4960 
4961 		err = mtk_eth_offload_init(eth);
4962 		if (err)
4963 			goto err_deinit_ppe;
4964 	}
4965 
4966 	for (i = 0; i < MTK_MAX_DEVS; i++) {
4967 		if (!eth->netdev[i])
4968 			continue;
4969 
4970 		err = register_netdev(eth->netdev[i]);
4971 		if (err) {
4972 			dev_err(eth->dev, "error bringing up device\n");
4973 			goto err_deinit_ppe;
4974 		} else
4975 			netif_info(eth, probe, eth->netdev[i],
4976 				   "mediatek frame engine at 0x%08lx, irq %d\n",
4977 				   eth->netdev[i]->base_addr, eth->irq[0]);
4978 	}
4979 
4980 	/* we run 2 devices on the same DMA ring so we need a dummy device
4981 	 * for NAPI to work
4982 	 */
4983 	init_dummy_netdev(&eth->dummy_dev);
4984 	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
4985 	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
4986 
4987 	platform_set_drvdata(pdev, eth);
4988 	schedule_delayed_work(&eth->reset.monitor_work,
4989 			      MTK_DMA_MONITOR_TIMEOUT);
4990 
4991 	return 0;
4992 
4993 err_deinit_ppe:
4994 	mtk_ppe_deinit(eth);
4995 	mtk_mdio_cleanup(eth);
4996 err_free_dev:
4997 	mtk_free_dev(eth);
4998 err_deinit_hw:
4999 	mtk_hw_deinit(eth);
5000 err_wed_exit:
5001 	mtk_wed_exit();
5002 err_destroy_sgmii:
5003 	mtk_sgmii_destroy(eth);
5004 
5005 	return err;
5006 }
5007 
5008 static int mtk_remove(struct platform_device *pdev)
5009 {
5010 	struct mtk_eth *eth = platform_get_drvdata(pdev);
5011 	struct mtk_mac *mac;
5012 	int i;
5013 
5014 	/* stop all devices to make sure that dma is properly shut down */
5015 	for (i = 0; i < MTK_MAX_DEVS; i++) {
5016 		if (!eth->netdev[i])
5017 			continue;
5018 		mtk_stop(eth->netdev[i]);
5019 		mac = netdev_priv(eth->netdev[i]);
5020 		phylink_disconnect_phy(mac->phylink);
5021 	}
5022 
5023 	mtk_wed_exit();
5024 	mtk_hw_deinit(eth);
5025 
5026 	netif_napi_del(&eth->tx_napi);
5027 	netif_napi_del(&eth->rx_napi);
5028 	mtk_cleanup(eth);
5029 	mtk_mdio_cleanup(eth);
5030 
5031 	return 0;
5032 }
5033 
5034 static const struct mtk_soc_data mt2701_data = {
5035 	.reg_map = &mtk_reg_map,
5036 	.caps = MT7623_CAPS | MTK_HWLRO,
5037 	.hw_features = MTK_HW_FEATURES,
5038 	.required_clks = MT7623_CLKS_BITMAP,
5039 	.required_pctl = true,
5040 	.version = 1,
5041 	.txrx = {
5042 		.txd_size = sizeof(struct mtk_tx_dma),
5043 		.rxd_size = sizeof(struct mtk_rx_dma),
5044 		.rx_irq_done_mask = MTK_RX_DONE_INT,
5045 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
5046 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5047 		.dma_len_offset = 16,
5048 	},
5049 };
5050 
5051 static const struct mtk_soc_data mt7621_data = {
5052 	.reg_map = &mtk_reg_map,
5053 	.caps = MT7621_CAPS,
5054 	.hw_features = MTK_HW_FEATURES,
5055 	.required_clks = MT7621_CLKS_BITMAP,
5056 	.required_pctl = false,
5057 	.version = 1,
5058 	.offload_version = 1,
5059 	.hash_offset = 2,
5060 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5061 	.txrx = {
5062 		.txd_size = sizeof(struct mtk_tx_dma),
5063 		.rxd_size = sizeof(struct mtk_rx_dma),
5064 		.rx_irq_done_mask = MTK_RX_DONE_INT,
5065 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
5066 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5067 		.dma_len_offset = 16,
5068 	},
5069 };
5070 
5071 static const struct mtk_soc_data mt7622_data = {
5072 	.reg_map = &mtk_reg_map,
5073 	.ana_rgc3 = 0x2028,
5074 	.caps = MT7622_CAPS | MTK_HWLRO,
5075 	.hw_features = MTK_HW_FEATURES,
5076 	.required_clks = MT7622_CLKS_BITMAP,
5077 	.required_pctl = false,
5078 	.version = 1,
5079 	.offload_version = 2,
5080 	.hash_offset = 2,
5081 	.has_accounting = true,
5082 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5083 	.txrx = {
5084 		.txd_size = sizeof(struct mtk_tx_dma),
5085 		.rxd_size = sizeof(struct mtk_rx_dma),
5086 		.rx_irq_done_mask = MTK_RX_DONE_INT,
5087 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
5088 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5089 		.dma_len_offset = 16,
5090 	},
5091 };
5092 
5093 static const struct mtk_soc_data mt7623_data = {
5094 	.reg_map = &mtk_reg_map,
5095 	.caps = MT7623_CAPS | MTK_HWLRO,
5096 	.hw_features = MTK_HW_FEATURES,
5097 	.required_clks = MT7623_CLKS_BITMAP,
5098 	.required_pctl = true,
5099 	.version = 1,
5100 	.offload_version = 1,
5101 	.hash_offset = 2,
5102 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5103 	.disable_pll_modes = true,
5104 	.txrx = {
5105 		.txd_size = sizeof(struct mtk_tx_dma),
5106 		.rxd_size = sizeof(struct mtk_rx_dma),
5107 		.rx_irq_done_mask = MTK_RX_DONE_INT,
5108 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
5109 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5110 		.dma_len_offset = 16,
5111 	},
5112 };
5113 
5114 static const struct mtk_soc_data mt7629_data = {
5115 	.reg_map = &mtk_reg_map,
5116 	.ana_rgc3 = 0x128,
5117 	.caps = MT7629_CAPS | MTK_HWLRO,
5118 	.hw_features = MTK_HW_FEATURES,
5119 	.required_clks = MT7629_CLKS_BITMAP,
5120 	.required_pctl = false,
5121 	.has_accounting = true,
5122 	.version = 1,
5123 	.txrx = {
5124 		.txd_size = sizeof(struct mtk_tx_dma),
5125 		.rxd_size = sizeof(struct mtk_rx_dma),
5126 		.rx_irq_done_mask = MTK_RX_DONE_INT,
5127 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
5128 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5129 		.dma_len_offset = 16,
5130 	},
5131 };
5132 
5133 static const struct mtk_soc_data mt7981_data = {
5134 	.reg_map = &mt7986_reg_map,
5135 	.ana_rgc3 = 0x128,
5136 	.caps = MT7981_CAPS,
5137 	.hw_features = MTK_HW_FEATURES,
5138 	.required_clks = MT7981_CLKS_BITMAP,
5139 	.required_pctl = false,
5140 	.version = 2,
5141 	.offload_version = 2,
5142 	.hash_offset = 4,
5143 	.has_accounting = true,
5144 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5145 	.txrx = {
5146 		.txd_size = sizeof(struct mtk_tx_dma_v2),
5147 		.rxd_size = sizeof(struct mtk_rx_dma_v2),
5148 		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
5149 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
5150 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5151 		.dma_len_offset = 8,
5152 	},
5153 };
5154 
5155 static const struct mtk_soc_data mt7986_data = {
5156 	.reg_map = &mt7986_reg_map,
5157 	.ana_rgc3 = 0x128,
5158 	.caps = MT7986_CAPS,
5159 	.hw_features = MTK_HW_FEATURES,
5160 	.required_clks = MT7986_CLKS_BITMAP,
5161 	.required_pctl = false,
5162 	.version = 2,
5163 	.offload_version = 2,
5164 	.hash_offset = 4,
5165 	.has_accounting = true,
5166 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5167 	.txrx = {
5168 		.txd_size = sizeof(struct mtk_tx_dma_v2),
5169 		.rxd_size = sizeof(struct mtk_rx_dma_v2),
5170 		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
5171 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
5172 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5173 		.dma_len_offset = 8,
5174 	},
5175 };
5176 
5177 static const struct mtk_soc_data mt7988_data = {
5178 	.reg_map = &mt7988_reg_map,
5179 	.ana_rgc3 = 0x128,
5180 	.caps = MT7988_CAPS,
5181 	.hw_features = MTK_HW_FEATURES,
5182 	.required_clks = MT7988_CLKS_BITMAP,
5183 	.required_pctl = false,
5184 	.version = 3,
5185 	.offload_version = 2,
5186 	.hash_offset = 4,
5187 	.has_accounting = true,
5188 	.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
5189 	.txrx = {
5190 		.txd_size = sizeof(struct mtk_tx_dma_v2),
5191 		.rxd_size = sizeof(struct mtk_rx_dma_v2),
5192 		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
5193 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
5194 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5195 		.dma_len_offset = 8,
5196 	},
5197 };
5198 
5199 static const struct mtk_soc_data rt5350_data = {
5200 	.reg_map = &mt7628_reg_map,
5201 	.caps = MT7628_CAPS,
5202 	.hw_features = MTK_HW_FEATURES_MT7628,
5203 	.required_clks = MT7628_CLKS_BITMAP,
5204 	.required_pctl = false,
5205 	.version = 1,
5206 	.txrx = {
5207 		.txd_size = sizeof(struct mtk_tx_dma),
5208 		.rxd_size = sizeof(struct mtk_rx_dma),
5209 		.rx_irq_done_mask = MTK_RX_DONE_INT,
5210 		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
5211 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
5212 		.dma_len_offset = 16,
5213 	},
5214 };
5215 
5216 const struct of_device_id of_mtk_match[] = {
5217 	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5218 	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
5219 	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
5220 	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5221 	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5222 	{ .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5223 	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5224 	{ .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5225 	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
5226 	{},
5227 };
5228 MODULE_DEVICE_TABLE(of, of_mtk_match);
5229 
5230 static struct platform_driver mtk_driver = {
5231 	.probe = mtk_probe,
5232 	.remove = mtk_remove,
5233 	.driver = {
5234 		.name = "mtk_soc_eth",
5235 		.of_match_table = of_mtk_match,
5236 	},
5237 };
5238 
5239 module_platform_driver(mtk_driver);
5240 
5241 MODULE_LICENSE("GPL");
5242 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
5243 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
5244