1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5  *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6  *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7  */
8 
9 #include <linux/of_device.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/regmap.h>
15 #include <linux/clk.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/if_vlan.h>
18 #include <linux/reset.h>
19 #include <linux/tcp.h>
20 #include <linux/interrupt.h>
21 #include <linux/pinctrl/devinfo.h>
22 #include <linux/phylink.h>
23 #include <linux/jhash.h>
24 #include <linux/bitfield.h>
25 #include <net/dsa.h>
26 
27 #include "mtk_eth_soc.h"
28 #include "mtk_wed.h"
29 
30 static int mtk_msg_level = -1;
31 module_param_named(msg_level, mtk_msg_level, int, 0);
32 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
33 
34 #define MTK_ETHTOOL_STAT(x) { #x, \
35 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
36 
37 static const struct mtk_reg_map mtk_reg_map = {
38 	.tx_irq_mask		= 0x1a1c,
39 	.tx_irq_status		= 0x1a18,
40 	.pdma = {
41 		.rx_ptr		= 0x0900,
42 		.rx_cnt_cfg	= 0x0904,
43 		.pcrx_ptr	= 0x0908,
44 		.glo_cfg	= 0x0a04,
45 		.rst_idx	= 0x0a08,
46 		.delay_irq	= 0x0a0c,
47 		.irq_status	= 0x0a20,
48 		.irq_mask	= 0x0a28,
49 		.int_grp	= 0x0a50,
50 	},
51 	.qdma = {
52 		.qtx_cfg	= 0x1800,
53 		.rx_ptr		= 0x1900,
54 		.rx_cnt_cfg	= 0x1904,
55 		.qcrx_ptr	= 0x1908,
56 		.glo_cfg	= 0x1a04,
57 		.rst_idx	= 0x1a08,
58 		.delay_irq	= 0x1a0c,
59 		.fc_th		= 0x1a10,
60 		.int_grp	= 0x1a20,
61 		.hred		= 0x1a44,
62 		.ctx_ptr	= 0x1b00,
63 		.dtx_ptr	= 0x1b04,
64 		.crx_ptr	= 0x1b10,
65 		.drx_ptr	= 0x1b14,
66 		.fq_head	= 0x1b20,
67 		.fq_tail	= 0x1b24,
68 		.fq_count	= 0x1b28,
69 		.fq_blen	= 0x1b2c,
70 	},
71 	.gdm1_cnt		= 0x2400,
72 };
73 
74 static const struct mtk_reg_map mt7628_reg_map = {
75 	.tx_irq_mask		= 0x0a28,
76 	.tx_irq_status		= 0x0a20,
77 	.pdma = {
78 		.rx_ptr		= 0x0900,
79 		.rx_cnt_cfg	= 0x0904,
80 		.pcrx_ptr	= 0x0908,
81 		.glo_cfg	= 0x0a04,
82 		.rst_idx	= 0x0a08,
83 		.delay_irq	= 0x0a0c,
84 		.irq_status	= 0x0a20,
85 		.irq_mask	= 0x0a28,
86 		.int_grp	= 0x0a50,
87 	},
88 };
89 
90 static const struct mtk_reg_map mt7986_reg_map = {
91 	.tx_irq_mask		= 0x461c,
92 	.tx_irq_status		= 0x4618,
93 	.pdma = {
94 		.rx_ptr		= 0x6100,
95 		.rx_cnt_cfg	= 0x6104,
96 		.pcrx_ptr	= 0x6108,
97 		.glo_cfg	= 0x6204,
98 		.rst_idx	= 0x6208,
99 		.delay_irq	= 0x620c,
100 		.irq_status	= 0x6220,
101 		.irq_mask	= 0x6228,
102 		.int_grp	= 0x6250,
103 	},
104 	.qdma = {
105 		.qtx_cfg	= 0x4400,
106 		.rx_ptr		= 0x4500,
107 		.rx_cnt_cfg	= 0x4504,
108 		.qcrx_ptr	= 0x4508,
109 		.glo_cfg	= 0x4604,
110 		.rst_idx	= 0x4608,
111 		.delay_irq	= 0x460c,
112 		.fc_th		= 0x4610,
113 		.int_grp	= 0x4620,
114 		.hred		= 0x4644,
115 		.ctx_ptr	= 0x4700,
116 		.dtx_ptr	= 0x4704,
117 		.crx_ptr	= 0x4710,
118 		.drx_ptr	= 0x4714,
119 		.fq_head	= 0x4720,
120 		.fq_tail	= 0x4724,
121 		.fq_count	= 0x4728,
122 		.fq_blen	= 0x472c,
123 	},
124 	.gdm1_cnt		= 0x1c00,
125 };
126 
127 /* strings used by ethtool */
128 static const struct mtk_ethtool_stats {
129 	char str[ETH_GSTRING_LEN];
130 	u32 offset;
131 } mtk_ethtool_stats[] = {
132 	MTK_ETHTOOL_STAT(tx_bytes),
133 	MTK_ETHTOOL_STAT(tx_packets),
134 	MTK_ETHTOOL_STAT(tx_skip),
135 	MTK_ETHTOOL_STAT(tx_collisions),
136 	MTK_ETHTOOL_STAT(rx_bytes),
137 	MTK_ETHTOOL_STAT(rx_packets),
138 	MTK_ETHTOOL_STAT(rx_overflow),
139 	MTK_ETHTOOL_STAT(rx_fcs_errors),
140 	MTK_ETHTOOL_STAT(rx_short_errors),
141 	MTK_ETHTOOL_STAT(rx_long_errors),
142 	MTK_ETHTOOL_STAT(rx_checksum_errors),
143 	MTK_ETHTOOL_STAT(rx_flow_control_packets),
144 };
145 
146 static const char * const mtk_clks_source_name[] = {
147 	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
148 	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
149 	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
150 	"sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
151 };
152 
153 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
154 {
155 	__raw_writel(val, eth->base + reg);
156 }
157 
158 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
159 {
160 	return __raw_readl(eth->base + reg);
161 }
162 
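/* read-modify-write helper: clear the bits in @mask, then set @set */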
163 static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
164 {
165 	u32 val;
166 
167 	val = mtk_r32(eth, reg);
168 	val &= ~mask;
169 	val |= set;
170 	mtk_w32(eth, val, reg);
171 	return reg;
172 }
173 
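/* poll the MDIO access register until the controller goes idle, or give up
 * after PHY_IAC_TIMEOUT
 */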
174 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
175 {
176 	unsigned long t_start = jiffies;
177 
178 	while (1) {
179 		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
180 			return 0;
181 		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
182 			break;
183 		cond_resched();
184 	}
185 
186 	dev_err(eth->dev, "mdio: MDIO timeout\n");
187 	return -ETIMEDOUT;
188 }
189 
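/* low-level MDIO write; clause-45 accesses issue an address cycle before
 * the data cycle
 */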
190 static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
191 			   u32 write_data)
192 {
193 	int ret;
194 
195 	ret = mtk_mdio_busy_wait(eth);
196 	if (ret < 0)
197 		return ret;
198 
199 	if (phy_reg & MII_ADDR_C45) {
200 		mtk_w32(eth, PHY_IAC_ACCESS |
201 			     PHY_IAC_START_C45 |
202 			     PHY_IAC_CMD_C45_ADDR |
203 			     PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
204 			     PHY_IAC_ADDR(phy_addr) |
205 			     PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
206 			MTK_PHY_IAC);
207 
208 		ret = mtk_mdio_busy_wait(eth);
209 		if (ret < 0)
210 			return ret;
211 
212 		mtk_w32(eth, PHY_IAC_ACCESS |
213 			     PHY_IAC_START_C45 |
214 			     PHY_IAC_CMD_WRITE |
215 			     PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
216 			     PHY_IAC_ADDR(phy_addr) |
217 			     PHY_IAC_DATA(write_data),
218 			MTK_PHY_IAC);
219 	} else {
220 		mtk_w32(eth, PHY_IAC_ACCESS |
221 			     PHY_IAC_START_C22 |
222 			     PHY_IAC_CMD_WRITE |
223 			     PHY_IAC_REG(phy_reg) |
224 			     PHY_IAC_ADDR(phy_addr) |
225 			     PHY_IAC_DATA(write_data),
226 			MTK_PHY_IAC);
227 	}
228 
229 	ret = mtk_mdio_busy_wait(eth);
230 	if (ret < 0)
231 		return ret;
232 
233 	return 0;
234 }
235 
236 static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
237 {
238 	int ret;
239 
240 	ret = mtk_mdio_busy_wait(eth);
241 	if (ret < 0)
242 		return ret;
243 
244 	if (phy_reg & MII_ADDR_C45) {
245 		mtk_w32(eth, PHY_IAC_ACCESS |
246 			     PHY_IAC_START_C45 |
247 			     PHY_IAC_CMD_C45_ADDR |
248 			     PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
249 			     PHY_IAC_ADDR(phy_addr) |
250 			     PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
251 			MTK_PHY_IAC);
252 
253 		ret = mtk_mdio_busy_wait(eth);
254 		if (ret < 0)
255 			return ret;
256 
257 		mtk_w32(eth, PHY_IAC_ACCESS |
258 			     PHY_IAC_START_C45 |
259 			     PHY_IAC_CMD_C45_READ |
260 			     PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
261 			     PHY_IAC_ADDR(phy_addr),
262 			MTK_PHY_IAC);
263 	} else {
264 		mtk_w32(eth, PHY_IAC_ACCESS |
265 			     PHY_IAC_START_C22 |
266 			     PHY_IAC_CMD_C22_READ |
267 			     PHY_IAC_REG(phy_reg) |
268 			     PHY_IAC_ADDR(phy_addr),
269 			MTK_PHY_IAC);
270 	}
271 
272 	ret = mtk_mdio_busy_wait(eth);
273 	if (ret < 0)
274 		return ret;
275 
276 	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
277 }
278 
279 static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
280 			  int phy_reg, u16 val)
281 {
282 	struct mtk_eth *eth = bus->priv;
283 
284 	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
285 }
286 
287 static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
288 {
289 	struct mtk_eth *eth = bus->priv;
290 
291 	return _mtk_mdio_read(eth, phy_addr, phy_reg);
292 }
293 
294 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
295 				     phy_interface_t interface)
296 {
297 	u32 val;
298 
299 	/* Check DDR memory type.
300 	 * Currently TRGMII mode with DDR2 memory is not supported.
301 	 */
302 	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
303 	if (interface == PHY_INTERFACE_MODE_TRGMII &&
304 	    val & SYSCFG_DRAM_TYPE_DDR2) {
305 		dev_err(eth->dev,
306 			"TRGMII mode with DDR2 memory is not supported!\n");
307 		return -EOPNOTSUPP;
308 	}
309 
310 	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
311 		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
312 
313 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
314 			   ETHSYS_TRGMII_MT7621_MASK, val);
315 
316 	return 0;
317 }
318 
319 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
320 				   phy_interface_t interface, int speed)
321 {
322 	u32 val;
323 	int ret;
324 
325 	if (interface == PHY_INTERFACE_MODE_TRGMII) {
326 		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
327 		val = 500000000;
328 		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
329 		if (ret)
330 			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
331 		return;
332 	}
333 
334 	val = (speed == SPEED_1000) ?
335 		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
336 	mtk_w32(eth, val, INTF_MODE);
337 
338 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
339 			   ETHSYS_TRGMII_CLK_SEL362_5,
340 			   ETHSYS_TRGMII_CLK_SEL362_5);
341 
342 	val = (speed == SPEED_1000) ? 250000000 : 500000000;
343 	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
344 	if (ret)
345 		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
346 
347 	val = (speed == SPEED_1000) ?
348 		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
349 	mtk_w32(eth, val, TRGMII_RCK_CTRL);
350 
351 	val = (speed == SPEED_1000) ?
352 		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
353 	mtk_w32(eth, val, TRGMII_TCK_CTRL);
354 }
355 
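/* SGMII and 802.3z interface modes are handled by the SGMII PCS; PCS 0 is
 * used when the SGMII unit is shared between the GMACs
 */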
356 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
357 					      phy_interface_t interface)
358 {
359 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
360 					   phylink_config);
361 	struct mtk_eth *eth = mac->hw;
362 	unsigned int sid;
363 
364 	if (interface == PHY_INTERFACE_MODE_SGMII ||
365 	    phy_interface_mode_is_8023z(interface)) {
366 		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
367 		       0 : mac->id;
368 
369 		return mtk_sgmii_select_pcs(eth->sgmii, sid);
370 	}
371 
372 	return NULL;
373 }
374 
375 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
376 			   const struct phylink_link_state *state)
377 {
378 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
379 					   phylink_config);
380 	struct mtk_eth *eth = mac->hw;
381 	int val, ge_mode, err = 0;
382 	u32 i;
383 
384 	/* MT76x8 has no hardware settings for the MAC */
385 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
386 	    mac->interface != state->interface) {
387 		/* Setup soc pin functions */
388 		switch (state->interface) {
389 		case PHY_INTERFACE_MODE_TRGMII:
390 			if (mac->id)
391 				goto err_phy;
392 			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
393 					  MTK_GMAC1_TRGMII))
394 				goto err_phy;
395 			fallthrough;
396 		case PHY_INTERFACE_MODE_RGMII_TXID:
397 		case PHY_INTERFACE_MODE_RGMII_RXID:
398 		case PHY_INTERFACE_MODE_RGMII_ID:
399 		case PHY_INTERFACE_MODE_RGMII:
400 		case PHY_INTERFACE_MODE_MII:
401 		case PHY_INTERFACE_MODE_REVMII:
402 		case PHY_INTERFACE_MODE_RMII:
403 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
404 				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
405 				if (err)
406 					goto init_err;
407 			}
408 			break;
409 		case PHY_INTERFACE_MODE_1000BASEX:
410 		case PHY_INTERFACE_MODE_2500BASEX:
411 		case PHY_INTERFACE_MODE_SGMII:
412 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
413 				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
414 				if (err)
415 					goto init_err;
416 			}
417 			break;
418 		case PHY_INTERFACE_MODE_GMII:
419 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
420 				err = mtk_gmac_gephy_path_setup(eth, mac->id);
421 				if (err)
422 					goto init_err;
423 			}
424 			break;
425 		default:
426 			goto err_phy;
427 		}
428 
429 		/* Setup clock for 1st gmac */
430 		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
431 		    !phy_interface_mode_is_8023z(state->interface) &&
432 		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
433 			if (MTK_HAS_CAPS(mac->hw->soc->caps,
434 					 MTK_TRGMII_MT7621_CLK)) {
435 				if (mt7621_gmac0_rgmii_adjust(mac->hw,
436 							      state->interface))
437 					goto err_phy;
438 			} else {
439 				/* FIXME: this is incorrect. Not only does it
440 				 * use state->speed (which is not guaranteed
441 				 * to be correct) but it also makes use of it
442 				 * in a code path that will only be reachable
443 				 * when the PHY interface mode changes, not
444 				 * when the speed changes. Consequently, RGMII
445 				 * is probably broken.
446 				 */
447 				mtk_gmac0_rgmii_adjust(mac->hw,
448 						       state->interface,
449 						       state->speed);
450 
451 				/* mt7623_pad_clk_setup */
452 				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
453 					mtk_w32(mac->hw,
454 						TD_DM_DRVP(8) | TD_DM_DRVN(8),
455 						TRGMII_TD_ODT(i));
456 
457 				/* Assert/release MT7623 RXC reset */
458 				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
459 					TRGMII_RCK_CTRL);
460 				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
461 			}
462 		}
463 
464 		ge_mode = 0;
465 		switch (state->interface) {
466 		case PHY_INTERFACE_MODE_MII:
467 		case PHY_INTERFACE_MODE_GMII:
468 			ge_mode = 1;
469 			break;
470 		case PHY_INTERFACE_MODE_REVMII:
471 			ge_mode = 2;
472 			break;
473 		case PHY_INTERFACE_MODE_RMII:
474 			if (mac->id)
475 				goto err_phy;
476 			ge_mode = 3;
477 			break;
478 		default:
479 			break;
480 		}
481 
482 		/* put the gmac into the right mode */
483 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
484 		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
485 		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
486 		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
487 
488 		mac->interface = state->interface;
489 	}
490 
491 	/* SGMII */
492 	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
493 	    phy_interface_mode_is_8023z(state->interface)) {
494 		/* The path from GMAC to SGMII will be enabled once the
495 		 * SGMIISYS setup is done.
496 		 */
497 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
498 
499 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
500 				   SYSCFG0_SGMII_MASK,
501 				   ~(u32)SYSCFG0_SGMII_MASK);
502 
503 		/* Save the syscfg0 value for mac_finish */
504 		mac->syscfg0 = val;
505 	} else if (phylink_autoneg_inband(mode)) {
506 		dev_err(eth->dev,
507 			"In-band mode not supported in non SGMII mode!\n");
508 		return;
509 	}
510 
511 	return;
512 
513 err_phy:
514 	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
515 		mac->id, phy_modes(state->interface));
516 	return;
517 
518 init_err:
519 	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
520 		mac->id, phy_modes(state->interface), err);
521 }
522 
523 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
524 			  phy_interface_t interface)
525 {
526 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
527 					   phylink_config);
528 	struct mtk_eth *eth = mac->hw;
529 	u32 mcr_cur, mcr_new;
530 
531 	/* Enable SGMII */
532 	if (interface == PHY_INTERFACE_MODE_SGMII ||
533 	    phy_interface_mode_is_8023z(interface))
534 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
535 				   SYSCFG0_SGMII_MASK, mac->syscfg0);
536 
537 	/* Setup gmac */
538 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
539 	mcr_new = mcr_cur;
540 	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
541 		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
542 
543 	/* Only update control register when needed! */
544 	if (mcr_new != mcr_cur)
545 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
546 
547 	return 0;
548 }
549 
550 static void mtk_mac_pcs_get_state(struct phylink_config *config,
551 				  struct phylink_link_state *state)
552 {
553 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
554 					   phylink_config);
555 	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
556 
557 	state->link = (pmsr & MAC_MSR_LINK);
558 	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
559 
560 	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
561 	case 0:
562 		state->speed = SPEED_10;
563 		break;
564 	case MAC_MSR_SPEED_100:
565 		state->speed = SPEED_100;
566 		break;
567 	case MAC_MSR_SPEED_1000:
568 		state->speed = SPEED_1000;
569 		break;
570 	default:
571 		state->speed = SPEED_UNKNOWN;
572 		break;
573 	}
574 
575 	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
576 	if (pmsr & MAC_MSR_RX_FC)
577 		state->pause |= MLO_PAUSE_RX;
578 	if (pmsr & MAC_MSR_TX_FC)
579 		state->pause |= MLO_PAUSE_TX;
580 }
581 
582 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
583 			      phy_interface_t interface)
584 {
585 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
586 					   phylink_config);
587 	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
588 
589 	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
590 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
591 }
592 
593 static void mtk_mac_link_up(struct phylink_config *config,
594 			    struct phy_device *phy,
595 			    unsigned int mode, phy_interface_t interface,
596 			    int speed, int duplex, bool tx_pause, bool rx_pause)
597 {
598 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
599 					   phylink_config);
600 	u32 mcr;
601 
602 	mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
603 	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
604 		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
605 		 MAC_MCR_FORCE_RX_FC);
606 
607 	/* Configure speed */
608 	switch (speed) {
609 	case SPEED_2500:
610 	case SPEED_1000:
611 		mcr |= MAC_MCR_SPEED_1000;
612 		break;
613 	case SPEED_100:
614 		mcr |= MAC_MCR_SPEED_100;
615 		break;
616 	}
617 
618 	/* Configure duplex */
619 	if (duplex == DUPLEX_FULL)
620 		mcr |= MAC_MCR_FORCE_DPX;
621 
622 	/* Configure pause modes - phylink will avoid these for half duplex */
623 	if (tx_pause)
624 		mcr |= MAC_MCR_FORCE_TX_FC;
625 	if (rx_pause)
626 		mcr |= MAC_MCR_FORCE_RX_FC;
627 
628 	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
629 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
630 }
631 
632 static const struct phylink_mac_ops mtk_phylink_ops = {
633 	.validate = phylink_generic_validate,
634 	.mac_select_pcs = mtk_mac_select_pcs,
635 	.mac_pcs_get_state = mtk_mac_pcs_get_state,
636 	.mac_config = mtk_mac_config,
637 	.mac_finish = mtk_mac_finish,
638 	.mac_link_down = mtk_mac_link_down,
639 	.mac_link_up = mtk_mac_link_up,
640 };
641 
642 static int mtk_mdio_init(struct mtk_eth *eth)
643 {
644 	struct device_node *mii_np;
645 	int ret;
646 
647 	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
648 	if (!mii_np) {
649 		dev_err(eth->dev, "no %s child node found\n", "mdio-bus");
650 		return -ENODEV;
651 	}
652 
653 	if (!of_device_is_available(mii_np)) {
654 		ret = -ENODEV;
655 		goto err_put_node;
656 	}
657 
658 	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
659 	if (!eth->mii_bus) {
660 		ret = -ENOMEM;
661 		goto err_put_node;
662 	}
663 
664 	eth->mii_bus->name = "mdio";
665 	eth->mii_bus->read = mtk_mdio_read;
666 	eth->mii_bus->write = mtk_mdio_write;
667 	eth->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
668 	eth->mii_bus->priv = eth;
669 	eth->mii_bus->parent = eth->dev;
670 
671 	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
672 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
673 
674 err_put_node:
675 	of_node_put(mii_np);
676 	return ret;
677 }
678 
679 static void mtk_mdio_cleanup(struct mtk_eth *eth)
680 {
681 	if (!eth->mii_bus)
682 		return;
683 
684 	mdiobus_unregister(eth->mii_bus);
685 }
686 
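/* helpers for the TX/RX interrupt mask registers; updates are serialized
 * with the tx_irq_lock/rx_irq_lock spinlocks
 */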
687 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
688 {
689 	unsigned long flags;
690 	u32 val;
691 
692 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
693 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
694 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
695 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
696 }
697 
698 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
699 {
700 	unsigned long flags;
701 	u32 val;
702 
703 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
704 	val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
705 	mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
706 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
707 }
708 
709 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
710 {
711 	unsigned long flags;
712 	u32 val;
713 
714 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
715 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
716 	mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
717 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
718 }
719 
720 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
721 {
722 	unsigned long flags;
723 	u32 val;
724 
725 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
726 	val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
727 	mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
728 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
729 }
730 
731 static int mtk_set_mac_address(struct net_device *dev, void *p)
732 {
733 	int ret = eth_mac_addr(dev, p);
734 	struct mtk_mac *mac = netdev_priv(dev);
735 	struct mtk_eth *eth = mac->hw;
736 	const char *macaddr = dev->dev_addr;
737 
738 	if (ret)
739 		return ret;
740 
741 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
742 		return -EBUSY;
743 
744 	spin_lock_bh(&mac->hw->page_lock);
745 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
746 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
747 			MT7628_SDM_MAC_ADRH);
748 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
749 			(macaddr[4] << 8) | macaddr[5],
750 			MT7628_SDM_MAC_ADRL);
751 	} else {
752 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
753 			MTK_GDMA_MAC_ADRH(mac->id));
754 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
755 			(macaddr[4] << 8) | macaddr[5],
756 			MTK_GDMA_MAC_ADRL(mac->id));
757 	}
758 	spin_unlock_bh(&mac->hw->page_lock);
759 
760 	return 0;
761 }
762 
763 void mtk_stats_update_mac(struct mtk_mac *mac)
764 {
765 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
766 	struct mtk_eth *eth = mac->hw;
767 
768 	u64_stats_update_begin(&hw_stats->syncp);
769 
770 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
771 		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
772 		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
773 		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
774 		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
775 		hw_stats->rx_checksum_errors +=
776 			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
777 	} else {
778 		const struct mtk_reg_map *reg_map = eth->soc->reg_map;
779 		unsigned int offs = hw_stats->reg_offset;
780 		u64 stats;
781 
782 		hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
783 		stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
784 		if (stats)
785 			hw_stats->rx_bytes += (stats << 32);
786 		hw_stats->rx_packets +=
787 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
788 		hw_stats->rx_overflow +=
789 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
790 		hw_stats->rx_fcs_errors +=
791 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
792 		hw_stats->rx_short_errors +=
793 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
794 		hw_stats->rx_long_errors +=
795 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
796 		hw_stats->rx_checksum_errors +=
797 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
798 		hw_stats->rx_flow_control_packets +=
799 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
800 		hw_stats->tx_skip +=
801 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
802 		hw_stats->tx_collisions +=
803 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
804 		hw_stats->tx_bytes +=
805 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
806 		stats =  mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
807 		if (stats)
808 			hw_stats->tx_bytes += (stats << 32);
809 		hw_stats->tx_packets +=
810 			mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
811 	}
812 
813 	u64_stats_update_end(&hw_stats->syncp);
814 }
815 
816 static void mtk_stats_update(struct mtk_eth *eth)
817 {
818 	int i;
819 
820 	for (i = 0; i < MTK_MAC_COUNT; i++) {
821 		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
822 			continue;
823 		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
824 			mtk_stats_update_mac(eth->mac[i]);
825 			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
826 		}
827 	}
828 }
829 
830 static void mtk_get_stats64(struct net_device *dev,
831 			    struct rtnl_link_stats64 *storage)
832 {
833 	struct mtk_mac *mac = netdev_priv(dev);
834 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
835 	unsigned int start;
836 
837 	if (netif_running(dev) && netif_device_present(dev)) {
838 		if (spin_trylock_bh(&hw_stats->stats_lock)) {
839 			mtk_stats_update_mac(mac);
840 			spin_unlock_bh(&hw_stats->stats_lock);
841 		}
842 	}
843 
844 	do {
845 		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
846 		storage->rx_packets = hw_stats->rx_packets;
847 		storage->tx_packets = hw_stats->tx_packets;
848 		storage->rx_bytes = hw_stats->rx_bytes;
849 		storage->tx_bytes = hw_stats->tx_bytes;
850 		storage->collisions = hw_stats->tx_collisions;
851 		storage->rx_length_errors = hw_stats->rx_short_errors +
852 			hw_stats->rx_long_errors;
853 		storage->rx_over_errors = hw_stats->rx_overflow;
854 		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
855 		storage->rx_errors = hw_stats->rx_checksum_errors;
856 		storage->tx_aborted_errors = hw_stats->tx_skip;
857 	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
858 
859 	storage->tx_errors = dev->stats.tx_errors;
860 	storage->rx_dropped = dev->stats.rx_dropped;
861 	storage->tx_dropped = dev->stats.tx_dropped;
862 }
863 
864 static inline int mtk_max_frag_size(int mtu)
865 {
866 	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH_2K */
867 	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
868 		mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
869 
870 	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
871 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
872 }
873 
874 static inline int mtk_max_buf_size(int frag_size)
875 {
876 	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
877 		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
878 
879 	WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
880 
881 	return buf_size;
882 }
883 
884 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
885 			    struct mtk_rx_dma_v2 *dma_rxd)
886 {
887 	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
888 	if (!(rxd->rxd2 & RX_DMA_DONE))
889 		return false;
890 
891 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
892 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
893 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
894 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
895 		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
896 		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
897 	}
898 
899 	return true;
900 }
901 
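/* HW LRO buffers are larger than a page, so allocate them straight from
 * the page allocator instead of the page_frag caches
 */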
902 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
903 {
904 	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
905 	unsigned long data;
906 
907 	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
908 				get_order(size));
909 
910 	return (void *)data;
911 }
912 
913 /* the QDMA core needs scratch memory to be set up */
914 static int mtk_init_fq_dma(struct mtk_eth *eth)
915 {
916 	const struct mtk_soc_data *soc = eth->soc;
917 	dma_addr_t phy_ring_tail;
918 	int cnt = MTK_DMA_SIZE;
919 	dma_addr_t dma_addr;
920 	int i;
921 
922 	eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
923 					       cnt * soc->txrx.txd_size,
924 					       &eth->phy_scratch_ring,
925 					       GFP_KERNEL);
926 	if (unlikely(!eth->scratch_ring))
927 		return -ENOMEM;
928 
929 	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
930 	if (unlikely(!eth->scratch_head))
931 		return -ENOMEM;
932 
933 	dma_addr = dma_map_single(eth->dma_dev,
934 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
935 				  DMA_FROM_DEVICE);
936 	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
937 		return -ENOMEM;
938 
939 	phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
940 
941 	for (i = 0; i < cnt; i++) {
942 		struct mtk_tx_dma_v2 *txd;
943 
944 		txd = eth->scratch_ring + i * soc->txrx.txd_size;
945 		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
946 		if (i < cnt - 1)
947 			txd->txd2 = eth->phy_scratch_ring +
948 				    (i + 1) * soc->txrx.txd_size;
949 
950 		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
951 		txd->txd4 = 0;
952 		if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
953 			txd->txd5 = 0;
954 			txd->txd6 = 0;
955 			txd->txd7 = 0;
956 			txd->txd8 = 0;
957 		}
958 	}
959 
960 	mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
961 	mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
962 	mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
963 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
964 
965 	return 0;
966 }
967 
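/* translate a QDMA descriptor bus address into its virtual address within
 * the TX ring
 */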
968 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
969 {
970 	return ring->dma + (desc - ring->phys);
971 }
972 
973 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
974 					     void *txd, u32 txd_size)
975 {
976 	int idx = (txd - ring->dma) / txd_size;
977 
978 	return &ring->buf[idx];
979 }
980 
981 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
982 				       struct mtk_tx_dma *dma)
983 {
984 	return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
985 }
986 
987 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
988 {
989 	return (dma - ring->dma) / txd_size;
990 }
991 
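/* unmap the DMA buffer(s) of a TX slot and release its skb; QDMA keeps a
 * single mapping per slot, PDMA may keep two
 */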
992 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
993 			 bool napi)
994 {
995 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
996 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
997 			dma_unmap_single(eth->dma_dev,
998 					 dma_unmap_addr(tx_buf, dma_addr0),
999 					 dma_unmap_len(tx_buf, dma_len0),
1000 					 DMA_TO_DEVICE);
1001 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1002 			dma_unmap_page(eth->dma_dev,
1003 				       dma_unmap_addr(tx_buf, dma_addr0),
1004 				       dma_unmap_len(tx_buf, dma_len0),
1005 				       DMA_TO_DEVICE);
1006 		}
1007 	} else {
1008 		if (dma_unmap_len(tx_buf, dma_len0)) {
1009 			dma_unmap_page(eth->dma_dev,
1010 				       dma_unmap_addr(tx_buf, dma_addr0),
1011 				       dma_unmap_len(tx_buf, dma_len0),
1012 				       DMA_TO_DEVICE);
1013 		}
1014 
1015 		if (dma_unmap_len(tx_buf, dma_len1)) {
1016 			dma_unmap_page(eth->dma_dev,
1017 				       dma_unmap_addr(tx_buf, dma_addr1),
1018 				       dma_unmap_len(tx_buf, dma_len1),
1019 				       DMA_TO_DEVICE);
1020 		}
1021 	}
1022 
1023 	tx_buf->flags = 0;
1024 	if (tx_buf->skb &&
1025 	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
1026 		if (napi)
1027 			napi_consume_skb(tx_buf->skb, napi);
1028 		else
1029 			dev_kfree_skb_any(tx_buf->skb);
1030 	}
1031 	tx_buf->skb = NULL;
1032 }
1033 
1034 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1035 			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1036 			 size_t size, int idx)
1037 {
1038 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1039 		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1040 		dma_unmap_len_set(tx_buf, dma_len0, size);
1041 	} else {
1042 		if (idx & 1) {
1043 			txd->txd3 = mapped_addr;
1044 			txd->txd2 |= TX_DMA_PLEN1(size);
1045 			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1046 			dma_unmap_len_set(tx_buf, dma_len1, size);
1047 		} else {
1048 			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1049 			txd->txd1 = mapped_addr;
1050 			txd->txd2 = TX_DMA_PLEN0(size);
1051 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1052 			dma_unmap_len_set(tx_buf, dma_len0, size);
1053 		}
1054 	}
1055 }
1056 
1057 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1058 				   struct mtk_tx_dma_desc_info *info)
1059 {
1060 	struct mtk_mac *mac = netdev_priv(dev);
1061 	struct mtk_eth *eth = mac->hw;
1062 	struct mtk_tx_dma *desc = txd;
1063 	u32 data;
1064 
1065 	WRITE_ONCE(desc->txd1, info->addr);
1066 
1067 	data = TX_DMA_SWC | TX_DMA_PLEN0(info->size);
1068 	if (info->last)
1069 		data |= TX_DMA_LS0;
1070 	WRITE_ONCE(desc->txd3, data);
1071 
1072 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1073 	if (info->first) {
1074 		if (info->gso)
1075 			data |= TX_DMA_TSO;
1076 		/* tx checksum offload */
1077 		if (info->csum)
1078 			data |= TX_DMA_CHKSUM;
1079 		/* vlan header offload */
1080 		if (info->vlan)
1081 			data |= TX_DMA_INS_VLAN | info->vlan_tci;
1082 	}
1083 	WRITE_ONCE(desc->txd4, data);
1084 }
1085 
1086 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1087 				   struct mtk_tx_dma_desc_info *info)
1088 {
1089 	struct mtk_mac *mac = netdev_priv(dev);
1090 	struct mtk_tx_dma_v2 *desc = txd;
1091 	struct mtk_eth *eth = mac->hw;
1092 	u32 data;
1093 
1094 	WRITE_ONCE(desc->txd1, info->addr);
1095 
1096 	data = TX_DMA_PLEN0(info->size);
1097 	if (info->last)
1098 		data |= TX_DMA_LS0;
1099 	WRITE_ONCE(desc->txd3, data);
1100 
1101 	if (!info->qid && mac->id)
1102 		info->qid = MTK_QDMA_GMAC2_QID;
1103 
1104 	data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
1105 	data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1106 	WRITE_ONCE(desc->txd4, data);
1107 
1108 	data = 0;
1109 	if (info->first) {
1110 		if (info->gso)
1111 			data |= TX_DMA_TSO_V2;
1112 		/* tx checksum offload */
1113 		if (info->csum)
1114 			data |= TX_DMA_CHKSUM_V2;
1115 	}
1116 	WRITE_ONCE(desc->txd5, data);
1117 
1118 	data = 0;
1119 	if (info->first && info->vlan)
1120 		data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1121 	WRITE_ONCE(desc->txd6, data);
1122 
1123 	WRITE_ONCE(desc->txd7, 0);
1124 	WRITE_ONCE(desc->txd8, 0);
1125 }
1126 
1127 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1128 				struct mtk_tx_dma_desc_info *info)
1129 {
1130 	struct mtk_mac *mac = netdev_priv(dev);
1131 	struct mtk_eth *eth = mac->hw;
1132 
1133 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1134 		mtk_tx_set_dma_desc_v2(dev, txd, info);
1135 	else
1136 		mtk_tx_set_dma_desc_v1(dev, txd, info);
1137 }
1138 
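/* map the skb head and fragments onto TX descriptors and kick the
 * hardware; on failure all mappings are unwound and -ENOMEM is returned
 */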
1139 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1140 		      int tx_num, struct mtk_tx_ring *ring, bool gso)
1141 {
1142 	struct mtk_tx_dma_desc_info txd_info = {
1143 		.size = skb_headlen(skb),
1144 		.gso = gso,
1145 		.csum = skb->ip_summed == CHECKSUM_PARTIAL,
1146 		.vlan = skb_vlan_tag_present(skb),
1147 		.qid = skb->mark & MTK_QDMA_TX_MASK,
1148 		.vlan_tci = skb_vlan_tag_get(skb),
1149 		.first = true,
1150 		.last = !skb_is_nonlinear(skb),
1151 	};
1152 	struct mtk_mac *mac = netdev_priv(dev);
1153 	struct mtk_eth *eth = mac->hw;
1154 	const struct mtk_soc_data *soc = eth->soc;
1155 	struct mtk_tx_dma *itxd, *txd;
1156 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1157 	struct mtk_tx_buf *itx_buf, *tx_buf;
1158 	int i, n_desc = 1;
1159 	int k = 0;
1160 
1161 	itxd = ring->next_free;
1162 	itxd_pdma = qdma_to_pdma(ring, itxd);
1163 	if (itxd == ring->last_free)
1164 		return -ENOMEM;
1165 
1166 	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1167 	memset(itx_buf, 0, sizeof(*itx_buf));
1168 
1169 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1170 				       DMA_TO_DEVICE);
1171 	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1172 		return -ENOMEM;
1173 
1174 	mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1175 
1176 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1177 	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1178 			  MTK_TX_FLAGS_FPORT1;
1179 	setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1180 		     k++);
1181 
1182 	/* TX SG offload */
1183 	txd = itxd;
1184 	txd_pdma = qdma_to_pdma(ring, txd);
1185 
1186 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1187 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1188 		unsigned int offset = 0;
1189 		int frag_size = skb_frag_size(frag);
1190 
1191 		while (frag_size) {
1192 			bool new_desc = true;
1193 
1194 			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1195 			    (i & 0x1)) {
1196 				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1197 				txd_pdma = qdma_to_pdma(ring, txd);
1198 				if (txd == ring->last_free)
1199 					goto err_dma;
1200 
1201 				n_desc++;
1202 			} else {
1203 				new_desc = false;
1204 			}
1205 
1206 			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1207 			txd_info.size = min_t(unsigned int, frag_size,
1208 					      soc->txrx.dma_max_len);
1209 			txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
1210 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1211 					!(frag_size - txd_info.size);
1212 			txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1213 							 offset, txd_info.size,
1214 							 DMA_TO_DEVICE);
1215 			if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1216 				goto err_dma;
1217 
1218 			mtk_tx_set_dma_desc(dev, txd, &txd_info);
1219 
1220 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
1221 						    soc->txrx.txd_size);
1222 			if (new_desc)
1223 				memset(tx_buf, 0, sizeof(*tx_buf));
1224 			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1225 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1226 			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1227 					 MTK_TX_FLAGS_FPORT1;
1228 
1229 			setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1230 				     txd_info.size, k++);
1231 
1232 			frag_size -= txd_info.size;
1233 			offset += txd_info.size;
1234 		}
1235 	}
1236 
1237 	/* store skb to cleanup */
1238 	itx_buf->skb = skb;
1239 
1240 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1241 		if (k & 0x1)
1242 			txd_pdma->txd2 |= TX_DMA_LS0;
1243 		else
1244 			txd_pdma->txd2 |= TX_DMA_LS1;
1245 	}
1246 
1247 	netdev_sent_queue(dev, skb->len);
1248 	skb_tx_timestamp(skb);
1249 
1250 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1251 	atomic_sub(n_desc, &ring->free_count);
1252 
1253 	/* make sure that all changes to the dma ring are flushed before we
1254 	 * continue
1255 	 */
1256 	wmb();
1257 
1258 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1259 		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
1260 		    !netdev_xmit_more())
1261 			mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1262 	} else {
1263 		int next_idx;
1264 
1265 		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
1266 					 ring->dma_size);
1267 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1268 	}
1269 
1270 	return 0;
1271 
1272 err_dma:
1273 	do {
1274 		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1275 
1276 		/* unmap dma */
1277 		mtk_tx_unmap(eth, tx_buf, false);
1278 
1279 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1280 		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1281 			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1282 
1283 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1284 		itxd_pdma = qdma_to_pdma(ring, itxd);
1285 	} while (itxd != txd);
1286 
1287 	return -ENOMEM;
1288 }
1289 
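/* worst-case number of TX descriptors needed for an skb; GSO fragments
 * are split into chunks of at most dma_max_len bytes
 */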
1290 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1291 {
1292 	int i, nfrags = 1;
1293 	skb_frag_t *frag;
1294 
1295 	if (skb_is_gso(skb)) {
1296 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1297 			frag = &skb_shinfo(skb)->frags[i];
1298 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1299 					       eth->soc->txrx.dma_max_len);
1300 		}
1301 	} else {
1302 		nfrags += skb_shinfo(skb)->nr_frags;
1303 	}
1304 
1305 	return nfrags;
1306 }
1307 
1308 static int mtk_queue_stopped(struct mtk_eth *eth)
1309 {
1310 	int i;
1311 
1312 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1313 		if (!eth->netdev[i])
1314 			continue;
1315 		if (netif_queue_stopped(eth->netdev[i]))
1316 			return 1;
1317 	}
1318 
1319 	return 0;
1320 }
1321 
1322 static void mtk_wake_queue(struct mtk_eth *eth)
1323 {
1324 	int i;
1325 
1326 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1327 		if (!eth->netdev[i])
1328 			continue;
1329 		netif_wake_queue(eth->netdev[i]);
1330 	}
1331 }
1332 
1333 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1334 {
1335 	struct mtk_mac *mac = netdev_priv(dev);
1336 	struct mtk_eth *eth = mac->hw;
1337 	struct mtk_tx_ring *ring = &eth->tx_ring;
1338 	struct net_device_stats *stats = &dev->stats;
1339 	bool gso = false;
1340 	int tx_num;
1341 
1342 	/* normally we can rely on the stack not calling this more than once,
1343 	 * however we have 2 queues running on the same ring so we need to lock
1344 	 * the ring access
1345 	 */
1346 	spin_lock(&eth->page_lock);
1347 
1348 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1349 		goto drop;
1350 
1351 	tx_num = mtk_cal_txd_req(eth, skb);
1352 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1353 		netif_stop_queue(dev);
1354 		netif_err(eth, tx_queued, dev,
1355 			  "Tx Ring full when queue awake!\n");
1356 		spin_unlock(&eth->page_lock);
1357 		return NETDEV_TX_BUSY;
1358 	}
1359 
1360 	/* TSO: fill MSS info in tcp checksum field */
1361 	if (skb_is_gso(skb)) {
1362 		if (skb_cow_head(skb, 0)) {
1363 			netif_warn(eth, tx_err, dev,
1364 				   "GSO expand head fail.\n");
1365 			goto drop;
1366 		}
1367 
1368 		if (skb_shinfo(skb)->gso_type &
1369 				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1370 			gso = true;
1371 			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1372 		}
1373 	}
1374 
1375 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1376 		goto drop;
1377 
1378 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1379 		netif_stop_queue(dev);
1380 
1381 	spin_unlock(&eth->page_lock);
1382 
1383 	return NETDEV_TX_OK;
1384 
1385 drop:
1386 	spin_unlock(&eth->page_lock);
1387 	stats->tx_dropped++;
1388 	dev_kfree_skb_any(skb);
1389 	return NETDEV_TX_OK;
1390 }
1391 
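/* with hardware LRO the RX rings are scanned for one with a completed
 * descriptor pending; without it ring 0 is always used
 */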
1392 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1393 {
1394 	int i;
1395 	struct mtk_rx_ring *ring;
1396 	int idx;
1397 
1398 	if (!eth->hwlro)
1399 		return &eth->rx_ring[0];
1400 
1401 	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1402 		struct mtk_rx_dma *rxd;
1403 
1404 		ring = &eth->rx_ring[i];
1405 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1406 		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1407 		if (rxd->rxd2 & RX_DMA_DONE) {
1408 			ring->calc_idx_update = true;
1409 			return ring;
1410 		}
1411 	}
1412 
1413 	return NULL;
1414 }
1415 
1416 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1417 {
1418 	struct mtk_rx_ring *ring;
1419 	int i;
1420 
1421 	if (!eth->hwlro) {
1422 		ring = &eth->rx_ring[0];
1423 		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1424 	} else {
1425 		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1426 			ring = &eth->rx_ring[i];
1427 			if (ring->calc_idx_update) {
1428 				ring->calc_idx_update = false;
1429 				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1430 			}
1431 		}
1432 	}
1433 }
1434 
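/* NAPI RX poll: each consumed slot is refilled with a freshly mapped
 * buffer before the received frame is passed up the stack
 */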
1435 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1436 		       struct mtk_eth *eth)
1437 {
1438 	struct dim_sample dim_sample = {};
1439 	struct mtk_rx_ring *ring;
1440 	int idx;
1441 	struct sk_buff *skb;
1442 	u8 *data, *new_data;
1443 	struct mtk_rx_dma_v2 *rxd, trxd;
1444 	int done = 0, bytes = 0;
1445 
1446 	while (done < budget) {
1447 		unsigned int pktlen, *rxdcsum;
1448 		struct net_device *netdev;
1449 		dma_addr_t dma_addr;
1450 		u32 hash, reason;
1451 		int mac = 0;
1452 
1453 		ring = mtk_get_rx_ring(eth);
1454 		if (unlikely(!ring))
1455 			goto rx_done;
1456 
1457 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1458 		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1459 		data = ring->data[idx];
1460 
1461 		if (!mtk_rx_get_desc(eth, &trxd, rxd))
1462 			break;
1463 
1464 		/* find out which mac the packet came from. values start at 1 */
1465 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1466 			mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
1467 		else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
1468 			 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
1469 			mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1470 
1471 		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1472 			     !eth->netdev[mac]))
1473 			goto release_desc;
1474 
1475 		netdev = eth->netdev[mac];
1476 
1477 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1478 			goto release_desc;
1479 
1480 		/* alloc new buffer */
1481 		if (ring->frag_size <= PAGE_SIZE)
1482 			new_data = napi_alloc_frag(ring->frag_size);
1483 		else
1484 			new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
1485 		if (unlikely(!new_data)) {
1486 			netdev->stats.rx_dropped++;
1487 			goto release_desc;
1488 		}
1489 		dma_addr = dma_map_single(eth->dma_dev,
1490 					  new_data + NET_SKB_PAD +
1491 					  eth->ip_align,
1492 					  ring->buf_size,
1493 					  DMA_FROM_DEVICE);
1494 		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
1495 			skb_free_frag(new_data);
1496 			netdev->stats.rx_dropped++;
1497 			goto release_desc;
1498 		}
1499 
1500 		dma_unmap_single(eth->dma_dev, trxd.rxd1,
1501 				 ring->buf_size, DMA_FROM_DEVICE);
1502 
1503 		/* receive data */
1504 		skb = build_skb(data, ring->frag_size);
1505 		if (unlikely(!skb)) {
1506 			skb_free_frag(data);
1507 			netdev->stats.rx_dropped++;
1508 			goto skip_rx;
1509 		}
1510 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1511 
1512 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1513 		skb->dev = netdev;
1514 		skb_put(skb, pktlen);
1515 
1516 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1517 			rxdcsum = &trxd.rxd3;
1518 		else
1519 			rxdcsum = &trxd.rxd4;
1520 
1521 		if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
1522 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1523 		else
1524 			skb_checksum_none_assert(skb);
1525 		skb->protocol = eth_type_trans(skb, netdev);
1526 		bytes += pktlen;
1527 
1528 		hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
1529 		if (hash != MTK_RXD4_FOE_ENTRY) {
1530 			hash = jhash_1word(hash, 0);
1531 			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
1532 		}
1533 
1534 		reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
1535 		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
1536 			mtk_ppe_check_skb(eth->ppe, skb,
1537 					  trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
1538 
1539 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
1540 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1541 				if (trxd.rxd3 & RX_DMA_VTAG_V2)
1542 					__vlan_hwaccel_put_tag(skb,
1543 						htons(RX_DMA_VPID(trxd.rxd4)),
1544 						RX_DMA_VID(trxd.rxd4));
1545 			} else if (trxd.rxd2 & RX_DMA_VTAG) {
1546 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1547 						       RX_DMA_VID(trxd.rxd3));
1548 			}
1549 
1550 			/* If the device is attached to a DSA switch, the special
1551 			 * tag inserted in the VLAN field by the hw switch can be
1552 			 * offloaded by RX HW VLAN offload. Clear the vlan info.
1553 			 */
1554 			if (netdev_uses_dsa(netdev))
1555 				__vlan_hwaccel_clear_tag(skb);
1556 		}
1557 
1558 		skb_record_rx_queue(skb, 0);
1559 		napi_gro_receive(napi, skb);
1560 
1561 skip_rx:
1562 		ring->data[idx] = new_data;
1563 		rxd->rxd1 = (unsigned int)dma_addr;
1564 
1565 release_desc:
1566 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1567 			rxd->rxd2 = RX_DMA_LSO;
1568 		else
1569 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
1570 
1571 		ring->calc_idx = idx;
1572 
1573 		done++;
1574 	}
1575 
1576 rx_done:
1577 	if (done) {
1578 		/* make sure that all changes to the dma ring are flushed before
1579 		 * we continue
1580 		 */
1581 		wmb();
1582 		mtk_update_rx_cpu_idx(eth);
1583 	}
1584 
1585 	eth->rx_packets += done;
1586 	eth->rx_bytes += bytes;
1587 	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
1588 			  &dim_sample);
1589 	net_dim(&eth->rx_dim, dim_sample);
1590 
1591 	return done;
1592 }
1593 
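/* reclaim completed QDMA TX descriptors between the CPU and DMA pointers
 * and return the unused budget
 */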
1594 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
1595 			    unsigned int *done, unsigned int *bytes)
1596 {
1597 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
1598 	struct mtk_tx_ring *ring = &eth->tx_ring;
1599 	struct mtk_tx_dma *desc;
1600 	struct sk_buff *skb;
1601 	struct mtk_tx_buf *tx_buf;
1602 	u32 cpu, dma;
1603 
1604 	cpu = ring->last_free_ptr;
1605 	dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
1606 
1607 	desc = mtk_qdma_phys_to_virt(ring, cpu);
1608 
1609 	while ((cpu != dma) && budget) {
1610 		u32 next_cpu = desc->txd2;
1611 		int mac = 0;
1612 
1613 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1614 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1615 			break;
1616 
1617 		tx_buf = mtk_desc_to_tx_buf(ring, desc,
1618 					    eth->soc->txrx.txd_size);
1619 		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
1620 			mac = 1;
1621 
1622 		skb = tx_buf->skb;
1623 		if (!skb)
1624 			break;
1625 
1626 		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1627 			bytes[mac] += skb->len;
1628 			done[mac]++;
1629 			budget--;
1630 		}
1631 		mtk_tx_unmap(eth, tx_buf, true);
1632 
1633 		ring->last_free = desc;
1634 		atomic_inc(&ring->free_count);
1635 
1636 		cpu = next_cpu;
1637 	}
1638 
1639 	ring->last_free_ptr = cpu;
1640 	mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
1641 
1642 	return budget;
1643 }
1644 
1645 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
1646 			    unsigned int *done, unsigned int *bytes)
1647 {
1648 	struct mtk_tx_ring *ring = &eth->tx_ring;
1649 	struct mtk_tx_dma *desc;
1650 	struct sk_buff *skb;
1651 	struct mtk_tx_buf *tx_buf;
1652 	u32 cpu, dma;
1653 
1654 	cpu = ring->cpu_idx;
1655 	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
1656 
1657 	while ((cpu != dma) && budget) {
1658 		tx_buf = &ring->buf[cpu];
1659 		skb = tx_buf->skb;
1660 		if (!skb)
1661 			break;
1662 
1663 		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1664 			bytes[0] += skb->len;
1665 			done[0]++;
1666 			budget--;
1667 		}
1668 
1669 		mtk_tx_unmap(eth, tx_buf, true);
1670 
1671 		desc = ring->dma + cpu * eth->soc->txrx.txd_size;
1672 		ring->last_free = desc;
1673 		atomic_inc(&ring->free_count);
1674 
1675 		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
1676 	}
1677 
1678 	ring->cpu_idx = cpu;
1679 
1680 	return budget;
1681 }
1682 
1683 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1684 {
1685 	struct mtk_tx_ring *ring = &eth->tx_ring;
1686 	struct dim_sample dim_sample = {};
1687 	unsigned int done[MTK_MAX_DEVS];
1688 	unsigned int bytes[MTK_MAX_DEVS];
1689 	int total = 0, i;
1690 
1691 	memset(done, 0, sizeof(done));
1692 	memset(bytes, 0, sizeof(bytes));
1693 
1694 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1695 		budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
1696 	else
1697 		budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
1698 
1699 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1700 		if (!eth->netdev[i] || !done[i])
1701 			continue;
1702 		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1703 		total += done[i];
1704 		eth->tx_packets += done[i];
1705 		eth->tx_bytes += bytes[i];
1706 	}
1707 
1708 	dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
1709 			  &dim_sample);
1710 	net_dim(&eth->tx_dim, dim_sample);
1711 
1712 	if (mtk_queue_stopped(eth) &&
1713 	    (atomic_read(&ring->free_count) > ring->thresh))
1714 		mtk_wake_queue(eth);
1715 
1716 	return total;
1717 }
1718 
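/* on a GDM1/GDM2 counter interrupt, fold the hardware counters into the
 * software stats and ack the interrupt bits
 */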
1719 static void mtk_handle_status_irq(struct mtk_eth *eth)
1720 {
1721 	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
1722 
1723 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
1724 		mtk_stats_update(eth);
1725 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
1726 			MTK_INT_STATUS2);
1727 	}
1728 }
1729 
1730 static int mtk_napi_tx(struct napi_struct *napi, int budget)
1731 {
1732 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
1733 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
1734 	int tx_done = 0;
1735 
1736 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1737 		mtk_handle_status_irq(eth);
1738 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
1739 	tx_done = mtk_poll_tx(eth, budget);
1740 
1741 	if (unlikely(netif_msg_intr(eth))) {
1742 		dev_info(eth->dev,
1743 			 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
1744 			 mtk_r32(eth, reg_map->tx_irq_status),
1745 			 mtk_r32(eth, reg_map->tx_irq_mask));
1746 	}
1747 
1748 	if (tx_done == budget)
1749 		return budget;
1750 
1751 	if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
1752 		return budget;
1753 
1754 	if (napi_complete_done(napi, tx_done))
1755 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
1756 
1757 	return tx_done;
1758 }
1759 
1760 static int mtk_napi_rx(struct napi_struct *napi, int budget)
1761 {
1762 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
1763 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
1764 	int rx_done_total = 0;
1765 
1766 	mtk_handle_status_irq(eth);
1767 
1768 	do {
1769 		int rx_done;
1770 
1771 		mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
1772 			reg_map->pdma.irq_status);
1773 		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
1774 		rx_done_total += rx_done;
1775 
1776 		if (unlikely(netif_msg_intr(eth))) {
1777 			dev_info(eth->dev,
1778 				 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
1779 				 mtk_r32(eth, reg_map->pdma.irq_status),
1780 				 mtk_r32(eth, reg_map->pdma.irq_mask));
1781 		}
1782 
1783 		if (rx_done_total == budget)
1784 			return budget;
1785 
1786 	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
1787 		 eth->soc->txrx.rx_irq_done_mask);
1788 
1789 	if (napi_complete_done(napi, rx_done_total))
1790 		mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
1791 
1792 	return rx_done_total;
1793 }
1794 
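/* allocate the TX descriptor ring(s) and program the QDMA or PDMA ring
 * registers
 */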
1795 static int mtk_tx_alloc(struct mtk_eth *eth)
1796 {
1797 	const struct mtk_soc_data *soc = eth->soc;
1798 	struct mtk_tx_ring *ring = &eth->tx_ring;
1799 	int i, sz = soc->txrx.txd_size;
1800 	struct mtk_tx_dma_v2 *txd;
1801 
1802 	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
1803 			       GFP_KERNEL);
1804 	if (!ring->buf)
1805 		goto no_tx_mem;
1806 
1807 	ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
1808 				       &ring->phys, GFP_KERNEL);
1809 	if (!ring->dma)
1810 		goto no_tx_mem;
1811 
1812 	for (i = 0; i < MTK_DMA_SIZE; i++) {
1813 		int next = (i + 1) % MTK_DMA_SIZE;
1814 		u32 next_ptr = ring->phys + next * sz;
1815 
1816 		txd = ring->dma + i * sz;
1817 		txd->txd2 = next_ptr;
1818 		txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1819 		txd->txd4 = 0;
1820 		if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
1821 			txd->txd5 = 0;
1822 			txd->txd6 = 0;
1823 			txd->txd7 = 0;
1824 			txd->txd8 = 0;
1825 		}
1826 	}
1827 
1828 	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
1829 	 * only as the framework. The real HW descriptors are the PDMA
1830 	 * descriptors in ring->dma_pdma.
1831 	 */
1832 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1833 		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
1834 						    &ring->phys_pdma, GFP_KERNEL);
1835 		if (!ring->dma_pdma)
1836 			goto no_tx_mem;
1837 
1838 		for (i = 0; i < MTK_DMA_SIZE; i++) {
1839 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
1840 			ring->dma_pdma[i].txd4 = 0;
1841 		}
1842 	}
1843 
1844 	ring->dma_size = MTK_DMA_SIZE;
1845 	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
1846 	ring->next_free = ring->dma;
1847 	ring->last_free = (void *)txd;
1848 	ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
1849 	ring->thresh = MAX_SKB_FRAGS;
1850 
1851 	/* make sure that all changes to the dma ring are flushed before we
1852 	 * continue
1853 	 */
1854 	wmb();
1855 
1856 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1857 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
1858 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
1859 		mtk_w32(eth,
1860 			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1861 			soc->reg_map->qdma.crx_ptr);
1862 		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
1863 		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
1864 			soc->reg_map->qdma.qtx_cfg);
1865 	} else {
1866 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
1867 		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
1868 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
1869 		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
1870 	}
1871 
1872 	return 0;
1873 
1874 no_tx_mem:
1875 	return -ENOMEM;
1876 }
1877 
1878 static void mtk_tx_clean(struct mtk_eth *eth)
1879 {
1880 	const struct mtk_soc_data *soc = eth->soc;
1881 	struct mtk_tx_ring *ring = &eth->tx_ring;
1882 	int i;
1883 
1884 	if (ring->buf) {
1885 		for (i = 0; i < MTK_DMA_SIZE; i++)
1886 			mtk_tx_unmap(eth, &ring->buf[i], false);
1887 		kfree(ring->buf);
1888 		ring->buf = NULL;
1889 	}
1890 
1891 	if (ring->dma) {
1892 		dma_free_coherent(eth->dma_dev,
1893 				  MTK_DMA_SIZE * soc->txrx.txd_size,
1894 				  ring->dma, ring->phys);
1895 		ring->dma = NULL;
1896 	}
1897 
1898 	if (ring->dma_pdma) {
1899 		dma_free_coherent(eth->dma_dev,
1900 				  MTK_DMA_SIZE * soc->txrx.txd_size,
1901 				  ring->dma_pdma, ring->phys_pdma);
1902 		ring->dma_pdma = NULL;
1903 	}
1904 }
1905 
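/* Allocate one RX ring (normal, HW LRO or QDMA), pre-fill every descriptor
 * with a DMA-mapped data buffer and program the ring base, size and CPU
 * index registers.
 */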
1906 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1907 {
1908 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
1909 	struct mtk_rx_ring *ring;
1910 	int rx_data_len, rx_dma_size;
1911 	int i;
1912 
1913 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
1914 		if (ring_no)
1915 			return -EINVAL;
1916 		ring = &eth->rx_ring_qdma;
1917 	} else {
1918 		ring = &eth->rx_ring[ring_no];
1919 	}
1920 
1921 	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
1922 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
1923 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
1924 	} else {
1925 		rx_data_len = ETH_DATA_LEN;
1926 		rx_dma_size = MTK_DMA_SIZE;
1927 	}
1928 
1929 	ring->frag_size = mtk_max_frag_size(rx_data_len);
1930 	ring->buf_size = mtk_max_buf_size(ring->frag_size);
1931 	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
1932 			     GFP_KERNEL);
1933 	if (!ring->data)
1934 		return -ENOMEM;
1935 
1936 	for (i = 0; i < rx_dma_size; i++) {
1937 		if (ring->frag_size <= PAGE_SIZE)
1938 			ring->data[i] = netdev_alloc_frag(ring->frag_size);
1939 		else
1940 			ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
1941 		if (!ring->data[i])
1942 			return -ENOMEM;
1943 	}
1944 
1945 	ring->dma = dma_alloc_coherent(eth->dma_dev,
1946 				       rx_dma_size * eth->soc->txrx.rxd_size,
1947 				       &ring->phys, GFP_KERNEL);
1948 	if (!ring->dma)
1949 		return -ENOMEM;
1950 
1951 	for (i = 0; i < rx_dma_size; i++) {
1952 		struct mtk_rx_dma_v2 *rxd;
1953 
1954 		dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
1955 				ring->data[i] + NET_SKB_PAD + eth->ip_align,
1956 				ring->buf_size,
1957 				DMA_FROM_DEVICE);
1958 		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1959 			return -ENOMEM;
1960 
1961 		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
1962 		rxd->rxd1 = (unsigned int)dma_addr;
1963 
1964 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1965 			rxd->rxd2 = RX_DMA_LSO;
1966 		else
1967 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
1968 
1969 		rxd->rxd3 = 0;
1970 		rxd->rxd4 = 0;
1971 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1972 			rxd->rxd5 = 0;
1973 			rxd->rxd6 = 0;
1974 			rxd->rxd7 = 0;
1975 			rxd->rxd8 = 0;
1976 		}
1977 	}
1978 	ring->dma_size = rx_dma_size;
1979 	ring->calc_idx_update = false;
1980 	ring->calc_idx = rx_dma_size - 1;
1981 	if (rx_flag == MTK_RX_FLAGS_QDMA)
1982 		ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
1983 				    ring_no * MTK_QRX_OFFSET;
1984 	else
1985 		ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
1986 				    ring_no * MTK_QRX_OFFSET;
1987 	/* make sure that all changes to the dma ring are flushed before we
1988 	 * continue
1989 	 */
1990 	wmb();
1991 
1992 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
1993 		mtk_w32(eth, ring->phys,
1994 			reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
1995 		mtk_w32(eth, rx_dma_size,
1996 			reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
1997 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
1998 			reg_map->qdma.rst_idx);
1999 	} else {
2000 		mtk_w32(eth, ring->phys,
2001 			reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2002 		mtk_w32(eth, rx_dma_size,
2003 			reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2004 		mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2005 			reg_map->pdma.rst_idx);
2006 	}
2007 	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2008 
2009 	return 0;
2010 }
2011 
2012 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
2013 {
2014 	int i;
2015 
2016 	if (ring->data && ring->dma) {
2017 		for (i = 0; i < ring->dma_size; i++) {
2018 			struct mtk_rx_dma *rxd;
2019 
2020 			if (!ring->data[i])
2021 				continue;
2022 
2023 			rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2024 			if (!rxd->rxd1)
2025 				continue;
2026 
2027 			dma_unmap_single(eth->dma_dev, rxd->rxd1,
2028 					 ring->buf_size, DMA_FROM_DEVICE);
2029 			skb_free_frag(ring->data[i]);
2030 		}
2031 		kfree(ring->data);
2032 		ring->data = NULL;
2033 	}
2034 
2035 	if (ring->dma) {
2036 		dma_free_coherent(eth->dma_dev,
2037 				  ring->dma_size * eth->soc->txrx.rxd_size,
2038 				  ring->dma, ring->phys);
2039 		ring->dma = NULL;
2040 	}
2041 }
2042 
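/* Program the HW LRO rings (auto-learn mode, aggregation limits and timers)
 * and globally enable LRO in the PDMA.
 */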
2043 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2044 {
2045 	int i;
2046 	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2047 	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2048 
	/* set LRO rings to auto-learn mode */
2050 	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2051 
2052 	/* validate LRO ring */
2053 	ring_ctrl_dw2 |= MTK_RING_VLD;
2054 
2055 	/* set AGE timer (unit: 20us) */
2056 	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2057 	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2058 
2059 	/* set max AGG timer (unit: 20us) */
2060 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2061 
2062 	/* set max LRO AGG count */
2063 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2064 	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2065 
2066 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2067 		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2068 		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2069 		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2070 	}
2071 
2072 	/* IPv4 checksum update enable */
2073 	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2074 
2075 	/* switch priority comparison to packet count mode */
2076 	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2077 
2078 	/* bandwidth threshold setting */
2079 	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2080 
2081 	/* auto-learn score delta setting */
2082 	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2083 
	/* set the refresh timer for alternative flows to 1 sec. (unit: 20us) */
2085 	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2086 		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2087 
2088 	/* set HW LRO mode & the max aggregation count for rx packets */
2089 	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2090 
	/* minimum remaining room of SDL0 in the RXD for LRO aggregation */
2092 	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2093 
2094 	/* enable HW LRO */
2095 	lro_ctrl_dw0 |= MTK_LRO_EN;
2096 
2097 	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2098 	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2099 
2100 	return 0;
2101 }
2102 
2103 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2104 {
2105 	int i;
2106 	u32 val;
2107 
2108 	/* relinquish lro rings, flush aggregated packets */
2109 	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2110 
	/* wait for the relinquish requests to complete */
2112 	for (i = 0; i < 10; i++) {
2113 		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2114 		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2115 			msleep(20);
2116 			continue;
2117 		}
2118 		break;
2119 	}
2120 
2121 	/* invalidate lro rings */
2122 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2123 		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2124 
2125 	/* disable HW LRO */
2126 	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2127 }
2128 
2129 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2130 {
2131 	u32 reg_val;
2132 
2133 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2134 
2135 	/* invalidate the IP setting */
2136 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2137 
2138 	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2139 
2140 	/* validate the IP setting */
2141 	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2142 }
2143 
2144 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2145 {
2146 	u32 reg_val;
2147 
2148 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2149 
2150 	/* invalidate the IP setting */
2151 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2152 
2153 	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2154 }
2155 
2156 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2157 {
2158 	int cnt = 0;
2159 	int i;
2160 
2161 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2162 		if (mac->hwlro_ip[i])
2163 			cnt++;
2164 	}
2165 
2166 	return cnt;
2167 }
2168 
2169 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2170 				struct ethtool_rxnfc *cmd)
2171 {
2172 	struct ethtool_rx_flow_spec *fsp =
2173 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2174 	struct mtk_mac *mac = netdev_priv(dev);
2175 	struct mtk_eth *eth = mac->hw;
2176 	int hwlro_idx;
2177 
2178 	if ((fsp->flow_type != TCP_V4_FLOW) ||
2179 	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2180 	    (fsp->location > 1))
2181 		return -EINVAL;
2182 
2183 	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2184 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2185 
2186 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2187 
2188 	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2189 
2190 	return 0;
2191 }
2192 
2193 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2194 				struct ethtool_rxnfc *cmd)
2195 {
2196 	struct ethtool_rx_flow_spec *fsp =
2197 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2198 	struct mtk_mac *mac = netdev_priv(dev);
2199 	struct mtk_eth *eth = mac->hw;
2200 	int hwlro_idx;
2201 
2202 	if (fsp->location > 1)
2203 		return -EINVAL;
2204 
2205 	mac->hwlro_ip[fsp->location] = 0;
2206 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2207 
2208 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2209 
2210 	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2211 
2212 	return 0;
2213 }
2214 
2215 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2216 {
2217 	struct mtk_mac *mac = netdev_priv(dev);
2218 	struct mtk_eth *eth = mac->hw;
2219 	int i, hwlro_idx;
2220 
2221 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2222 		mac->hwlro_ip[i] = 0;
2223 		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2224 
2225 		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2226 	}
2227 
2228 	mac->hwlro_ip_cnt = 0;
2229 }
2230 
2231 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2232 				    struct ethtool_rxnfc *cmd)
2233 {
2234 	struct mtk_mac *mac = netdev_priv(dev);
2235 	struct ethtool_rx_flow_spec *fsp =
2236 		(struct ethtool_rx_flow_spec *)&cmd->fs;
2237 
2238 	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2239 		return -EINVAL;
2240 
	/* only the TCP destination IPv4 address is meaningful here; all
	 * other fields are masked out below
	 */
2242 	fsp->flow_type = TCP_V4_FLOW;
2243 	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2244 	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2245 
2246 	fsp->h_u.tcp_ip4_spec.ip4src = 0;
2247 	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2248 	fsp->h_u.tcp_ip4_spec.psrc = 0;
2249 	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2250 	fsp->h_u.tcp_ip4_spec.pdst = 0;
2251 	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2252 	fsp->h_u.tcp_ip4_spec.tos = 0;
2253 	fsp->m_u.tcp_ip4_spec.tos = 0xff;
2254 
2255 	return 0;
2256 }
2257 
2258 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2259 				  struct ethtool_rxnfc *cmd,
2260 				  u32 *rule_locs)
2261 {
2262 	struct mtk_mac *mac = netdev_priv(dev);
2263 	int cnt = 0;
2264 	int i;
2265 
2266 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2267 		if (mac->hwlro_ip[i]) {
2268 			rule_locs[cnt] = i;
2269 			cnt++;
2270 		}
2271 	}
2272 
2273 	cmd->rule_cnt = cnt;
2274 
2275 	return 0;
2276 }
2277 
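/* Refuse to clear NETIF_F_LRO while HW LRO flow rules are still programmed
 * for this MAC.
 */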
2278 static netdev_features_t mtk_fix_features(struct net_device *dev,
2279 					  netdev_features_t features)
2280 {
2281 	if (!(features & NETIF_F_LRO)) {
2282 		struct mtk_mac *mac = netdev_priv(dev);
2283 		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2284 
2285 		if (ip_cnt) {
			netdev_info(dev, "RX flow rules are programmed, keeping LRO enabled\n");
2287 
2288 			features |= NETIF_F_LRO;
2289 		}
2290 	}
2291 
2292 	return features;
2293 }
2294 
2295 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2296 {
2297 	int err = 0;
2298 
2299 	if (!((dev->features ^ features) & NETIF_F_LRO))
2300 		return 0;
2301 
2302 	if (!(features & NETIF_F_LRO))
2303 		mtk_hwlro_netdev_disable(dev);
2304 
2305 	return err;
2306 }
2307 
2308 /* wait for DMA to finish whatever it is doing before we start using it again */
2309 static int mtk_dma_busy_wait(struct mtk_eth *eth)
2310 {
2311 	unsigned int reg;
2312 	int ret;
2313 	u32 val;
2314 
2315 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2316 		reg = eth->soc->reg_map->qdma.glo_cfg;
2317 	else
2318 		reg = eth->soc->reg_map->pdma.glo_cfg;
2319 
2320 	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
2321 					!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
2322 					5, MTK_DMA_BUSY_TIMEOUT_US);
2323 	if (ret)
2324 		dev_err(eth->dev, "DMA init timeout\n");
2325 
2326 	return ret;
2327 }
2328 
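/* Bring up all DMA rings: the QDMA scratch/free queue and flow-control
 * thresholds (on SoCs with QDMA), the TX ring, the QDMA and PDMA RX rings
 * and, when enabled, the HW LRO rings.
 */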
2329 static int mtk_dma_init(struct mtk_eth *eth)
2330 {
2331 	int err;
2332 	u32 i;
2333 
2334 	if (mtk_dma_busy_wait(eth))
2335 		return -EBUSY;
2336 
2337 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2338 		/* QDMA needs scratch memory for internal reordering of the
2339 		 * descriptors
2340 		 */
2341 		err = mtk_init_fq_dma(eth);
2342 		if (err)
2343 			return err;
2344 	}
2345 
2346 	err = mtk_tx_alloc(eth);
2347 	if (err)
2348 		return err;
2349 
2350 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2351 		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2352 		if (err)
2353 			return err;
2354 	}
2355 
2356 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2357 	if (err)
2358 		return err;
2359 
2360 	if (eth->hwlro) {
2361 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2362 			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2363 			if (err)
2364 				return err;
2365 		}
2366 		err = mtk_hwlro_rx_init(eth);
2367 		if (err)
2368 			return err;
2369 	}
2370 
2371 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2372 		/* Enable random early drop and set drop threshold
2373 		 * automatically
2374 		 */
2375 		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2376 			FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
2377 		mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
2378 	}
2379 
2380 	return 0;
2381 }
2382 
2383 static void mtk_dma_free(struct mtk_eth *eth)
2384 {
2385 	const struct mtk_soc_data *soc = eth->soc;
2386 	int i;
2387 
2388 	for (i = 0; i < MTK_MAC_COUNT; i++)
2389 		if (eth->netdev[i])
2390 			netdev_reset_queue(eth->netdev[i]);
2391 	if (eth->scratch_ring) {
2392 		dma_free_coherent(eth->dma_dev,
2393 				  MTK_DMA_SIZE * soc->txrx.txd_size,
2394 				  eth->scratch_ring, eth->phy_scratch_ring);
2395 		eth->scratch_ring = NULL;
2396 		eth->phy_scratch_ring = 0;
2397 	}
2398 	mtk_tx_clean(eth);
2399 	mtk_rx_clean(eth, &eth->rx_ring[0]);
2400 	mtk_rx_clean(eth, &eth->rx_ring_qdma);
2401 
2402 	if (eth->hwlro) {
2403 		mtk_hwlro_rx_uninit(eth);
2404 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2405 			mtk_rx_clean(eth, &eth->rx_ring[i]);
2406 	}
2407 
2408 	kfree(eth->scratch_head);
2409 }
2410 
2411 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
2412 {
2413 	struct mtk_mac *mac = netdev_priv(dev);
2414 	struct mtk_eth *eth = mac->hw;
2415 
2416 	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev, "transmit timed out\n");
2419 	schedule_work(&eth->pending_work);
2420 }
2421 
2422 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
2423 {
2424 	struct mtk_eth *eth = _eth;
2425 
2426 	eth->rx_events++;
2427 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
2428 		__napi_schedule(&eth->rx_napi);
2429 		mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
2430 	}
2431 
2432 	return IRQ_HANDLED;
2433 }
2434 
2435 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2436 {
2437 	struct mtk_eth *eth = _eth;
2438 
2439 	eth->tx_events++;
2440 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
2441 		__napi_schedule(&eth->tx_napi);
2442 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2443 	}
2444 
2445 	return IRQ_HANDLED;
2446 }
2447 
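/* Combined handler used on SoCs with a single shared interrupt line: check
 * the pending RX/TX masks and dispatch to the dedicated handlers.
 */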
2448 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2449 {
2450 	struct mtk_eth *eth = _eth;
2451 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2452 
2453 	if (mtk_r32(eth, reg_map->pdma.irq_mask) &
2454 	    eth->soc->txrx.rx_irq_done_mask) {
2455 		if (mtk_r32(eth, reg_map->pdma.irq_status) &
2456 		    eth->soc->txrx.rx_irq_done_mask)
2457 			mtk_handle_irq_rx(irq, _eth);
2458 	}
2459 	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
2460 		if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2461 			mtk_handle_irq_tx(irq, _eth);
2462 	}
2463 
2464 	return IRQ_HANDLED;
2465 }
2466 
2467 #ifdef CONFIG_NET_POLL_CONTROLLER
2468 static void mtk_poll_controller(struct net_device *dev)
2469 {
2470 	struct mtk_mac *mac = netdev_priv(dev);
2471 	struct mtk_eth *eth = mac->hw;
2472 
2473 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2474 	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
2475 	mtk_handle_irq_rx(eth->irq[2], dev);
2476 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2477 	mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2478 }
2479 #endif
2480 
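/* Initialize the DMA rings and enable the TX/RX engines via the per-SoC
 * QDMA/PDMA global configuration registers.
 */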
2481 static int mtk_start_dma(struct mtk_eth *eth)
2482 {
2483 	u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
2484 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2485 	int err;
2486 
2487 	err = mtk_dma_init(eth);
2488 	if (err) {
2489 		mtk_dma_free(eth);
2490 		return err;
2491 	}
2492 
2493 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2494 		val = mtk_r32(eth, reg_map->qdma.glo_cfg);
2495 		val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2496 		       MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
2497 		       MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
2498 
2499 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2500 			val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
2501 			       MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
2502 			       MTK_CHK_DDONE_EN;
2503 		else
2504 			val |= MTK_RX_BT_32DWORDS;
2505 		mtk_w32(eth, val, reg_map->qdma.glo_cfg);
2506 
2507 		mtk_w32(eth,
2508 			MTK_RX_DMA_EN | rx_2b_offset |
2509 			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2510 			reg_map->pdma.glo_cfg);
2511 	} else {
2512 		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2513 			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2514 			reg_map->pdma.glo_cfg);
2515 	}
2516 
2517 	return 0;
2518 }
2519 
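/* Point every GDMA forwarding port at the target given by @config (PDMA,
 * PPE or drop-all), enable RX checksum offload and pulse the PSE reset.
 */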
2520 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
2521 {
2522 	int i;
2523 
2524 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2525 		return;
2526 
2527 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2528 		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2529 
		/* by default, set the forward port to send frames to the PDMA */
2531 		val &= ~0xffff;
2532 
2533 		/* Enable RX checksum */
2534 		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2535 
2536 		val |= config;
2537 
2538 		if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
2539 			val |= MTK_GDMA_SPECIAL_TAG;
2540 
2541 		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2542 	}
2543 	/* Reset and enable PSE */
2544 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
2545 	mtk_w32(eth, 0, MTK_RST_GL);
2546 }
2547 
2548 static int mtk_open(struct net_device *dev)
2549 {
2550 	struct mtk_mac *mac = netdev_priv(dev);
2551 	struct mtk_eth *eth = mac->hw;
2552 	int err;
2553 
2554 	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
2555 	if (err) {
2556 		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
2557 			   err);
2558 		return err;
2559 	}
2560 
2561 	/* we run 2 netdevs on the same dma ring so we only bring it up once */
2562 	if (!refcount_read(&eth->dma_refcnt)) {
2563 		u32 gdm_config = MTK_GDMA_TO_PDMA;
2564 
2565 		err = mtk_start_dma(eth);
2566 		if (err)
2567 			return err;
2568 
2569 		if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
2570 			gdm_config = MTK_GDMA_TO_PPE;
2571 
2572 		mtk_gdm_config(eth, gdm_config);
2573 
2574 		napi_enable(&eth->tx_napi);
2575 		napi_enable(&eth->rx_napi);
2576 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2577 		mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2578 		refcount_set(&eth->dma_refcnt, 1);
	} else {
		refcount_inc(&eth->dma_refcnt);
	}
2582 
2583 	phylink_start(mac->phylink);
2584 	netif_start_queue(dev);
2585 	return 0;
2586 }
2587 
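/* Clear the DMA enable bits in @glo_cfg and wait (up to ten 20ms retries)
 * for the TX/RX engines to go idle.
 */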
2588 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
2589 {
2590 	u32 val;
2591 	int i;
2592 
2593 	/* stop the dma engine */
2594 	spin_lock_bh(&eth->page_lock);
2595 	val = mtk_r32(eth, glo_cfg);
2596 	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
2597 		glo_cfg);
2598 	spin_unlock_bh(&eth->page_lock);
2599 
2600 	/* wait for dma stop */
2601 	for (i = 0; i < 10; i++) {
2602 		val = mtk_r32(eth, glo_cfg);
2603 		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
2604 			msleep(20);
2605 			continue;
2606 		}
2607 		break;
2608 	}
2609 }
2610 
2611 static int mtk_stop(struct net_device *dev)
2612 {
2613 	struct mtk_mac *mac = netdev_priv(dev);
2614 	struct mtk_eth *eth = mac->hw;
2615 
2616 	phylink_stop(mac->phylink);
2617 
2618 	netif_tx_disable(dev);
2619 
2620 	phylink_disconnect_phy(mac->phylink);
2621 
	/* only shut down DMA if this is the last user */
2623 	if (!refcount_dec_and_test(&eth->dma_refcnt))
2624 		return 0;
2625 
2626 	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
2627 
2628 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2629 	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
2630 	napi_disable(&eth->tx_napi);
2631 	napi_disable(&eth->rx_napi);
2632 
2633 	cancel_work_sync(&eth->rx_dim.work);
2634 	cancel_work_sync(&eth->tx_dim.work);
2635 
2636 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2637 		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
2638 	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
2639 
2640 	mtk_dma_free(eth);
2641 
2642 	if (eth->soc->offload_version)
2643 		mtk_ppe_stop(eth->ppe);
2644 
2645 	return 0;
2646 }
2647 
2648 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
2649 {
2650 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2651 			   reset_bits,
2652 			   reset_bits);
2653 
2654 	usleep_range(1000, 1100);
2655 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2656 			   reset_bits,
2657 			   ~reset_bits);
2658 	mdelay(10);
2659 }
2660 
2661 static void mtk_clk_disable(struct mtk_eth *eth)
2662 {
2663 	int clk;
2664 
2665 	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
2666 		clk_disable_unprepare(eth->clks[clk]);
2667 }
2668 
2669 static int mtk_clk_enable(struct mtk_eth *eth)
2670 {
2671 	int clk, ret;
2672 
2673 	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
2674 		ret = clk_prepare_enable(eth->clks[clk]);
2675 		if (ret)
2676 			goto err_disable_clks;
2677 	}
2678 
2679 	return 0;
2680 
2681 err_disable_clks:
2682 	while (--clk >= 0)
2683 		clk_disable_unprepare(eth->clks[clk]);
2684 
2685 	return ret;
2686 }
2687 
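/* Net DIM worker for the RX path: convert the selected moderation profile
 * into the PDMA (and, with QDMA, the QDMA) delay-interrupt settings.
 * The hardware time unit is 20us.
 */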
2688 static void mtk_dim_rx(struct work_struct *work)
2689 {
2690 	struct dim *dim = container_of(work, struct dim, work);
2691 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
2692 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2693 	struct dim_cq_moder cur_profile;
2694 	u32 val, cur;
2695 
2696 	cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
2697 						dim->profile_ix);
2698 	spin_lock_bh(&eth->dim_lock);
2699 
2700 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
2701 	val &= MTK_PDMA_DELAY_TX_MASK;
2702 	val |= MTK_PDMA_DELAY_RX_EN;
2703 
2704 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
2705 	val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
2706 
2707 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
2708 	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
2709 
2710 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
2711 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2712 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
2713 
2714 	spin_unlock_bh(&eth->dim_lock);
2715 
2716 	dim->state = DIM_START_MEASURE;
2717 }
2718 
2719 static void mtk_dim_tx(struct work_struct *work)
2720 {
2721 	struct dim *dim = container_of(work, struct dim, work);
2722 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
2723 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2724 	struct dim_cq_moder cur_profile;
2725 	u32 val, cur;
2726 
2727 	cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
2728 						dim->profile_ix);
2729 	spin_lock_bh(&eth->dim_lock);
2730 
2731 	val = mtk_r32(eth, reg_map->pdma.delay_irq);
2732 	val &= MTK_PDMA_DELAY_RX_MASK;
2733 	val |= MTK_PDMA_DELAY_TX_EN;
2734 
2735 	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
2736 	val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
2737 
2738 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
2739 	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
2740 
2741 	mtk_w32(eth, val, reg_map->pdma.delay_irq);
2742 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2743 		mtk_w32(eth, val, reg_map->qdma.delay_irq);
2744 
2745 	spin_unlock_bh(&eth->dim_lock);
2746 
2747 	dim->state = DIM_START_MEASURE;
2748 }
2749 
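/* One-time hardware bring-up: enable runtime PM and clocks, reset the frame
 * engine, apply per-SoC PSE/GDM defaults and configure interrupt delays and
 * grouping.
 */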
2750 static int mtk_hw_init(struct mtk_eth *eth)
2751 {
2752 	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
2753 		       ETHSYS_DMA_AG_MAP_PPE;
2754 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2755 	int i, val, ret;
2756 
2757 	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
2758 		return 0;
2759 
2760 	pm_runtime_enable(eth->dev);
2761 	pm_runtime_get_sync(eth->dev);
2762 
2763 	ret = mtk_clk_enable(eth);
2764 	if (ret)
2765 		goto err_disable_pm;
2766 
2767 	if (eth->ethsys)
2768 		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
2769 				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
2770 
2771 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2772 		ret = device_reset(eth->dev);
2773 		if (ret) {
2774 			dev_err(eth->dev, "MAC reset failed!\n");
2775 			goto err_disable_pm;
2776 		}
2777 
2778 		/* set interrupt delays based on current Net DIM sample */
2779 		mtk_dim_rx(&eth->rx_dim.work);
2780 		mtk_dim_tx(&eth->tx_dim.work);
2781 
2782 		/* disable delay and normal interrupt */
2783 		mtk_tx_irq_disable(eth, ~0);
2784 		mtk_rx_irq_disable(eth, ~0);
2785 
2786 		return 0;
2787 	}
2788 
2789 	val = RSTCTRL_FE | RSTCTRL_PPE;
2790 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2791 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
2792 
2793 		val |= RSTCTRL_ETH;
2794 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
2795 			val |= RSTCTRL_PPE1;
2796 	}
2797 
2798 	ethsys_reset(eth, val);
2799 
2800 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2801 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
2802 			     0x3ffffff);
2803 
2804 		/* Set FE to PDMAv2 if necessary */
2805 		val = mtk_r32(eth, MTK_FE_GLO_MISC);
		mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
2807 	}
2808 
2809 	if (eth->pctl) {
2810 		/* Set GE2 driving and slew rate */
2811 		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
2812 
2813 		/* set GE2 TDSEL */
2814 		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
2815 
2816 		/* set GE2 TUNE */
2817 		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
2818 	}
2819 
	/* Set link down as the default for each GMAC. Each MCR is set up
	 * with the appropriate value when mtk_mac_config() is invoked.
	 */
2824 	for (i = 0; i < MTK_MAC_COUNT; i++)
2825 		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
2826 
	/* Tell the CDM to parse the MTK special tag on frames coming from
	 * the CPU; this also works for untagged packets.
	 */
2830 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
2831 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
2832 
	/* Enable RX VLAN offloading */
2834 	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2835 
2836 	/* set interrupt delays based on current Net DIM sample */
2837 	mtk_dim_rx(&eth->rx_dim.work);
2838 	mtk_dim_tx(&eth->tx_dim.work);
2839 
2840 	/* disable delay and normal interrupt */
2841 	mtk_tx_irq_disable(eth, ~0);
2842 	mtk_rx_irq_disable(eth, ~0);
2843 
2844 	/* FE int grouping */
2845 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
2846 	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
2847 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
2848 	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
2849 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
2850 
2851 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2852 		/* PSE should not drop port8 and port9 packets */
2853 		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
2854 
		/* PSE Free Queue Flow Control */
2856 		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
2857 
2858 		/* PSE config input queue threshold */
2859 		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
2860 		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
2861 		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
2862 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
2863 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
2864 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
2865 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
2866 		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
2867 
2868 		/* PSE config output queue threshold */
2869 		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
2870 		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
2871 		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
2872 		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
2873 		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
2874 		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
2875 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
2876 		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
2877 
2878 		/* GDM and CDM Threshold */
2879 		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
2880 		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
2881 		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
2882 		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
2883 		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
2884 		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
2885 	}
2886 
2887 	return 0;
2888 
2889 err_disable_pm:
2890 	pm_runtime_put_sync(eth->dev);
2891 	pm_runtime_disable(eth->dev);
2892 
2893 	return ret;
2894 }
2895 
2896 static int mtk_hw_deinit(struct mtk_eth *eth)
2897 {
2898 	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
2899 		return 0;
2900 
2901 	mtk_clk_disable(eth);
2902 
2903 	pm_runtime_put_sync(eth->dev);
2904 	pm_runtime_disable(eth->dev);
2905 
2906 	return 0;
2907 }
2908 
2909 static int __init mtk_init(struct net_device *dev)
2910 {
2911 	struct mtk_mac *mac = netdev_priv(dev);
2912 	struct mtk_eth *eth = mac->hw;
2913 	int ret;
2914 
2915 	ret = of_get_ethdev_address(mac->of_node, dev);
2916 	if (ret) {
		/* If the MAC address is invalid, fall back to a random one */
2918 		eth_hw_addr_random(dev);
2919 		dev_err(eth->dev, "generated random MAC address %pM\n",
2920 			dev->dev_addr);
2921 	}
2922 
2923 	return 0;
2924 }
2925 
2926 static void mtk_uninit(struct net_device *dev)
2927 {
2928 	struct mtk_mac *mac = netdev_priv(dev);
2929 	struct mtk_eth *eth = mac->hw;
2930 
2931 	phylink_disconnect_phy(mac->phylink);
2932 	mtk_tx_irq_disable(eth, ~0);
2933 	mtk_rx_irq_disable(eth, ~0);
2934 }
2935 
2936 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
2937 {
2938 	int length = new_mtu + MTK_RX_ETH_HLEN;
2939 	struct mtk_mac *mac = netdev_priv(dev);
2940 	struct mtk_eth *eth = mac->hw;
2941 	u32 mcr_cur, mcr_new;
2942 
2943 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2944 		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
2945 		mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
2946 
2947 		if (length <= 1518)
2948 			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
2949 		else if (length <= 1536)
2950 			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
2951 		else if (length <= 1552)
2952 			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
2953 		else
2954 			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
2955 
2956 		if (mcr_new != mcr_cur)
2957 			mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
2958 	}
2959 
2960 	dev->mtu = new_mtu;
2961 
2962 	return 0;
2963 }
2964 
2965 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2966 {
2967 	struct mtk_mac *mac = netdev_priv(dev);
2968 
2969 	switch (cmd) {
2970 	case SIOCGMIIPHY:
2971 	case SIOCGMIIREG:
2972 	case SIOCSMIIREG:
2973 		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
2974 	default:
2975 		break;
2976 	}
2977 
2978 	return -EOPNOTSUPP;
2979 }
2980 
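/* Reset worker scheduled from the TX timeout path: stop all netdevs,
 * re-initialize the hardware and reopen the devices that were running.
 */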
2981 static void mtk_pending_work(struct work_struct *work)
2982 {
2983 	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
2984 	int err, i;
2985 	unsigned long restart = 0;
2986 
2987 	rtnl_lock();
2988 
2989 	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
2990 
2991 	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
2992 		cpu_relax();
2993 
2994 	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
2995 	/* stop all devices to make sure that dma is properly shut down */
2996 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2997 		if (!eth->netdev[i])
2998 			continue;
2999 		mtk_stop(eth->netdev[i]);
3000 		__set_bit(i, &restart);
3001 	}
3002 	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
3003 
3004 	/* restart underlying hardware such as power, clock, pin mux
3005 	 * and the connected phy
3006 	 */
3007 	mtk_hw_deinit(eth);
3008 
3009 	if (eth->dev->pins)
3010 		pinctrl_select_state(eth->dev->pins->p,
3011 				     eth->dev->pins->default_state);
3012 	mtk_hw_init(eth);
3013 
3014 	/* restart DMA and enable IRQs */
3015 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3016 		if (!test_bit(i, &restart))
3017 			continue;
3018 		err = mtk_open(eth->netdev[i]);
3019 		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed, closing device.\n");
3022 			dev_close(eth->netdev[i]);
3023 		}
3024 	}
3025 
3026 	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
3027 
3028 	clear_bit_unlock(MTK_RESETTING, &eth->state);
3029 
3030 	rtnl_unlock();
3031 }
3032 
3033 static int mtk_free_dev(struct mtk_eth *eth)
3034 {
3035 	int i;
3036 
3037 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3038 		if (!eth->netdev[i])
3039 			continue;
3040 		free_netdev(eth->netdev[i]);
3041 	}
3042 
3043 	return 0;
3044 }
3045 
3046 static int mtk_unreg_dev(struct mtk_eth *eth)
3047 {
3048 	int i;
3049 
3050 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3051 		if (!eth->netdev[i])
3052 			continue;
3053 		unregister_netdev(eth->netdev[i]);
3054 	}
3055 
3056 	return 0;
3057 }
3058 
3059 static int mtk_cleanup(struct mtk_eth *eth)
3060 {
3061 	mtk_unreg_dev(eth);
3062 	mtk_free_dev(eth);
3063 	cancel_work_sync(&eth->pending_work);
3064 
3065 	return 0;
3066 }
3067 
3068 static int mtk_get_link_ksettings(struct net_device *ndev,
3069 				  struct ethtool_link_ksettings *cmd)
3070 {
3071 	struct mtk_mac *mac = netdev_priv(ndev);
3072 
3073 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3074 		return -EBUSY;
3075 
3076 	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
3077 }
3078 
3079 static int mtk_set_link_ksettings(struct net_device *ndev,
3080 				  const struct ethtool_link_ksettings *cmd)
3081 {
3082 	struct mtk_mac *mac = netdev_priv(ndev);
3083 
3084 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3085 		return -EBUSY;
3086 
3087 	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
3088 }
3089 
3090 static void mtk_get_drvinfo(struct net_device *dev,
3091 			    struct ethtool_drvinfo *info)
3092 {
3093 	struct mtk_mac *mac = netdev_priv(dev);
3094 
3095 	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
3096 	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
3097 	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
3098 }
3099 
3100 static u32 mtk_get_msglevel(struct net_device *dev)
3101 {
3102 	struct mtk_mac *mac = netdev_priv(dev);
3103 
3104 	return mac->hw->msg_enable;
3105 }
3106 
3107 static void mtk_set_msglevel(struct net_device *dev, u32 value)
3108 {
3109 	struct mtk_mac *mac = netdev_priv(dev);
3110 
3111 	mac->hw->msg_enable = value;
3112 }
3113 
3114 static int mtk_nway_reset(struct net_device *dev)
3115 {
3116 	struct mtk_mac *mac = netdev_priv(dev);
3117 
3118 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3119 		return -EBUSY;
3120 
3121 	if (!mac->phylink)
3122 		return -ENOTSUPP;
3123 
3124 	return phylink_ethtool_nway_reset(mac->phylink);
3125 }
3126 
3127 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3128 {
3129 	int i;
3130 
3131 	switch (stringset) {
3132 	case ETH_SS_STATS:
3133 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
3134 			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
3135 			data += ETH_GSTRING_LEN;
3136 		}
3137 		break;
3138 	}
3139 }
3140 
3141 static int mtk_get_sset_count(struct net_device *dev, int sset)
3142 {
3143 	switch (sset) {
3144 	case ETH_SS_STATS:
3145 		return ARRAY_SIZE(mtk_ethtool_stats);
3146 	default:
3147 		return -EOPNOTSUPP;
3148 	}
3149 }
3150 
3151 static void mtk_get_ethtool_stats(struct net_device *dev,
3152 				  struct ethtool_stats *stats, u64 *data)
3153 {
3154 	struct mtk_mac *mac = netdev_priv(dev);
3155 	struct mtk_hw_stats *hwstats = mac->hw_stats;
3156 	u64 *data_src, *data_dst;
3157 	unsigned int start;
3158 	int i;
3159 
3160 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3161 		return;
3162 
3163 	if (netif_running(dev) && netif_device_present(dev)) {
3164 		if (spin_trylock_bh(&hwstats->stats_lock)) {
3165 			mtk_stats_update_mac(mac);
3166 			spin_unlock_bh(&hwstats->stats_lock);
3167 		}
3168 	}
3169 
3170 	data_src = (u64 *)hwstats;
3171 
3172 	do {
3173 		data_dst = data;
3174 		start = u64_stats_fetch_begin_irq(&hwstats->syncp);
3175 
3176 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
3177 			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
3178 	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
3179 }
3180 
3181 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
3182 			 u32 *rule_locs)
3183 {
3184 	int ret = -EOPNOTSUPP;
3185 
3186 	switch (cmd->cmd) {
3187 	case ETHTOOL_GRXRINGS:
3188 		if (dev->hw_features & NETIF_F_LRO) {
3189 			cmd->data = MTK_MAX_RX_RING_NUM;
3190 			ret = 0;
3191 		}
3192 		break;
3193 	case ETHTOOL_GRXCLSRLCNT:
3194 		if (dev->hw_features & NETIF_F_LRO) {
3195 			struct mtk_mac *mac = netdev_priv(dev);
3196 
3197 			cmd->rule_cnt = mac->hwlro_ip_cnt;
3198 			ret = 0;
3199 		}
3200 		break;
3201 	case ETHTOOL_GRXCLSRULE:
3202 		if (dev->hw_features & NETIF_F_LRO)
3203 			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
3204 		break;
3205 	case ETHTOOL_GRXCLSRLALL:
3206 		if (dev->hw_features & NETIF_F_LRO)
3207 			ret = mtk_hwlro_get_fdir_all(dev, cmd,
3208 						     rule_locs);
3209 		break;
3210 	default:
3211 		break;
3212 	}
3213 
3214 	return ret;
3215 }
3216 
3217 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3218 {
3219 	int ret = -EOPNOTSUPP;
3220 
3221 	switch (cmd->cmd) {
3222 	case ETHTOOL_SRXCLSRLINS:
3223 		if (dev->hw_features & NETIF_F_LRO)
3224 			ret = mtk_hwlro_add_ipaddr(dev, cmd);
3225 		break;
3226 	case ETHTOOL_SRXCLSRLDEL:
3227 		if (dev->hw_features & NETIF_F_LRO)
3228 			ret = mtk_hwlro_del_ipaddr(dev, cmd);
3229 		break;
3230 	default:
3231 		break;
3232 	}
3233 
3234 	return ret;
3235 }
3236 
3237 static const struct ethtool_ops mtk_ethtool_ops = {
3238 	.get_link_ksettings	= mtk_get_link_ksettings,
3239 	.set_link_ksettings	= mtk_set_link_ksettings,
3240 	.get_drvinfo		= mtk_get_drvinfo,
3241 	.get_msglevel		= mtk_get_msglevel,
3242 	.set_msglevel		= mtk_set_msglevel,
3243 	.nway_reset		= mtk_nway_reset,
3244 	.get_link		= ethtool_op_get_link,
3245 	.get_strings		= mtk_get_strings,
3246 	.get_sset_count		= mtk_get_sset_count,
3247 	.get_ethtool_stats	= mtk_get_ethtool_stats,
3248 	.get_rxnfc		= mtk_get_rxnfc,
3249 	.set_rxnfc              = mtk_set_rxnfc,
3250 };
3251 
3252 static const struct net_device_ops mtk_netdev_ops = {
3253 	.ndo_init		= mtk_init,
3254 	.ndo_uninit		= mtk_uninit,
3255 	.ndo_open		= mtk_open,
3256 	.ndo_stop		= mtk_stop,
3257 	.ndo_start_xmit		= mtk_start_xmit,
3258 	.ndo_set_mac_address	= mtk_set_mac_address,
3259 	.ndo_validate_addr	= eth_validate_addr,
3260 	.ndo_eth_ioctl		= mtk_do_ioctl,
3261 	.ndo_change_mtu		= mtk_change_mtu,
3262 	.ndo_tx_timeout		= mtk_tx_timeout,
3263 	.ndo_get_stats64        = mtk_get_stats64,
3264 	.ndo_fix_features	= mtk_fix_features,
3265 	.ndo_set_features	= mtk_set_features,
3266 #ifdef CONFIG_NET_POLL_CONTROLLER
3267 	.ndo_poll_controller	= mtk_poll_controller,
3268 #endif
3269 	.ndo_setup_tc		= mtk_eth_setup_tc,
3270 };
3271 
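/* Create the netdev and phylink instance for one "mediatek,eth-mac" child
 * node and hook it up to the shared frame engine.
 */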
3272 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
3273 {
3274 	const __be32 *_id = of_get_property(np, "reg", NULL);
3275 	phy_interface_t phy_mode;
3276 	struct phylink *phylink;
3277 	struct mtk_mac *mac;
3278 	int id, err;
3279 
3280 	if (!_id) {
3281 		dev_err(eth->dev, "missing mac id\n");
3282 		return -EINVAL;
3283 	}
3284 
3285 	id = be32_to_cpup(_id);
3286 	if (id >= MTK_MAC_COUNT) {
3287 		dev_err(eth->dev, "%d is not a valid mac id\n", id);
3288 		return -EINVAL;
3289 	}
3290 
3291 	if (eth->netdev[id]) {
3292 		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
3293 		return -EINVAL;
3294 	}
3295 
3296 	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
3297 	if (!eth->netdev[id]) {
3298 		dev_err(eth->dev, "alloc_etherdev failed\n");
3299 		return -ENOMEM;
3300 	}
3301 	mac = netdev_priv(eth->netdev[id]);
3302 	eth->mac[id] = mac;
3303 	mac->id = id;
3304 	mac->hw = eth;
3305 	mac->of_node = np;
3306 
3307 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
3308 	mac->hwlro_ip_cnt = 0;
3309 
3310 	mac->hw_stats = devm_kzalloc(eth->dev,
3311 				     sizeof(*mac->hw_stats),
3312 				     GFP_KERNEL);
3313 	if (!mac->hw_stats) {
3314 		dev_err(eth->dev, "failed to allocate counter memory\n");
3315 		err = -ENOMEM;
3316 		goto free_netdev;
3317 	}
3318 	spin_lock_init(&mac->hw_stats->stats_lock);
3319 	u64_stats_init(&mac->hw_stats->syncp);
3320 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
3321 
3322 	/* phylink create */
3323 	err = of_get_phy_mode(np, &phy_mode);
3324 	if (err) {
3325 		dev_err(eth->dev, "incorrect phy-mode\n");
3326 		goto free_netdev;
3327 	}
3328 
3329 	/* mac config is not set */
3330 	mac->interface = PHY_INTERFACE_MODE_NA;
3331 	mac->speed = SPEED_UNKNOWN;
3332 
3333 	mac->phylink_config.dev = &eth->netdev[id]->dev;
3334 	mac->phylink_config.type = PHYLINK_NETDEV;
3335 	/* This driver makes use of state->speed in mac_config */
3336 	mac->phylink_config.legacy_pre_march2020 = true;
3337 	mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
3338 		MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
3339 
3340 	__set_bit(PHY_INTERFACE_MODE_MII,
3341 		  mac->phylink_config.supported_interfaces);
3342 	__set_bit(PHY_INTERFACE_MODE_GMII,
3343 		  mac->phylink_config.supported_interfaces);
3344 
3345 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
3346 		phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
3347 
3348 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
3349 		__set_bit(PHY_INTERFACE_MODE_TRGMII,
3350 			  mac->phylink_config.supported_interfaces);
3351 
3352 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
3353 		__set_bit(PHY_INTERFACE_MODE_SGMII,
3354 			  mac->phylink_config.supported_interfaces);
3355 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
3356 			  mac->phylink_config.supported_interfaces);
3357 		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
3358 			  mac->phylink_config.supported_interfaces);
3359 	}
3360 
3361 	phylink = phylink_create(&mac->phylink_config,
3362 				 of_fwnode_handle(mac->of_node),
3363 				 phy_mode, &mtk_phylink_ops);
3364 	if (IS_ERR(phylink)) {
3365 		err = PTR_ERR(phylink);
3366 		goto free_netdev;
3367 	}
3368 
3369 	mac->phylink = phylink;
3370 
3371 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
3372 	eth->netdev[id]->watchdog_timeo = 5 * HZ;
3373 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
3374 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
3375 
3376 	eth->netdev[id]->hw_features = eth->soc->hw_features;
3377 	if (eth->hwlro)
3378 		eth->netdev[id]->hw_features |= NETIF_F_LRO;
3379 
3380 	eth->netdev[id]->vlan_features = eth->soc->hw_features &
3381 		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
3382 	eth->netdev[id]->features |= eth->soc->hw_features;
3383 	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
3384 
3385 	eth->netdev[id]->irq = eth->irq[0];
3386 	eth->netdev[id]->dev.of_node = np;
3387 
3388 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3389 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
3390 	else
3391 		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
3392 
3393 	return 0;
3394 
3395 free_netdev:
3396 	free_netdev(eth->netdev[id]);
3397 	return err;
3398 }
3399 
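/* Switch the struct device used for DMA mappings: close all running
 * netdevs, swap eth->dma_dev and reopen them afterwards.
 */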
3400 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
3401 {
3402 	struct net_device *dev, *tmp;
3403 	LIST_HEAD(dev_list);
3404 	int i;
3405 
3406 	rtnl_lock();
3407 
3408 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3409 		dev = eth->netdev[i];
3410 
3411 		if (!dev || !(dev->flags & IFF_UP))
3412 			continue;
3413 
3414 		list_add_tail(&dev->close_list, &dev_list);
3415 	}
3416 
3417 	dev_close_many(&dev_list, false);
3418 
3419 	eth->dma_dev = dma_dev;
3420 
3421 	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
3422 		list_del_init(&dev->close_list);
3423 		dev_open(dev, NULL);
3424 	}
3425 
3426 	rtnl_unlock();
3427 }
3428 
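/* Platform probe: map the register space, look up syscon regmaps, clocks
 * and IRQs, initialize the hardware and register one netdev per MAC node.
 */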
3429 static int mtk_probe(struct platform_device *pdev)
3430 {
3431 	struct device_node *mac_np;
3432 	struct mtk_eth *eth;
3433 	int err, i;
3434 
3435 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
3436 	if (!eth)
3437 		return -ENOMEM;
3438 
3439 	eth->soc = of_device_get_match_data(&pdev->dev);
3440 
3441 	eth->dev = &pdev->dev;
3442 	eth->dma_dev = &pdev->dev;
3443 	eth->base = devm_platform_ioremap_resource(pdev, 0);
3444 	if (IS_ERR(eth->base))
3445 		return PTR_ERR(eth->base);
3446 
3447 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3448 		eth->ip_align = NET_IP_ALIGN;
3449 
3450 	spin_lock_init(&eth->page_lock);
3451 	spin_lock_init(&eth->tx_irq_lock);
3452 	spin_lock_init(&eth->rx_irq_lock);
3453 	spin_lock_init(&eth->dim_lock);
3454 
3455 	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3456 	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
3457 
3458 	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3459 	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
3460 
3461 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3462 		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3463 							      "mediatek,ethsys");
3464 		if (IS_ERR(eth->ethsys)) {
3465 			dev_err(&pdev->dev, "no ethsys regmap found\n");
3466 			return PTR_ERR(eth->ethsys);
3467 		}
3468 	}
3469 
3470 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
3471 		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3472 							     "mediatek,infracfg");
3473 		if (IS_ERR(eth->infra)) {
3474 			dev_err(&pdev->dev, "no infracfg regmap found\n");
3475 			return PTR_ERR(eth->infra);
3476 		}
3477 	}
3478 
3479 	if (of_dma_is_coherent(pdev->dev.of_node)) {
3480 		struct regmap *cci;
3481 
3482 		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3483 						      "cci-control-port");
3484 		/* enable CPU/bus coherency */
3485 		if (!IS_ERR(cci))
3486 			regmap_write(cci, 0, 3);
3487 	}
3488 
3489 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
3490 		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
3491 					  GFP_KERNEL);
3492 		if (!eth->sgmii)
3493 			return -ENOMEM;
3494 
3495 		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
3496 				     eth->soc->ana_rgc3);
3497 
3498 		if (err)
3499 			return err;
3500 	}
3501 
3502 	if (eth->soc->required_pctl) {
3503 		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3504 							    "mediatek,pctl");
3505 		if (IS_ERR(eth->pctl)) {
3506 			dev_err(&pdev->dev, "no pctl regmap found\n");
3507 			return PTR_ERR(eth->pctl);
3508 		}
3509 	}
3510 
3511 	for (i = 0;; i++) {
3512 		struct device_node *np = of_parse_phandle(pdev->dev.of_node,
3513 							  "mediatek,wed", i);
3514 		static const u32 wdma_regs[] = {
3515 			MTK_WDMA0_BASE,
3516 			MTK_WDMA1_BASE
3517 		};
3518 		void __iomem *wdma;
3519 
3520 		if (!np || i >= ARRAY_SIZE(wdma_regs))
3521 			break;
3522 
3523 		wdma = eth->base + wdma_regs[i];
3524 		mtk_wed_add_hw(np, eth, wdma, i);
3525 	}
3526 
3527 	for (i = 0; i < 3; i++) {
3528 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
3529 			eth->irq[i] = eth->irq[0];
3530 		else
3531 			eth->irq[i] = platform_get_irq(pdev, i);
3532 		if (eth->irq[i] < 0) {
3533 			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
3534 			return -ENXIO;
3535 		}
3536 	}
3537 	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
3538 		eth->clks[i] = devm_clk_get(eth->dev,
3539 					    mtk_clks_source_name[i]);
3540 		if (IS_ERR(eth->clks[i])) {
3541 			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
3542 				return -EPROBE_DEFER;
3543 			if (eth->soc->required_clks & BIT(i)) {
3544 				dev_err(&pdev->dev, "clock %s not found\n",
3545 					mtk_clks_source_name[i]);
3546 				return -EINVAL;
3547 			}
3548 			eth->clks[i] = NULL;
3549 		}
3550 	}
3551 
3552 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
3553 	INIT_WORK(&eth->pending_work, mtk_pending_work);
3554 
3555 	err = mtk_hw_init(eth);
3556 	if (err)
3557 		return err;
3558 
3559 	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
3560 
3561 	for_each_child_of_node(pdev->dev.of_node, mac_np) {
3562 		if (!of_device_is_compatible(mac_np,
3563 					     "mediatek,eth-mac"))
3564 			continue;
3565 
3566 		if (!of_device_is_available(mac_np))
3567 			continue;
3568 
3569 		err = mtk_add_mac(eth, mac_np);
3570 		if (err) {
3571 			of_node_put(mac_np);
3572 			goto err_deinit_hw;
3573 		}
3574 	}
3575 
3576 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
3577 		err = devm_request_irq(eth->dev, eth->irq[0],
3578 				       mtk_handle_irq, 0,
3579 				       dev_name(eth->dev), eth);
3580 	} else {
3581 		err = devm_request_irq(eth->dev, eth->irq[1],
3582 				       mtk_handle_irq_tx, 0,
3583 				       dev_name(eth->dev), eth);
3584 		if (err)
3585 			goto err_free_dev;
3586 
3587 		err = devm_request_irq(eth->dev, eth->irq[2],
3588 				       mtk_handle_irq_rx, 0,
3589 				       dev_name(eth->dev), eth);
3590 	}
3591 	if (err)
3592 		goto err_free_dev;
3593 
3594 	/* No MT7628/88 support yet */
3595 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3596 		err = mtk_mdio_init(eth);
3597 		if (err)
3598 			goto err_free_dev;
3599 	}
3600 
3601 	if (eth->soc->offload_version) {
3602 		eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
3603 		if (!eth->ppe) {
3604 			err = -ENOMEM;
3605 			goto err_free_dev;
3606 		}
3607 
3608 		err = mtk_eth_offload_init(eth);
3609 		if (err)
3610 			goto err_free_dev;
3611 	}
3612 
3613 	for (i = 0; i < MTK_MAX_DEVS; i++) {
3614 		if (!eth->netdev[i])
3615 			continue;
3616 
3617 		err = register_netdev(eth->netdev[i]);
3618 		if (err) {
3619 			dev_err(eth->dev, "error bringing up device\n");
3620 			goto err_deinit_mdio;
3621 		} else
3622 			netif_info(eth, probe, eth->netdev[i],
3623 				   "mediatek frame engine at 0x%08lx, irq %d\n",
3624 				   eth->netdev[i]->base_addr, eth->irq[0]);
3625 	}
3626 
3627 	/* we run 2 devices on the same DMA ring so we need a dummy device
3628 	 * for NAPI to work
3629 	 */
3630 	init_dummy_netdev(&eth->dummy_dev);
3631 	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
3632 		       NAPI_POLL_WEIGHT);
3633 	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
3634 		       NAPI_POLL_WEIGHT);
3635 
3636 	platform_set_drvdata(pdev, eth);
3637 
3638 	return 0;
3639 
3640 err_deinit_mdio:
3641 	mtk_mdio_cleanup(eth);
3642 err_free_dev:
3643 	mtk_free_dev(eth);
3644 err_deinit_hw:
3645 	mtk_hw_deinit(eth);
3646 
3647 	return err;
3648 }
3649 
3650 static int mtk_remove(struct platform_device *pdev)
3651 {
3652 	struct mtk_eth *eth = platform_get_drvdata(pdev);
3653 	struct mtk_mac *mac;
3654 	int i;
3655 
3656 	/* stop all devices to make sure that dma is properly shut down */
3657 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3658 		if (!eth->netdev[i])
3659 			continue;
3660 		mtk_stop(eth->netdev[i]);
3661 		mac = netdev_priv(eth->netdev[i]);
3662 		phylink_disconnect_phy(mac->phylink);
3663 	}
3664 
3665 	mtk_hw_deinit(eth);
3666 
3667 	netif_napi_del(&eth->tx_napi);
3668 	netif_napi_del(&eth->rx_napi);
3669 	mtk_cleanup(eth);
3670 	mtk_mdio_cleanup(eth);
3671 
3672 	return 0;
3673 }
3674 
3675 static const struct mtk_soc_data mt2701_data = {
3676 	.reg_map = &mtk_reg_map,
3677 	.caps = MT7623_CAPS | MTK_HWLRO,
3678 	.hw_features = MTK_HW_FEATURES,
3679 	.required_clks = MT7623_CLKS_BITMAP,
3680 	.required_pctl = true,
3681 	.txrx = {
3682 		.txd_size = sizeof(struct mtk_tx_dma),
3683 		.rxd_size = sizeof(struct mtk_rx_dma),
3684 		.rx_irq_done_mask = MTK_RX_DONE_INT,
3685 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
3686 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
3687 		.dma_len_offset = 16,
3688 	},
3689 };
3690 
3691 static const struct mtk_soc_data mt7621_data = {
3692 	.reg_map = &mtk_reg_map,
3693 	.caps = MT7621_CAPS,
3694 	.hw_features = MTK_HW_FEATURES,
3695 	.required_clks = MT7621_CLKS_BITMAP,
3696 	.required_pctl = false,
3697 	.offload_version = 2,
3698 	.txrx = {
3699 		.txd_size = sizeof(struct mtk_tx_dma),
3700 		.rxd_size = sizeof(struct mtk_rx_dma),
3701 		.rx_irq_done_mask = MTK_RX_DONE_INT,
3702 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
3703 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
3704 		.dma_len_offset = 16,
3705 	},
3706 };
3707 
3708 static const struct mtk_soc_data mt7622_data = {
3709 	.reg_map = &mtk_reg_map,
3710 	.ana_rgc3 = 0x2028,
3711 	.caps = MT7622_CAPS | MTK_HWLRO,
3712 	.hw_features = MTK_HW_FEATURES,
3713 	.required_clks = MT7622_CLKS_BITMAP,
3714 	.required_pctl = false,
3715 	.offload_version = 2,
3716 	.txrx = {
3717 		.txd_size = sizeof(struct mtk_tx_dma),
3718 		.rxd_size = sizeof(struct mtk_rx_dma),
3719 		.rx_irq_done_mask = MTK_RX_DONE_INT,
3720 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
3721 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
3722 		.dma_len_offset = 16,
3723 	},
3724 };
3725 
3726 static const struct mtk_soc_data mt7623_data = {
3727 	.reg_map = &mtk_reg_map,
3728 	.caps = MT7623_CAPS | MTK_HWLRO,
3729 	.hw_features = MTK_HW_FEATURES,
3730 	.required_clks = MT7623_CLKS_BITMAP,
3731 	.required_pctl = true,
3732 	.offload_version = 2,
3733 	.txrx = {
3734 		.txd_size = sizeof(struct mtk_tx_dma),
3735 		.rxd_size = sizeof(struct mtk_rx_dma),
3736 		.rx_irq_done_mask = MTK_RX_DONE_INT,
3737 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
3738 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
3739 		.dma_len_offset = 16,
3740 	},
3741 };
3742 
3743 static const struct mtk_soc_data mt7629_data = {
3744 	.reg_map = &mtk_reg_map,
3745 	.ana_rgc3 = 0x128,
3746 	.caps = MT7629_CAPS | MTK_HWLRO,
3747 	.hw_features = MTK_HW_FEATURES,
3748 	.required_clks = MT7629_CLKS_BITMAP,
3749 	.required_pctl = false,
3750 	.txrx = {
3751 		.txd_size = sizeof(struct mtk_tx_dma),
3752 		.rxd_size = sizeof(struct mtk_rx_dma),
3753 		.rx_irq_done_mask = MTK_RX_DONE_INT,
3754 		.rx_dma_l4_valid = RX_DMA_L4_VALID,
3755 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
3756 		.dma_len_offset = 16,
3757 	},
3758 };
3759 
3760 static const struct mtk_soc_data mt7986_data = {
3761 	.reg_map = &mt7986_reg_map,
3762 	.ana_rgc3 = 0x128,
3763 	.caps = MT7986_CAPS,
3764 	.required_clks = MT7986_CLKS_BITMAP,
3765 	.required_pctl = false,
3766 	.txrx = {
3767 		.txd_size = sizeof(struct mtk_tx_dma_v2),
3768 		.rxd_size = sizeof(struct mtk_rx_dma_v2),
3769 		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
3770 		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
3771 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
3772 		.dma_len_offset = 8,
3773 	},
3774 };
3775 
3776 static const struct mtk_soc_data rt5350_data = {
3777 	.reg_map = &mt7628_reg_map,
3778 	.caps = MT7628_CAPS,
3779 	.hw_features = MTK_HW_FEATURES_MT7628,
3780 	.required_clks = MT7628_CLKS_BITMAP,
3781 	.required_pctl = false,
3782 	.txrx = {
3783 		.txd_size = sizeof(struct mtk_tx_dma),
3784 		.rxd_size = sizeof(struct mtk_rx_dma),
3785 		.rx_irq_done_mask = MTK_RX_DONE_INT,
3786 		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
3787 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
3788 		.dma_len_offset = 16,
3789 	},
3790 };
3791 
3792 const struct of_device_id of_mtk_match[] = {
3793 	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
3794 	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
3795 	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
3796 	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
3797 	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
3798 	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
3799 	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
3800 	{},
3801 };
3802 MODULE_DEVICE_TABLE(of, of_mtk_match);
3803 
3804 static struct platform_driver mtk_driver = {
3805 	.probe = mtk_probe,
3806 	.remove = mtk_remove,
3807 	.driver = {
3808 		.name = "mtk_soc_eth",
3809 		.of_match_table = of_mtk_match,
3810 	},
3811 };
3812 
3813 module_platform_driver(mtk_driver);
3814 
3815 MODULE_LICENSE("GPL");
3816 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
3817 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
3818