1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5  *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6  *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7  */
8 
9 #include <linux/of_device.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/mfd/syscon.h>
13 #include <linux/regmap.h>
14 #include <linux/clk.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/if_vlan.h>
17 #include <linux/reset.h>
18 #include <linux/tcp.h>
19 #include <linux/interrupt.h>
20 #include <linux/pinctrl/devinfo.h>
21 #include <linux/phylink.h>
22 
23 #include "mtk_eth_soc.h"
24 
25 static int mtk_msg_level = -1;
26 module_param_named(msg_level, mtk_msg_level, int, 0);
27 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
28 
29 #define MTK_ETHTOOL_STAT(x) { #x, \
30 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
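/* The offset is stored in units of u64 words so that struct mtk_hw_stats can
 * be read back as a flat array of 64-bit counters by the ethtool stats code.
 */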
31 
32 /* strings used by ethtool */
33 static const struct mtk_ethtool_stats {
34 	char str[ETH_GSTRING_LEN];
35 	u32 offset;
36 } mtk_ethtool_stats[] = {
37 	MTK_ETHTOOL_STAT(tx_bytes),
38 	MTK_ETHTOOL_STAT(tx_packets),
39 	MTK_ETHTOOL_STAT(tx_skip),
40 	MTK_ETHTOOL_STAT(tx_collisions),
41 	MTK_ETHTOOL_STAT(rx_bytes),
42 	MTK_ETHTOOL_STAT(rx_packets),
43 	MTK_ETHTOOL_STAT(rx_overflow),
44 	MTK_ETHTOOL_STAT(rx_fcs_errors),
45 	MTK_ETHTOOL_STAT(rx_short_errors),
46 	MTK_ETHTOOL_STAT(rx_long_errors),
47 	MTK_ETHTOOL_STAT(rx_checksum_errors),
48 	MTK_ETHTOOL_STAT(rx_flow_control_packets),
49 };
50 
51 static const char * const mtk_clks_source_name[] = {
52 	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
53 	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
54 	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
55 	"sgmii_ck", "eth2pll",
56 };
57 
58 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
59 {
60 	__raw_writel(val, eth->base + reg);
61 }
62 
63 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
64 {
65 	return __raw_readl(eth->base + reg);
66 }
67 
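/* Read-modify-write helper: clear the bits in @mask and set the bits in @set
 * of register @reg.
 */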
68 static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
69 {
70 	u32 val;
71 
72 	val = mtk_r32(eth, reg);
73 	val &= ~mask;
74 	val |= set;
75 	mtk_w32(eth, val, reg);
	return val;
77 }
78 
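/* Poll the PHY indirect access control register until the hardware clears the
 * ACCESS bit, i.e. the previous MDIO transaction has completed, or give up
 * after PHY_IAC_TIMEOUT.
 */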
79 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
80 {
81 	unsigned long t_start = jiffies;
82 
83 	while (1) {
84 		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
85 			return 0;
86 		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
87 			break;
88 		usleep_range(10, 20);
89 	}
90 
91 	dev_err(eth->dev, "mdio: MDIO timeout\n");
92 	return -1;
93 }
94 
95 static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
96 			   u32 phy_register, u32 write_data)
97 {
98 	if (mtk_mdio_busy_wait(eth))
99 		return -1;
100 
101 	write_data &= 0xffff;
102 
103 	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
104 		(phy_register << PHY_IAC_REG_SHIFT) |
105 		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
106 		MTK_PHY_IAC);
107 
108 	if (mtk_mdio_busy_wait(eth))
109 		return -1;
110 
111 	return 0;
112 }
113 
114 static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
115 {
116 	u32 d;
117 
118 	if (mtk_mdio_busy_wait(eth))
119 		return 0xffff;
120 
121 	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
122 		(phy_reg << PHY_IAC_REG_SHIFT) |
123 		(phy_addr << PHY_IAC_ADDR_SHIFT),
124 		MTK_PHY_IAC);
125 
126 	if (mtk_mdio_busy_wait(eth))
127 		return 0xffff;
128 
129 	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
130 
131 	return d;
132 }
133 
134 static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
135 			  int phy_reg, u16 val)
136 {
137 	struct mtk_eth *eth = bus->priv;
138 
139 	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
140 }
141 
142 static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
143 {
144 	struct mtk_eth *eth = bus->priv;
145 
146 	return _mtk_mdio_read(eth, phy_addr, phy_reg);
147 }
148 
149 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
150 				     phy_interface_t interface)
151 {
152 	u32 val;
153 
154 	/* Check DDR memory type.
155 	 * Currently TRGMII mode with DDR2 memory is not supported.
156 	 */
157 	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
158 	if (interface == PHY_INTERFACE_MODE_TRGMII &&
159 	    val & SYSCFG_DRAM_TYPE_DDR2) {
160 		dev_err(eth->dev,
161 			"TRGMII mode with DDR2 memory is not supported!\n");
162 		return -EOPNOTSUPP;
163 	}
164 
165 	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
166 		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
167 
168 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
169 			   ETHSYS_TRGMII_MT7621_MASK, val);
170 
171 	return 0;
172 }
173 
174 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
175 {
176 	u32 val;
177 	int ret;
178 
179 	val = (speed == SPEED_1000) ?
180 		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
181 	mtk_w32(eth, val, INTF_MODE);
182 
183 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
184 			   ETHSYS_TRGMII_CLK_SEL362_5,
185 			   ETHSYS_TRGMII_CLK_SEL362_5);
186 
187 	val = (speed == SPEED_1000) ? 250000000 : 500000000;
188 	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
189 	if (ret)
190 		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
191 
192 	val = (speed == SPEED_1000) ?
193 		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
194 	mtk_w32(eth, val, TRGMII_RCK_CTRL);
195 
196 	val = (speed == SPEED_1000) ?
197 		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
198 	mtk_w32(eth, val, TRGMII_TCK_CTRL);
199 }
200 
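/* phylink .mac_config callback: select the SoC-level path/pinmux for the
 * requested interface, program the GE mode in the ETHSYS syscon, set up
 * SGMII where needed and finally update the per-MAC MCR register with the
 * forced speed, duplex and flow-control settings.
 */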
201 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
202 			   const struct phylink_link_state *state)
203 {
204 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
205 					   phylink_config);
206 	struct mtk_eth *eth = mac->hw;
207 	u32 mcr_cur, mcr_new, sid, i;
208 	int val, ge_mode, err;
209 
	/* MT76x8 has no hardware settings for the MAC */
211 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
212 	    mac->interface != state->interface) {
213 		/* Setup soc pin functions */
214 		switch (state->interface) {
215 		case PHY_INTERFACE_MODE_TRGMII:
216 			if (mac->id)
217 				goto err_phy;
218 			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
219 					  MTK_GMAC1_TRGMII))
220 				goto err_phy;
221 			/* fall through */
222 		case PHY_INTERFACE_MODE_RGMII_TXID:
223 		case PHY_INTERFACE_MODE_RGMII_RXID:
224 		case PHY_INTERFACE_MODE_RGMII_ID:
225 		case PHY_INTERFACE_MODE_RGMII:
226 		case PHY_INTERFACE_MODE_MII:
227 		case PHY_INTERFACE_MODE_REVMII:
228 		case PHY_INTERFACE_MODE_RMII:
229 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
230 				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
231 				if (err)
232 					goto init_err;
233 			}
234 			break;
235 		case PHY_INTERFACE_MODE_1000BASEX:
236 		case PHY_INTERFACE_MODE_2500BASEX:
237 		case PHY_INTERFACE_MODE_SGMII:
238 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
239 				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
240 				if (err)
241 					goto init_err;
242 			}
243 			break;
244 		case PHY_INTERFACE_MODE_GMII:
245 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
246 				err = mtk_gmac_gephy_path_setup(eth, mac->id);
247 				if (err)
248 					goto init_err;
249 			}
250 			break;
251 		default:
252 			goto err_phy;
253 		}
254 
255 		/* Setup clock for 1st gmac */
256 		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
257 		    !phy_interface_mode_is_8023z(state->interface) &&
258 		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
259 			if (MTK_HAS_CAPS(mac->hw->soc->caps,
260 					 MTK_TRGMII_MT7621_CLK)) {
261 				if (mt7621_gmac0_rgmii_adjust(mac->hw,
262 							      state->interface))
263 					goto err_phy;
264 			} else {
265 				if (state->interface !=
266 				    PHY_INTERFACE_MODE_TRGMII)
267 					mtk_gmac0_rgmii_adjust(mac->hw,
268 							       state->speed);
269 
270 				/* mt7623_pad_clk_setup */
271 				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
272 					mtk_w32(mac->hw,
273 						TD_DM_DRVP(8) | TD_DM_DRVN(8),
274 						TRGMII_TD_ODT(i));
275 
276 				/* Assert/release MT7623 RXC reset */
277 				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
278 					TRGMII_RCK_CTRL);
279 				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
280 			}
281 		}
282 
283 		ge_mode = 0;
284 		switch (state->interface) {
285 		case PHY_INTERFACE_MODE_MII:
286 		case PHY_INTERFACE_MODE_GMII:
287 			ge_mode = 1;
288 			break;
289 		case PHY_INTERFACE_MODE_REVMII:
290 			ge_mode = 2;
291 			break;
292 		case PHY_INTERFACE_MODE_RMII:
293 			if (mac->id)
294 				goto err_phy;
295 			ge_mode = 3;
296 			break;
297 		default:
298 			break;
299 		}
300 
301 		/* put the gmac into the right mode */
302 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
303 		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
304 		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
305 		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
306 
307 		mac->interface = state->interface;
308 	}
309 
310 	/* SGMII */
311 	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
312 	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS setup is done.
		 */
316 		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
317 
318 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
319 				   SYSCFG0_SGMII_MASK,
320 				   ~(u32)SYSCFG0_SGMII_MASK);
321 
		/* Decide how GMAC and SGMIISYS are mapped */
323 		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
324 		       0 : mac->id;
325 
326 		/* Setup SGMIISYS with the determined property */
327 		if (state->interface != PHY_INTERFACE_MODE_SGMII)
328 			err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
329 							 state);
330 		else if (phylink_autoneg_inband(mode))
331 			err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
332 
333 		if (err)
334 			goto init_err;
335 
336 		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
337 				   SYSCFG0_SGMII_MASK, val);
338 	} else if (phylink_autoneg_inband(mode)) {
339 		dev_err(eth->dev,
340 			"In-band mode not supported in non SGMII mode!\n");
341 		return;
342 	}
343 
344 	/* Setup gmac */
345 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
346 	mcr_new = mcr_cur;
347 	mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
348 		     MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
349 		     MAC_MCR_FORCE_RX_FC);
350 	mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
351 		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
352 
353 	switch (state->speed) {
354 	case SPEED_2500:
355 	case SPEED_1000:
356 		mcr_new |= MAC_MCR_SPEED_1000;
357 		break;
358 	case SPEED_100:
359 		mcr_new |= MAC_MCR_SPEED_100;
360 		break;
361 	}
362 	if (state->duplex == DUPLEX_FULL) {
363 		mcr_new |= MAC_MCR_FORCE_DPX;
364 		if (state->pause & MLO_PAUSE_TX)
365 			mcr_new |= MAC_MCR_FORCE_TX_FC;
366 		if (state->pause & MLO_PAUSE_RX)
367 			mcr_new |= MAC_MCR_FORCE_RX_FC;
368 	}
369 
370 	/* Only update control register when needed! */
371 	if (mcr_new != mcr_cur)
372 		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
373 
374 	return;
375 
376 err_phy:
377 	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
378 		mac->id, phy_modes(state->interface));
379 	return;
380 
381 init_err:
382 	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
383 		mac->id, phy_modes(state->interface), err);
384 }
385 
386 static void mtk_mac_pcs_get_state(struct phylink_config *config,
387 				  struct phylink_link_state *state)
388 {
389 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
390 					   phylink_config);
391 	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
392 
393 	state->link = (pmsr & MAC_MSR_LINK);
394 	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
395 
396 	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
397 	case 0:
398 		state->speed = SPEED_10;
399 		break;
400 	case MAC_MSR_SPEED_100:
401 		state->speed = SPEED_100;
402 		break;
403 	case MAC_MSR_SPEED_1000:
404 		state->speed = SPEED_1000;
405 		break;
406 	default:
407 		state->speed = SPEED_UNKNOWN;
408 		break;
409 	}
410 
411 	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
412 	if (pmsr & MAC_MSR_RX_FC)
413 		state->pause |= MLO_PAUSE_RX;
414 	if (pmsr & MAC_MSR_TX_FC)
415 		state->pause |= MLO_PAUSE_TX;
416 }
417 
418 static void mtk_mac_an_restart(struct phylink_config *config)
419 {
420 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
421 					   phylink_config);
422 
423 	mtk_sgmii_restart_an(mac->hw, mac->id);
424 }
425 
426 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
427 			      phy_interface_t interface)
428 {
429 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
430 					   phylink_config);
431 	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
432 
433 	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
434 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
435 }
436 
437 static void mtk_mac_link_up(struct phylink_config *config,
438 			    struct phy_device *phy,
439 			    unsigned int mode, phy_interface_t interface,
440 			    int speed, int duplex, bool tx_pause, bool rx_pause)
441 {
442 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
443 					   phylink_config);
444 	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
445 
446 	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
447 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
448 }
449 
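/* phylink .validate callback: reject interface modes this SoC cannot provide
 * (based on MTK_HAS_CAPS), then build the link-mode mask for the requested
 * interface and AND it into both the supported and advertising masks.
 */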
450 static void mtk_validate(struct phylink_config *config,
451 			 unsigned long *supported,
452 			 struct phylink_link_state *state)
453 {
454 	struct mtk_mac *mac = container_of(config, struct mtk_mac,
455 					   phylink_config);
456 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
457 
458 	if (state->interface != PHY_INTERFACE_MODE_NA &&
459 	    state->interface != PHY_INTERFACE_MODE_MII &&
460 	    state->interface != PHY_INTERFACE_MODE_GMII &&
461 	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
462 	      phy_interface_mode_is_rgmii(state->interface)) &&
463 	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
464 	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
465 	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
466 	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
467 	       phy_interface_mode_is_8023z(state->interface)))) {
468 		linkmode_zero(supported);
469 		return;
470 	}
471 
472 	phylink_set_port_modes(mask);
473 	phylink_set(mask, Autoneg);
474 
475 	switch (state->interface) {
476 	case PHY_INTERFACE_MODE_TRGMII:
477 		phylink_set(mask, 1000baseT_Full);
478 		break;
479 	case PHY_INTERFACE_MODE_1000BASEX:
480 	case PHY_INTERFACE_MODE_2500BASEX:
481 		phylink_set(mask, 1000baseX_Full);
482 		phylink_set(mask, 2500baseX_Full);
483 		break;
484 	case PHY_INTERFACE_MODE_GMII:
485 	case PHY_INTERFACE_MODE_RGMII:
486 	case PHY_INTERFACE_MODE_RGMII_ID:
487 	case PHY_INTERFACE_MODE_RGMII_RXID:
488 	case PHY_INTERFACE_MODE_RGMII_TXID:
489 		phylink_set(mask, 1000baseT_Half);
490 		/* fall through */
491 	case PHY_INTERFACE_MODE_SGMII:
492 		phylink_set(mask, 1000baseT_Full);
493 		phylink_set(mask, 1000baseX_Full);
494 		/* fall through */
495 	case PHY_INTERFACE_MODE_MII:
496 	case PHY_INTERFACE_MODE_RMII:
497 	case PHY_INTERFACE_MODE_REVMII:
498 	case PHY_INTERFACE_MODE_NA:
499 	default:
500 		phylink_set(mask, 10baseT_Half);
501 		phylink_set(mask, 10baseT_Full);
502 		phylink_set(mask, 100baseT_Half);
503 		phylink_set(mask, 100baseT_Full);
504 		break;
505 	}
506 
507 	if (state->interface == PHY_INTERFACE_MODE_NA) {
508 		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
509 			phylink_set(mask, 1000baseT_Full);
510 			phylink_set(mask, 1000baseX_Full);
511 			phylink_set(mask, 2500baseX_Full);
512 		}
513 		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
514 			phylink_set(mask, 1000baseT_Full);
515 			phylink_set(mask, 1000baseT_Half);
516 			phylink_set(mask, 1000baseX_Full);
517 		}
518 		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
519 			phylink_set(mask, 1000baseT_Full);
520 			phylink_set(mask, 1000baseT_Half);
521 		}
522 	}
523 
524 	phylink_set(mask, Pause);
525 	phylink_set(mask, Asym_Pause);
526 
527 	linkmode_and(supported, supported, mask);
528 	linkmode_and(state->advertising, state->advertising, mask);
529 
530 	/* We can only operate at 2500BaseX or 1000BaseX. If requested
531 	 * to advertise both, only report advertising at 2500BaseX.
532 	 */
533 	phylink_helper_basex_speed(state);
534 }
535 
536 static const struct phylink_mac_ops mtk_phylink_ops = {
537 	.validate = mtk_validate,
538 	.mac_pcs_get_state = mtk_mac_pcs_get_state,
539 	.mac_an_restart = mtk_mac_an_restart,
540 	.mac_config = mtk_mac_config,
541 	.mac_link_down = mtk_mac_link_down,
542 	.mac_link_up = mtk_mac_link_up,
543 };
544 
545 static int mtk_mdio_init(struct mtk_eth *eth)
546 {
547 	struct device_node *mii_np;
548 	int ret;
549 
550 	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
551 	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found\n", "mdio-bus");
553 		return -ENODEV;
554 	}
555 
556 	if (!of_device_is_available(mii_np)) {
557 		ret = -ENODEV;
558 		goto err_put_node;
559 	}
560 
561 	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
562 	if (!eth->mii_bus) {
563 		ret = -ENOMEM;
564 		goto err_put_node;
565 	}
566 
567 	eth->mii_bus->name = "mdio";
568 	eth->mii_bus->read = mtk_mdio_read;
569 	eth->mii_bus->write = mtk_mdio_write;
570 	eth->mii_bus->priv = eth;
571 	eth->mii_bus->parent = eth->dev;
572 
573 	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
574 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
575 
576 err_put_node:
577 	of_node_put(mii_np);
578 	return ret;
579 }
580 
581 static void mtk_mdio_cleanup(struct mtk_eth *eth)
582 {
583 	if (!eth->mii_bus)
584 		return;
585 
586 	mdiobus_unregister(eth->mii_bus);
587 }
588 
589 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
590 {
591 	unsigned long flags;
592 	u32 val;
593 
594 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
595 	val = mtk_r32(eth, eth->tx_int_mask_reg);
596 	mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
597 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
598 }
599 
600 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
601 {
602 	unsigned long flags;
603 	u32 val;
604 
605 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
606 	val = mtk_r32(eth, eth->tx_int_mask_reg);
607 	mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
608 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
609 }
610 
611 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
612 {
613 	unsigned long flags;
614 	u32 val;
615 
616 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
617 	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
618 	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
619 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
620 }
621 
622 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
623 {
624 	unsigned long flags;
625 	u32 val;
626 
627 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
628 	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
629 	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
630 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
631 }
632 
633 static int mtk_set_mac_address(struct net_device *dev, void *p)
634 {
635 	int ret = eth_mac_addr(dev, p);
636 	struct mtk_mac *mac = netdev_priv(dev);
637 	struct mtk_eth *eth = mac->hw;
638 	const char *macaddr = dev->dev_addr;
639 
640 	if (ret)
641 		return ret;
642 
643 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
644 		return -EBUSY;
645 
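	/* The MAC address is split across two registers: ADRH holds the two
	 * high bytes, ADRL the remaining four.
	 */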
646 	spin_lock_bh(&mac->hw->page_lock);
647 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
648 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
649 			MT7628_SDM_MAC_ADRH);
650 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
651 			(macaddr[4] << 8) | macaddr[5],
652 			MT7628_SDM_MAC_ADRL);
653 	} else {
654 		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
655 			MTK_GDMA_MAC_ADRH(mac->id));
656 		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
657 			(macaddr[4] << 8) | macaddr[5],
658 			MTK_GDMA_MAC_ADRL(mac->id));
659 	}
660 	spin_unlock_bh(&mac->hw->page_lock);
661 
662 	return 0;
663 }
664 
665 void mtk_stats_update_mac(struct mtk_mac *mac)
666 {
667 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
668 	unsigned int base = MTK_GDM1_TX_GBCNT;
669 	u64 stats;
670 
671 	base += hw_stats->reg_offset;
672 
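	/* The RX/TX byte counters are 64 bits wide and are exposed as a low
	 * word followed by a high word (base + 0x00/0x04 and base +
	 * 0x30/0x34); the remaining counters are plain 32-bit values.
	 */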
673 	u64_stats_update_begin(&hw_stats->syncp);
674 
675 	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
676 	stats =  mtk_r32(mac->hw, base + 0x04);
677 	if (stats)
678 		hw_stats->rx_bytes += (stats << 32);
679 	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
680 	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
681 	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
682 	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
683 	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
684 	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
685 	hw_stats->rx_flow_control_packets +=
686 					mtk_r32(mac->hw, base + 0x24);
687 	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
688 	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
689 	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
690 	stats =  mtk_r32(mac->hw, base + 0x34);
691 	if (stats)
692 		hw_stats->tx_bytes += (stats << 32);
693 	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
694 	u64_stats_update_end(&hw_stats->syncp);
695 }
696 
697 static void mtk_stats_update(struct mtk_eth *eth)
698 {
699 	int i;
700 
701 	for (i = 0; i < MTK_MAC_COUNT; i++) {
702 		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
703 			continue;
704 		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
705 			mtk_stats_update_mac(eth->mac[i]);
706 			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
707 		}
708 	}
709 }
710 
711 static void mtk_get_stats64(struct net_device *dev,
712 			    struct rtnl_link_stats64 *storage)
713 {
714 	struct mtk_mac *mac = netdev_priv(dev);
715 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
716 	unsigned int start;
717 
718 	if (netif_running(dev) && netif_device_present(dev)) {
719 		if (spin_trylock_bh(&hw_stats->stats_lock)) {
720 			mtk_stats_update_mac(mac);
721 			spin_unlock_bh(&hw_stats->stats_lock);
722 		}
723 	}
724 
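	/* Retry on the u64_stats sequence counter so the 64-bit counters,
	 * which mtk_stats_update_mac() may be updating concurrently, are
	 * snapshotted consistently, also on 32-bit systems.
	 */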
725 	do {
726 		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
727 		storage->rx_packets = hw_stats->rx_packets;
728 		storage->tx_packets = hw_stats->tx_packets;
729 		storage->rx_bytes = hw_stats->rx_bytes;
730 		storage->tx_bytes = hw_stats->tx_bytes;
731 		storage->collisions = hw_stats->tx_collisions;
732 		storage->rx_length_errors = hw_stats->rx_short_errors +
733 			hw_stats->rx_long_errors;
734 		storage->rx_over_errors = hw_stats->rx_overflow;
735 		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
736 		storage->rx_errors = hw_stats->rx_checksum_errors;
737 		storage->tx_aborted_errors = hw_stats->tx_skip;
738 	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
739 
740 	storage->tx_errors = dev->stats.tx_errors;
741 	storage->rx_dropped = dev->stats.rx_dropped;
742 	storage->tx_dropped = dev->stats.tx_dropped;
743 }
744 
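/* frag_size is the size of the page fragment backing one RX buffer: padded
 * headroom plus data area plus the struct skb_shared_info tail. buf_size is
 * the part of that fragment the hardware may DMA into, i.e. the fragment
 * minus headroom and shared_info.
 */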
745 static inline int mtk_max_frag_size(int mtu)
746 {
747 	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
748 	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
749 		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
750 
751 	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
752 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
753 }
754 
755 static inline int mtk_max_buf_size(int frag_size)
756 {
757 	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
758 		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
759 
760 	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
761 
762 	return buf_size;
763 }
764 
765 static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
766 				   struct mtk_rx_dma *dma_rxd)
767 {
768 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
769 	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
770 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
771 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
772 }
773 
/* the qdma core needs scratch memory to be set up */
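/* The free queue is seeded with a chain of "cnt" descriptors, each pointing
 * (txd1) at one MTK_QDMA_PAGE_SIZE chunk of the scratch buffer and linked to
 * the next descriptor through txd2; head, tail and count are then programmed
 * into the QDMA FQ registers.
 */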
775 static int mtk_init_fq_dma(struct mtk_eth *eth)
776 {
777 	dma_addr_t phy_ring_tail;
778 	int cnt = MTK_DMA_SIZE;
779 	dma_addr_t dma_addr;
780 	int i;
781 
782 	eth->scratch_ring = dma_alloc_coherent(eth->dev,
783 					       cnt * sizeof(struct mtk_tx_dma),
784 					       &eth->phy_scratch_ring,
785 					       GFP_ATOMIC);
786 	if (unlikely(!eth->scratch_ring))
787 		return -ENOMEM;
788 
789 	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
790 				    GFP_KERNEL);
791 	if (unlikely(!eth->scratch_head))
792 		return -ENOMEM;
793 
794 	dma_addr = dma_map_single(eth->dev,
795 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
796 				  DMA_FROM_DEVICE);
797 	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
798 		return -ENOMEM;
799 
800 	phy_ring_tail = eth->phy_scratch_ring +
801 			(sizeof(struct mtk_tx_dma) * (cnt - 1));
802 
803 	for (i = 0; i < cnt; i++) {
804 		eth->scratch_ring[i].txd1 =
805 					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
806 		if (i < cnt - 1)
807 			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
808 				((i + 1) * sizeof(struct mtk_tx_dma)));
809 		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
810 	}
811 
812 	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
813 	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
814 	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
815 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
816 
817 	return 0;
818 }
819 
820 static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
821 {
822 	void *ret = ring->dma;
823 
824 	return ret + (desc - ring->phys);
825 }
826 
827 static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
828 						    struct mtk_tx_dma *txd)
829 {
830 	int idx = txd - ring->dma;
831 
832 	return &ring->buf[idx];
833 }
834 
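/* ring->dma_pdma mirrors ring->dma entry for entry, so the PDMA descriptor
 * matching a QDMA descriptor sits at the same index in the shadow ring.
 */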
835 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
836 				       struct mtk_tx_dma *dma)
837 {
838 	return ring->dma_pdma - ring->dma + dma;
839 }
840 
841 static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
842 {
843 	return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
844 }
845 
846 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
847 {
848 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
849 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
850 			dma_unmap_single(eth->dev,
851 					 dma_unmap_addr(tx_buf, dma_addr0),
852 					 dma_unmap_len(tx_buf, dma_len0),
853 					 DMA_TO_DEVICE);
854 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
855 			dma_unmap_page(eth->dev,
856 				       dma_unmap_addr(tx_buf, dma_addr0),
857 				       dma_unmap_len(tx_buf, dma_len0),
858 				       DMA_TO_DEVICE);
859 		}
860 	} else {
861 		if (dma_unmap_len(tx_buf, dma_len0)) {
862 			dma_unmap_page(eth->dev,
863 				       dma_unmap_addr(tx_buf, dma_addr0),
864 				       dma_unmap_len(tx_buf, dma_len0),
865 				       DMA_TO_DEVICE);
866 		}
867 
868 		if (dma_unmap_len(tx_buf, dma_len1)) {
869 			dma_unmap_page(eth->dev,
870 				       dma_unmap_addr(tx_buf, dma_addr1),
871 				       dma_unmap_len(tx_buf, dma_len1),
872 				       DMA_TO_DEVICE);
873 		}
874 	}
875 
876 	tx_buf->flags = 0;
877 	if (tx_buf->skb &&
878 	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
879 		dev_kfree_skb_any(tx_buf->skb);
880 	tx_buf->skb = NULL;
881 }
882 
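/* In PDMA mode one hardware descriptor carries up to two buffers: an even
 * "idx" fills the txd1/PLEN0 slot, an odd one the txd3/PLEN1 slot. In QDMA
 * mode only the unmap bookkeeping is recorded here, the descriptor itself is
 * written by the caller.
 */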
883 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
884 			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
885 			 size_t size, int idx)
886 {
887 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
888 		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
889 		dma_unmap_len_set(tx_buf, dma_len0, size);
890 	} else {
891 		if (idx & 1) {
892 			txd->txd3 = mapped_addr;
893 			txd->txd2 |= TX_DMA_PLEN1(size);
894 			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
895 			dma_unmap_len_set(tx_buf, dma_len1, size);
896 		} else {
897 			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
898 			txd->txd1 = mapped_addr;
899 			txd->txd2 = TX_DMA_PLEN0(size);
900 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
901 			dma_unmap_len_set(tx_buf, dma_len0, size);
902 		}
903 	}
904 }
905 
906 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
907 		      int tx_num, struct mtk_tx_ring *ring, bool gso)
908 {
909 	struct mtk_mac *mac = netdev_priv(dev);
910 	struct mtk_eth *eth = mac->hw;
911 	struct mtk_tx_dma *itxd, *txd;
912 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
913 	struct mtk_tx_buf *itx_buf, *tx_buf;
914 	dma_addr_t mapped_addr;
915 	unsigned int nr_frags;
916 	int i, n_desc = 1;
917 	u32 txd4 = 0, fport;
918 	int k = 0;
919 
920 	itxd = ring->next_free;
921 	itxd_pdma = qdma_to_pdma(ring, itxd);
922 	if (itxd == ring->last_free)
923 		return -ENOMEM;
924 
925 	/* set the forward port */
926 	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
927 	txd4 |= fport;
928 
929 	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
930 	memset(itx_buf, 0, sizeof(*itx_buf));
931 
932 	if (gso)
933 		txd4 |= TX_DMA_TSO;
934 
935 	/* TX Checksum offload */
936 	if (skb->ip_summed == CHECKSUM_PARTIAL)
937 		txd4 |= TX_DMA_CHKSUM;
938 
939 	/* VLAN header offload */
940 	if (skb_vlan_tag_present(skb))
941 		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
942 
943 	mapped_addr = dma_map_single(eth->dev, skb->data,
944 				     skb_headlen(skb), DMA_TO_DEVICE);
945 	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
946 		return -ENOMEM;
947 
948 	WRITE_ONCE(itxd->txd1, mapped_addr);
949 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
950 	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
951 			  MTK_TX_FLAGS_FPORT1;
952 	setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
953 		     k++);
954 
955 	/* TX SG offload */
956 	txd = itxd;
957 	txd_pdma = qdma_to_pdma(ring, txd);
958 	nr_frags = skb_shinfo(skb)->nr_frags;
959 
960 	for (i = 0; i < nr_frags; i++) {
961 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
962 		unsigned int offset = 0;
963 		int frag_size = skb_frag_size(frag);
964 
965 		while (frag_size) {
966 			bool last_frag = false;
967 			unsigned int frag_map_size;
968 			bool new_desc = true;
969 
970 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
971 			    (i & 0x1)) {
972 				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
973 				txd_pdma = qdma_to_pdma(ring, txd);
974 				if (txd == ring->last_free)
975 					goto err_dma;
976 
977 				n_desc++;
978 			} else {
979 				new_desc = false;
980 			}
981 
983 			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
984 			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
985 						       frag_map_size,
986 						       DMA_TO_DEVICE);
987 			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
988 				goto err_dma;
989 
990 			if (i == nr_frags - 1 &&
991 			    (frag_size - frag_map_size) == 0)
992 				last_frag = true;
993 
994 			WRITE_ONCE(txd->txd1, mapped_addr);
995 			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
996 					       TX_DMA_PLEN0(frag_map_size) |
997 					       last_frag * TX_DMA_LS0));
998 			WRITE_ONCE(txd->txd4, fport);
999 
1000 			tx_buf = mtk_desc_to_tx_buf(ring, txd);
1001 			if (new_desc)
1002 				memset(tx_buf, 0, sizeof(*tx_buf));
1003 			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1004 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1005 			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1006 					 MTK_TX_FLAGS_FPORT1;
1007 
1008 			setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
1009 				     frag_map_size, k++);
1010 
1011 			frag_size -= frag_map_size;
1012 			offset += frag_map_size;
1013 		}
1014 	}
1015 
1016 	/* store skb to cleanup */
1017 	itx_buf->skb = skb;
1018 
1019 	WRITE_ONCE(itxd->txd4, txd4);
1020 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
1021 				(!nr_frags * TX_DMA_LS0)));
1022 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1023 		if (k & 0x1)
1024 			txd_pdma->txd2 |= TX_DMA_LS0;
1025 		else
1026 			txd_pdma->txd2 |= TX_DMA_LS1;
1027 	}
1028 
1029 	netdev_sent_queue(dev, skb->len);
1030 	skb_tx_timestamp(skb);
1031 
1032 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1033 	atomic_sub(n_desc, &ring->free_count);
1034 
1035 	/* make sure that all changes to the dma ring are flushed before we
1036 	 * continue
1037 	 */
1038 	wmb();
1039 
1040 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1041 		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
1042 		    !netdev_xmit_more())
1043 			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
1044 	} else {
1045 		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
1046 					     ring->dma_size);
1047 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1048 	}
1049 
1050 	return 0;
1051 
1052 err_dma:
1053 	do {
1054 		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
1055 
1056 		/* unmap dma */
1057 		mtk_tx_unmap(eth, tx_buf);
1058 
1059 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1060 		if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1061 			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1062 
1063 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1064 		itxd_pdma = qdma_to_pdma(ring, itxd);
1065 	} while (itxd != txd);
1066 
1067 	return -ENOMEM;
1068 }
1069 
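/* Worst-case number of TX descriptors needed for a skb: one for the linear
 * head plus, for GSO skbs, one per MTK_TX_DMA_BUF_LEN sized chunk of every
 * fragment, otherwise simply one per fragment.
 */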
1070 static inline int mtk_cal_txd_req(struct sk_buff *skb)
1071 {
1072 	int i, nfrags;
1073 	skb_frag_t *frag;
1074 
1075 	nfrags = 1;
1076 	if (skb_is_gso(skb)) {
1077 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1078 			frag = &skb_shinfo(skb)->frags[i];
1079 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1080 						MTK_TX_DMA_BUF_LEN);
1081 		}
1082 	} else {
1083 		nfrags += skb_shinfo(skb)->nr_frags;
1084 	}
1085 
1086 	return nfrags;
1087 }
1088 
1089 static int mtk_queue_stopped(struct mtk_eth *eth)
1090 {
1091 	int i;
1092 
1093 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1094 		if (!eth->netdev[i])
1095 			continue;
1096 		if (netif_queue_stopped(eth->netdev[i]))
1097 			return 1;
1098 	}
1099 
1100 	return 0;
1101 }
1102 
1103 static void mtk_wake_queue(struct mtk_eth *eth)
1104 {
1105 	int i;
1106 
1107 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1108 		if (!eth->netdev[i])
1109 			continue;
1110 		netif_wake_queue(eth->netdev[i]);
1111 	}
1112 }
1113 
1114 static void mtk_stop_queue(struct mtk_eth *eth)
1115 {
1116 	int i;
1117 
1118 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1119 		if (!eth->netdev[i])
1120 			continue;
1121 		netif_stop_queue(eth->netdev[i]);
1122 	}
1123 }
1124 
1125 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1126 {
1127 	struct mtk_mac *mac = netdev_priv(dev);
1128 	struct mtk_eth *eth = mac->hw;
1129 	struct mtk_tx_ring *ring = &eth->tx_ring;
1130 	struct net_device_stats *stats = &dev->stats;
1131 	bool gso = false;
1132 	int tx_num;
1133 
1134 	/* normally we can rely on the stack not calling this more than once,
1135 	 * however we have 2 queues running on the same ring so we need to lock
1136 	 * the ring access
1137 	 */
1138 	spin_lock(&eth->page_lock);
1139 
1140 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1141 		goto drop;
1142 
1143 	tx_num = mtk_cal_txd_req(skb);
1144 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1145 		mtk_stop_queue(eth);
1146 		netif_err(eth, tx_queued, dev,
1147 			  "Tx Ring full when queue awake!\n");
1148 		spin_unlock(&eth->page_lock);
1149 		return NETDEV_TX_BUSY;
1150 	}
1151 
1152 	/* TSO: fill MSS info in tcp checksum field */
1153 	if (skb_is_gso(skb)) {
1154 		if (skb_cow_head(skb, 0)) {
1155 			netif_warn(eth, tx_err, dev,
1156 				   "GSO expand head fail.\n");
1157 			goto drop;
1158 		}
1159 
1160 		if (skb_shinfo(skb)->gso_type &
1161 				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1162 			gso = true;
1163 			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1164 		}
1165 	}
1166 
1167 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1168 		goto drop;
1169 
1170 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1171 		mtk_stop_queue(eth);
1172 
1173 	spin_unlock(&eth->page_lock);
1174 
1175 	return NETDEV_TX_OK;
1176 
1177 drop:
1178 	spin_unlock(&eth->page_lock);
1179 	stats->tx_dropped++;
1180 	dev_kfree_skb_any(skb);
1181 	return NETDEV_TX_OK;
1182 }
1183 
1184 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1185 {
1186 	int i;
1187 	struct mtk_rx_ring *ring;
1188 	int idx;
1189 
1190 	if (!eth->hwlro)
1191 		return &eth->rx_ring[0];
1192 
1193 	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1194 		ring = &eth->rx_ring[i];
1195 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1196 		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
1197 			ring->calc_idx_update = true;
1198 			return ring;
1199 		}
1200 	}
1201 
1202 	return NULL;
1203 }
1204 
1205 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1206 {
1207 	struct mtk_rx_ring *ring;
1208 	int i;
1209 
1210 	if (!eth->hwlro) {
1211 		ring = &eth->rx_ring[0];
1212 		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1213 	} else {
1214 		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1215 			ring = &eth->rx_ring[i];
1216 			if (ring->calc_idx_update) {
1217 				ring->calc_idx_update = false;
1218 				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1219 			}
1220 		}
1221 	}
1222 }
1223 
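/* Receive up to "budget" packets. The ring is refilled in place: a
 * replacement page fragment is allocated and DMA-mapped before the received
 * buffer is handed up the stack, so a descriptor is never left without a
 * buffer; on any allocation or mapping failure the old buffer is simply
 * recycled via release_desc.
 */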
1224 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1225 		       struct mtk_eth *eth)
1226 {
1227 	struct mtk_rx_ring *ring;
1228 	int idx;
1229 	struct sk_buff *skb;
1230 	u8 *data, *new_data;
1231 	struct mtk_rx_dma *rxd, trxd;
1232 	int done = 0;
1233 
1234 	while (done < budget) {
1235 		struct net_device *netdev;
1236 		unsigned int pktlen;
1237 		dma_addr_t dma_addr;
1238 		int mac;
1239 
1240 		ring = mtk_get_rx_ring(eth);
1241 		if (unlikely(!ring))
1242 			goto rx_done;
1243 
1244 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1245 		rxd = &ring->dma[idx];
1246 		data = ring->data[idx];
1247 
1248 		mtk_rx_get_desc(&trxd, rxd);
1249 		if (!(trxd.rxd2 & RX_DMA_DONE))
1250 			break;
1251 
		/* find out which mac the packet comes from. values start at 1 */
1253 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1254 			mac = 0;
1255 		} else {
1256 			mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
1257 				RX_DMA_FPORT_MASK;
1258 			mac--;
1259 		}
1260 
1261 		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1262 			     !eth->netdev[mac]))
1263 			goto release_desc;
1264 
1265 		netdev = eth->netdev[mac];
1266 
1267 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1268 			goto release_desc;
1269 
1270 		/* alloc new buffer */
1271 		new_data = napi_alloc_frag(ring->frag_size);
1272 		if (unlikely(!new_data)) {
1273 			netdev->stats.rx_dropped++;
1274 			goto release_desc;
1275 		}
1276 		dma_addr = dma_map_single(eth->dev,
1277 					  new_data + NET_SKB_PAD +
1278 					  eth->ip_align,
1279 					  ring->buf_size,
1280 					  DMA_FROM_DEVICE);
1281 		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
1282 			skb_free_frag(new_data);
1283 			netdev->stats.rx_dropped++;
1284 			goto release_desc;
1285 		}
1286 
1287 		/* receive data */
1288 		skb = build_skb(data, ring->frag_size);
1289 		if (unlikely(!skb)) {
1290 			skb_free_frag(new_data);
1291 			netdev->stats.rx_dropped++;
1292 			goto release_desc;
1293 		}
1294 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1295 
1296 		dma_unmap_single(eth->dev, trxd.rxd1,
1297 				 ring->buf_size, DMA_FROM_DEVICE);
1298 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1299 		skb->dev = netdev;
1300 		skb_put(skb, pktlen);
1301 		if (trxd.rxd4 & eth->rx_dma_l4_valid)
1302 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1303 		else
1304 			skb_checksum_none_assert(skb);
1305 		skb->protocol = eth_type_trans(skb, netdev);
1306 
1307 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
1308 		    RX_DMA_VID(trxd.rxd3))
1309 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1310 					       RX_DMA_VID(trxd.rxd3));
1311 		skb_record_rx_queue(skb, 0);
1312 		napi_gro_receive(napi, skb);
1313 
1314 		ring->data[idx] = new_data;
1315 		rxd->rxd1 = (unsigned int)dma_addr;
1316 
1317 release_desc:
1318 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1319 			rxd->rxd2 = RX_DMA_LSO;
1320 		else
1321 			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
1322 
1323 		ring->calc_idx = idx;
1324 
1325 		done++;
1326 	}
1327 
1328 rx_done:
1329 	if (done) {
1330 		/* make sure that all changes to the dma ring are flushed before
1331 		 * we continue
1332 		 */
1333 		wmb();
1334 		mtk_update_rx_cpu_idx(eth);
1335 	}
1336 
1337 	return done;
1338 }
1339 
1340 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
1341 			    unsigned int *done, unsigned int *bytes)
1342 {
1343 	struct mtk_tx_ring *ring = &eth->tx_ring;
1344 	struct mtk_tx_dma *desc;
1345 	struct sk_buff *skb;
1346 	struct mtk_tx_buf *tx_buf;
1347 	u32 cpu, dma;
1348 
1349 	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
1350 	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1351 
1352 	desc = mtk_qdma_phys_to_virt(ring, cpu);
1353 
1354 	while ((cpu != dma) && budget) {
1355 		u32 next_cpu = desc->txd2;
1356 		int mac = 0;
1357 
1358 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1359 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1360 			break;
1361 
1362 		tx_buf = mtk_desc_to_tx_buf(ring, desc);
1363 		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
1364 			mac = 1;
1365 
1366 		skb = tx_buf->skb;
1367 		if (!skb)
1368 			break;
1369 
1370 		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1371 			bytes[mac] += skb->len;
1372 			done[mac]++;
1373 			budget--;
1374 		}
1375 		mtk_tx_unmap(eth, tx_buf);
1376 
1377 		ring->last_free = desc;
1378 		atomic_inc(&ring->free_count);
1379 
1380 		cpu = next_cpu;
1381 	}
1382 
1383 	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
1384 
1385 	return budget;
1386 }
1387 
1388 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
1389 			    unsigned int *done, unsigned int *bytes)
1390 {
1391 	struct mtk_tx_ring *ring = &eth->tx_ring;
1392 	struct mtk_tx_dma *desc;
1393 	struct sk_buff *skb;
1394 	struct mtk_tx_buf *tx_buf;
1395 	u32 cpu, dma;
1396 
1397 	cpu = ring->cpu_idx;
1398 	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
1399 
1400 	while ((cpu != dma) && budget) {
1401 		tx_buf = &ring->buf[cpu];
1402 		skb = tx_buf->skb;
1403 		if (!skb)
1404 			break;
1405 
1406 		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1407 			bytes[0] += skb->len;
1408 			done[0]++;
1409 			budget--;
1410 		}
1411 
1412 		mtk_tx_unmap(eth, tx_buf);
1413 
1414 		desc = &ring->dma[cpu];
1415 		ring->last_free = desc;
1416 		atomic_inc(&ring->free_count);
1417 
1418 		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
1419 	}
1420 
1421 	ring->cpu_idx = cpu;
1422 
1423 	return budget;
1424 }
1425 
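/* Reclaim completed TX descriptors, free the associated skbs and report the
 * per-netdev completions to BQL via netdev_completed_queue() so that stopped
 * queues can be woken once enough descriptors are free again.
 */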
1426 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1427 {
1428 	struct mtk_tx_ring *ring = &eth->tx_ring;
1429 	unsigned int done[MTK_MAX_DEVS];
1430 	unsigned int bytes[MTK_MAX_DEVS];
1431 	int total = 0, i;
1432 
1433 	memset(done, 0, sizeof(done));
1434 	memset(bytes, 0, sizeof(bytes));
1435 
1436 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1437 		budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
1438 	else
1439 		budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
1440 
1441 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1442 		if (!eth->netdev[i] || !done[i])
1443 			continue;
1444 		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1445 		total += done[i];
1446 	}
1447 
1448 	if (mtk_queue_stopped(eth) &&
1449 	    (atomic_read(&ring->free_count) > ring->thresh))
1450 		mtk_wake_queue(eth);
1451 
1452 	return total;
1453 }
1454 
1455 static void mtk_handle_status_irq(struct mtk_eth *eth)
1456 {
1457 	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
1458 
1459 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
1460 		mtk_stats_update(eth);
1461 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
1462 			MTK_INT_STATUS2);
1463 	}
1464 }
1465 
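/* Both NAPI handlers ack the interrupt status before polling and re-check it
 * prior to napi_complete(), so a completion racing with the poll is not
 * lost; the interrupt is only unmasked again once no work is pending.
 */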
1466 static int mtk_napi_tx(struct napi_struct *napi, int budget)
1467 {
1468 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
1469 	u32 status, mask;
1470 	int tx_done = 0;
1471 
1472 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1473 		mtk_handle_status_irq(eth);
1474 	mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
1475 	tx_done = mtk_poll_tx(eth, budget);
1476 
1477 	if (unlikely(netif_msg_intr(eth))) {
1478 		status = mtk_r32(eth, eth->tx_int_status_reg);
1479 		mask = mtk_r32(eth, eth->tx_int_mask_reg);
1480 		dev_info(eth->dev,
1481 			 "done tx %d, intr 0x%08x/0x%x\n",
1482 			 tx_done, status, mask);
1483 	}
1484 
1485 	if (tx_done == budget)
1486 		return budget;
1487 
1488 	status = mtk_r32(eth, eth->tx_int_status_reg);
1489 	if (status & MTK_TX_DONE_INT)
1490 		return budget;
1491 
1492 	napi_complete(napi);
1493 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
1494 
1495 	return tx_done;
1496 }
1497 
1498 static int mtk_napi_rx(struct napi_struct *napi, int budget)
1499 {
1500 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
1501 	u32 status, mask;
1502 	int rx_done = 0;
1503 	int remain_budget = budget;
1504 
1505 	mtk_handle_status_irq(eth);
1506 
1507 poll_again:
1508 	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
1509 	rx_done = mtk_poll_rx(napi, remain_budget, eth);
1510 
1511 	if (unlikely(netif_msg_intr(eth))) {
1512 		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1513 		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
1514 		dev_info(eth->dev,
1515 			 "done rx %d, intr 0x%08x/0x%x\n",
1516 			 rx_done, status, mask);
1517 	}
1518 	if (rx_done == remain_budget)
1519 		return budget;
1520 
1521 	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1522 	if (status & MTK_RX_DONE_INT) {
1523 		remain_budget -= rx_done;
1524 		goto poll_again;
1525 	}
1526 	napi_complete(napi);
1527 	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
1528 
1529 	return rx_done + budget - remain_budget;
1530 }
1531 
1532 static int mtk_tx_alloc(struct mtk_eth *eth)
1533 {
1534 	struct mtk_tx_ring *ring = &eth->tx_ring;
1535 	int i, sz = sizeof(*ring->dma);
1536 
1537 	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
1538 			       GFP_KERNEL);
1539 	if (!ring->buf)
1540 		goto no_tx_mem;
1541 
1542 	ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1543 				       &ring->phys, GFP_ATOMIC);
1544 	if (!ring->dma)
1545 		goto no_tx_mem;
1546 
1547 	for (i = 0; i < MTK_DMA_SIZE; i++) {
1548 		int next = (i + 1) % MTK_DMA_SIZE;
1549 		u32 next_ptr = ring->phys + next * sz;
1550 
1551 		ring->dma[i].txd2 = next_ptr;
1552 		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1553 	}
1554 
1555 	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
1556 	 * only as the framework. The real HW descriptors are the PDMA
1557 	 * descriptors in ring->dma_pdma.
1558 	 */
1559 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1560 		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1561 						    &ring->phys_pdma,
1562 						    GFP_ATOMIC);
1563 		if (!ring->dma_pdma)
1564 			goto no_tx_mem;
1565 
1566 		for (i = 0; i < MTK_DMA_SIZE; i++) {
1567 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
1568 			ring->dma_pdma[i].txd4 = 0;
1569 		}
1570 	}
1571 
1572 	ring->dma_size = MTK_DMA_SIZE;
1573 	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
1574 	ring->next_free = &ring->dma[0];
1575 	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
1576 	ring->thresh = MAX_SKB_FRAGS;
1577 
1578 	/* make sure that all changes to the dma ring are flushed before we
1579 	 * continue
1580 	 */
1581 	wmb();
1582 
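	/* Hand the ring over to the hardware: QDMA takes CPU/DMA descriptor
	 * pointers (CTX/DTX) plus the release pointers used during reclaim
	 * (CRX/DRX, see mtk_poll_tx_qdma()), while PDMA only needs the ring
	 * base address, its size and an initial CPU index.
	 */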
1583 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1584 		mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1585 		mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1586 		mtk_w32(eth,
1587 			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1588 			MTK_QTX_CRX_PTR);
1589 		mtk_w32(eth,
1590 			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1591 			MTK_QTX_DRX_PTR);
1592 		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
1593 			MTK_QTX_CFG(0));
1594 	} else {
1595 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
1596 		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
1597 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
1598 		mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
1599 	}
1600 
1601 	return 0;
1602 
1603 no_tx_mem:
1604 	return -ENOMEM;
1605 }
1606 
1607 static void mtk_tx_clean(struct mtk_eth *eth)
1608 {
1609 	struct mtk_tx_ring *ring = &eth->tx_ring;
1610 	int i;
1611 
1612 	if (ring->buf) {
1613 		for (i = 0; i < MTK_DMA_SIZE; i++)
1614 			mtk_tx_unmap(eth, &ring->buf[i]);
1615 		kfree(ring->buf);
1616 		ring->buf = NULL;
1617 	}
1618 
1619 	if (ring->dma) {
1620 		dma_free_coherent(eth->dev,
1621 				  MTK_DMA_SIZE * sizeof(*ring->dma),
1622 				  ring->dma,
1623 				  ring->phys);
1624 		ring->dma = NULL;
1625 	}
1626 
1627 	if (ring->dma_pdma) {
1628 		dma_free_coherent(eth->dev,
1629 				  MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
1630 				  ring->dma_pdma,
1631 				  ring->phys_pdma);
1632 		ring->dma_pdma = NULL;
1633 	}
1634 }
1635 
1636 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1637 {
1638 	struct mtk_rx_ring *ring;
1639 	int rx_data_len, rx_dma_size;
1640 	int i;
1641 	u32 offset = 0;
1642 
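	/* The QDMA-fed RX ring reuses the PDMA RX register layout at a fixed
	 * 0x1000 offset, so the MTK_PRX_* macros below are used with
	 * "offset" added in.
	 */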
1643 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
1644 		if (ring_no)
1645 			return -EINVAL;
1646 		ring = &eth->rx_ring_qdma;
1647 		offset = 0x1000;
1648 	} else {
1649 		ring = &eth->rx_ring[ring_no];
1650 	}
1651 
1652 	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
1653 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
1654 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
1655 	} else {
1656 		rx_data_len = ETH_DATA_LEN;
1657 		rx_dma_size = MTK_DMA_SIZE;
1658 	}
1659 
1660 	ring->frag_size = mtk_max_frag_size(rx_data_len);
1661 	ring->buf_size = mtk_max_buf_size(ring->frag_size);
1662 	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
1663 			     GFP_KERNEL);
1664 	if (!ring->data)
1665 		return -ENOMEM;
1666 
1667 	for (i = 0; i < rx_dma_size; i++) {
1668 		ring->data[i] = netdev_alloc_frag(ring->frag_size);
1669 		if (!ring->data[i])
1670 			return -ENOMEM;
1671 	}
1672 
1673 	ring->dma = dma_alloc_coherent(eth->dev,
1674 				       rx_dma_size * sizeof(*ring->dma),
1675 				       &ring->phys, GFP_ATOMIC);
1676 	if (!ring->dma)
1677 		return -ENOMEM;
1678 
1679 	for (i = 0; i < rx_dma_size; i++) {
1680 		dma_addr_t dma_addr = dma_map_single(eth->dev,
1681 				ring->data[i] + NET_SKB_PAD + eth->ip_align,
1682 				ring->buf_size,
1683 				DMA_FROM_DEVICE);
1684 		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1685 			return -ENOMEM;
1686 		ring->dma[i].rxd1 = (unsigned int)dma_addr;
1687 
1688 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1689 			ring->dma[i].rxd2 = RX_DMA_LSO;
1690 		else
1691 			ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
1692 	}
1693 	ring->dma_size = rx_dma_size;
1694 	ring->calc_idx_update = false;
1695 	ring->calc_idx = rx_dma_size - 1;
1696 	ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
1697 	/* make sure that all changes to the dma ring are flushed before we
1698 	 * continue
1699 	 */
1700 	wmb();
1701 
1702 	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
1703 	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
1704 	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
1705 	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
1706 
1707 	return 0;
1708 }
1709 
1710 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
1711 {
1712 	int i;
1713 
1714 	if (ring->data && ring->dma) {
1715 		for (i = 0; i < ring->dma_size; i++) {
1716 			if (!ring->data[i])
1717 				continue;
1718 			if (!ring->dma[i].rxd1)
1719 				continue;
1720 			dma_unmap_single(eth->dev,
1721 					 ring->dma[i].rxd1,
1722 					 ring->buf_size,
1723 					 DMA_FROM_DEVICE);
1724 			skb_free_frag(ring->data[i]);
1725 		}
1726 		kfree(ring->data);
1727 		ring->data = NULL;
1728 	}
1729 
1730 	if (ring->dma) {
1731 		dma_free_coherent(eth->dev,
1732 				  ring->dma_size * sizeof(*ring->dma),
1733 				  ring->dma,
1734 				  ring->phys);
1735 		ring->dma = NULL;
1736 	}
1737 }
1738 
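/* Ring 0 is left as a normal RX ring; rings 1..MTK_MAX_RX_RING_NUM - 1 are
 * switched to auto-learn LRO mode with the aggregation timers and counters
 * configured below.
 */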
1739 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
1740 {
1741 	int i;
1742 	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
1743 	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
1744 
1745 	/* set LRO rings to auto-learn modes */
1746 	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
1747 
1748 	/* validate LRO ring */
1749 	ring_ctrl_dw2 |= MTK_RING_VLD;
1750 
1751 	/* set AGE timer (unit: 20us) */
1752 	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
1753 	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
1754 
1755 	/* set max AGG timer (unit: 20us) */
1756 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
1757 
1758 	/* set max LRO AGG count */
1759 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
1760 	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
1761 
1762 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
1763 		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
1764 		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
1765 		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
1766 	}
1767 
1768 	/* IPv4 checksum update enable */
1769 	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
1770 
1771 	/* switch priority comparison to packet count mode */
1772 	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
1773 
1774 	/* bandwidth threshold setting */
1775 	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
1776 
1777 	/* auto-learn score delta setting */
1778 	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
1779 
1780 	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
1781 	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
1782 		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
1783 
1784 	/* set HW LRO mode & the max aggregation count for rx packets */
1785 	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
1786 
	/* minimal remaining room in SDL0 of an RXD for LRO aggregation */
1788 	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
1789 
1790 	/* enable HW LRO */
1791 	lro_ctrl_dw0 |= MTK_LRO_EN;
1792 
1793 	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
1794 	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
1795 
1796 	return 0;
1797 }
1798 
1799 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
1800 {
1801 	int i;
1802 	u32 val;
1803 
1804 	/* relinquish lro rings, flush aggregated packets */
1805 	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
1806 
1807 	/* wait for relinquishments done */
1808 	for (i = 0; i < 10; i++) {
1809 		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
1810 		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
1811 			msleep(20);
1812 			continue;
1813 		}
1814 		break;
1815 	}
1816 
1817 	/* invalidate lro rings */
1818 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
1819 		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
1820 
1821 	/* disable HW LRO */
1822 	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
1823 }
1824 
1825 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
1826 {
1827 	u32 reg_val;
1828 
1829 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1830 
1831 	/* invalidate the IP setting */
1832 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1833 
1834 	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
1835 
1836 	/* validate the IP setting */
1837 	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1838 }
1839 
1840 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
1841 {
1842 	u32 reg_val;
1843 
1844 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1845 
1846 	/* invalidate the IP setting */
1847 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1848 
1849 	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
1850 }
1851 
1852 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
1853 {
1854 	int cnt = 0;
1855 	int i;
1856 
1857 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1858 		if (mac->hwlro_ip[i])
1859 			cnt++;
1860 	}
1861 
1862 	return cnt;
1863 }
1864 
1865 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
1866 				struct ethtool_rxnfc *cmd)
1867 {
1868 	struct ethtool_rx_flow_spec *fsp =
1869 		(struct ethtool_rx_flow_spec *)&cmd->fs;
1870 	struct mtk_mac *mac = netdev_priv(dev);
1871 	struct mtk_eth *eth = mac->hw;
1872 	int hwlro_idx;
1873 
1874 	if ((fsp->flow_type != TCP_V4_FLOW) ||
1875 	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
1876 	    (fsp->location > 1))
1877 		return -EINVAL;
1878 
1879 	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
1880 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1881 
1882 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1883 
1884 	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
1885 
1886 	return 0;
1887 }
1888 
1889 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
1890 				struct ethtool_rxnfc *cmd)
1891 {
1892 	struct ethtool_rx_flow_spec *fsp =
1893 		(struct ethtool_rx_flow_spec *)&cmd->fs;
1894 	struct mtk_mac *mac = netdev_priv(dev);
1895 	struct mtk_eth *eth = mac->hw;
1896 	int hwlro_idx;
1897 
1898 	if (fsp->location > 1)
1899 		return -EINVAL;
1900 
1901 	mac->hwlro_ip[fsp->location] = 0;
1902 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1903 
1904 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1905 
1906 	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1907 
1908 	return 0;
1909 }
1910 
1911 static void mtk_hwlro_netdev_disable(struct net_device *dev)
1912 {
1913 	struct mtk_mac *mac = netdev_priv(dev);
1914 	struct mtk_eth *eth = mac->hw;
1915 	int i, hwlro_idx;
1916 
1917 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1918 		mac->hwlro_ip[i] = 0;
1919 		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
1920 
1921 		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1922 	}
1923 
1924 	mac->hwlro_ip_cnt = 0;
1925 }
1926 
1927 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
1928 				    struct ethtool_rxnfc *cmd)
1929 {
1930 	struct mtk_mac *mac = netdev_priv(dev);
1931 	struct ethtool_rx_flow_spec *fsp =
1932 		(struct ethtool_rx_flow_spec *)&cmd->fs;
1933 
	/* only the destination IPv4 address of a TCP flow is meaningful;
	 * all other fields are ignored
	 */
1935 	fsp->flow_type = TCP_V4_FLOW;
1936 	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
1937 	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
1938 
1939 	fsp->h_u.tcp_ip4_spec.ip4src = 0;
1940 	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
1941 	fsp->h_u.tcp_ip4_spec.psrc = 0;
1942 	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
1943 	fsp->h_u.tcp_ip4_spec.pdst = 0;
1944 	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
1945 	fsp->h_u.tcp_ip4_spec.tos = 0;
1946 	fsp->m_u.tcp_ip4_spec.tos = 0xff;
1947 
1948 	return 0;
1949 }
1950 
1951 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
1952 				  struct ethtool_rxnfc *cmd,
1953 				  u32 *rule_locs)
1954 {
1955 	struct mtk_mac *mac = netdev_priv(dev);
1956 	int cnt = 0;
1957 	int i;
1958 
1959 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1960 		if (mac->hwlro_ip[i]) {
1961 			rule_locs[cnt] = i;
1962 			cnt++;
1963 		}
1964 	}
1965 
1966 	cmd->rule_cnt = cnt;
1967 
1968 	return 0;
1969 }
1970 
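/* LRO cannot be turned off via the feature flags while ethtool flow rules
 * are still programmed; force NETIF_F_LRO back on in that case.
 */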
1971 static netdev_features_t mtk_fix_features(struct net_device *dev,
1972 					  netdev_features_t features)
1973 {
1974 	if (!(features & NETIF_F_LRO)) {
1975 		struct mtk_mac *mac = netdev_priv(dev);
1976 		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1977 
1978 		if (ip_cnt) {
			netdev_info(dev, "RX flow rules are programmed, keeping LRO enabled\n");
1980 
1981 			features |= NETIF_F_LRO;
1982 		}
1983 	}
1984 
1985 	return features;
1986 }
1987 
1988 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
1989 {
1990 	int err = 0;
1991 
1992 	if (!((dev->features ^ features) & NETIF_F_LRO))
1993 		return 0;
1994 
1995 	if (!(features & NETIF_F_LRO))
1996 		mtk_hwlro_netdev_disable(dev);
1997 
1998 	return err;
1999 }
2000 
2001 /* wait for DMA to finish whatever it is doing before we start using it again */
2002 static int mtk_dma_busy_wait(struct mtk_eth *eth)
2003 {
2004 	unsigned long t_start = jiffies;
2005 
2006 	while (1) {
2007 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2008 			if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
2009 			      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2010 				return 0;
2011 		} else {
2012 			if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
2013 			      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2014 				return 0;
2015 		}
2016 
2017 		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
2018 			break;
2019 	}
2020 
2021 	dev_err(eth->dev, "DMA init timeout\n");
2022 	return -1;
2023 }
2024 
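/* Allocate and initialize all DMA rings: the QDMA scratch/free queue (when
 * the SoC has QDMA), the TX ring, the RX ring(s) and, if supported, the
 * extra hardware LRO RX rings, then set up the QDMA flow-control and
 * random-early-drop defaults.
 */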
2025 static int mtk_dma_init(struct mtk_eth *eth)
2026 {
2027 	int err;
2028 	u32 i;
2029 
2030 	if (mtk_dma_busy_wait(eth))
2031 		return -EBUSY;
2032 
2033 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2034 		/* QDMA needs scratch memory for internal reordering of the
2035 		 * descriptors
2036 		 */
2037 		err = mtk_init_fq_dma(eth);
2038 		if (err)
2039 			return err;
2040 	}
2041 
2042 	err = mtk_tx_alloc(eth);
2043 	if (err)
2044 		return err;
2045 
2046 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2047 		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2048 		if (err)
2049 			return err;
2050 	}
2051 
2052 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2053 	if (err)
2054 		return err;
2055 
2056 	if (eth->hwlro) {
2057 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2058 			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2059 			if (err)
2060 				return err;
2061 		}
2062 		err = mtk_hwlro_rx_init(eth);
2063 		if (err)
2064 			return err;
2065 	}
2066 
2067 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2068 		/* Enable random early drop and set drop threshold
2069 		 * automatically
2070 		 */
2071 		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2072 			FC_THRES_MIN, MTK_QDMA_FC_THRES);
2073 		mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
2074 	}
2075 
2076 	return 0;
2077 }
2078 
2079 static void mtk_dma_free(struct mtk_eth *eth)
2080 {
2081 	int i;
2082 
2083 	for (i = 0; i < MTK_MAC_COUNT; i++)
2084 		if (eth->netdev[i])
2085 			netdev_reset_queue(eth->netdev[i]);
2086 	if (eth->scratch_ring) {
2087 		dma_free_coherent(eth->dev,
2088 				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
2089 				  eth->scratch_ring,
2090 				  eth->phy_scratch_ring);
2091 		eth->scratch_ring = NULL;
2092 		eth->phy_scratch_ring = 0;
2093 	}
2094 	mtk_tx_clean(eth);
2095 	mtk_rx_clean(eth, &eth->rx_ring[0]);
2096 	mtk_rx_clean(eth, &eth->rx_ring_qdma);
2097 
2098 	if (eth->hwlro) {
2099 		mtk_hwlro_rx_uninit(eth);
2100 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2101 			mtk_rx_clean(eth, &eth->rx_ring[i]);
2102 	}
2103 
2104 	kfree(eth->scratch_head);
2105 }
2106 
2107 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
2108 {
2109 	struct mtk_mac *mac = netdev_priv(dev);
2110 	struct mtk_eth *eth = mac->hw;
2111 
2112 	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev, "transmit timed out\n");
2115 	schedule_work(&eth->pending_work);
2116 }
2117 
2118 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
2119 {
2120 	struct mtk_eth *eth = _eth;
2121 
2122 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
2123 		__napi_schedule(&eth->rx_napi);
2124 		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
2125 	}
2126 
2127 	return IRQ_HANDLED;
2128 }
2129 
2130 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2131 {
2132 	struct mtk_eth *eth = _eth;
2133 
2134 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
2135 		__napi_schedule(&eth->tx_napi);
2136 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2137 	}
2138 
2139 	return IRQ_HANDLED;
2140 }
2141 
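/* Shared interrupt handler used when the SoC exposes a single IRQ line for
 * both TX and RX: check the masked status bits and dispatch to the RX and
 * TX handlers accordingly.
 */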
2142 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2143 {
2144 	struct mtk_eth *eth = _eth;
2145 
2146 	if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
2147 		if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
2148 			mtk_handle_irq_rx(irq, _eth);
2149 	}
2150 	if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
2151 		if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
2152 			mtk_handle_irq_tx(irq, _eth);
2153 	}
2154 
2155 	return IRQ_HANDLED;
2156 }
2157 
2158 #ifdef CONFIG_NET_POLL_CONTROLLER
2159 static void mtk_poll_controller(struct net_device *dev)
2160 {
2161 	struct mtk_mac *mac = netdev_priv(dev);
2162 	struct mtk_eth *eth = mac->hw;
2163 
2164 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2165 	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	mtk_handle_irq_rx(eth->irq[2], eth);
2167 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2168 	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
2169 }
2170 #endif
2171 
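/* Bring up the DMA engines: allocate the rings and enable TX/RX DMA on
 * either the QDMA + PDMA combination or the PDMA alone, depending on the
 * SoC capabilities.
 */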
2172 static int mtk_start_dma(struct mtk_eth *eth)
2173 {
2174 	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
2175 	int err;
2176 
2177 	err = mtk_dma_init(eth);
2178 	if (err) {
2179 		mtk_dma_free(eth);
2180 		return err;
2181 	}
2182 
2183 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2184 		mtk_w32(eth,
2185 			MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
2186 			MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
2187 			MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
2188 			MTK_RX_BT_32DWORDS,
2189 			MTK_QDMA_GLO_CFG);
2190 
2191 		mtk_w32(eth,
2192 			MTK_RX_DMA_EN | rx_2b_offset |
2193 			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2194 			MTK_PDMA_GLO_CFG);
2195 	} else {
2196 		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2197 			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2198 			MTK_PDMA_GLO_CFG);
2199 	}
2200 
2201 	return 0;
2202 }
2203 
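/* Configure the GDMA forwarding target for every MAC (e.g. forward to the
 * PDMA or drop everything), re-enable RX checksum offload and pulse the
 * PSE reset. Not applicable to MT7628-class SoCs.
 */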
2204 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
2205 {
2206 	int i;
2207 
2208 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2209 		return;
2210 
2211 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2212 		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2213 
		/* by default, set up the forward port to send frames to the PDMA */
2215 		val &= ~0xffff;
2216 
2217 		/* Enable RX checksum */
2218 		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2219 
2220 		val |= config;
2221 
2222 		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2223 	}
2224 	/* Reset and enable PSE */
2225 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
2226 	mtk_w32(eth, 0, MTK_RST_GL);
2227 }
2228 
2229 static int mtk_open(struct net_device *dev)
2230 {
2231 	struct mtk_mac *mac = netdev_priv(dev);
2232 	struct mtk_eth *eth = mac->hw;
2233 	int err;
2234 
2235 	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
2236 	if (err) {
2237 		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
2238 			   err);
2239 		return err;
2240 	}
2241 
2242 	/* we run 2 netdevs on the same dma ring so we only bring it up once */
2243 	if (!refcount_read(&eth->dma_refcnt)) {
2244 		int err = mtk_start_dma(eth);
2245 
2246 		if (err)
2247 			return err;
2248 
2249 		mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
2250 
2251 		napi_enable(&eth->tx_napi);
2252 		napi_enable(&eth->rx_napi);
2253 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2254 		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
2255 		refcount_set(&eth->dma_refcnt, 1);
	} else {
		refcount_inc(&eth->dma_refcnt);
	}
2259 
2260 	phylink_start(mac->phylink);
2261 	netif_start_queue(dev);
2262 	return 0;
2263 }
2264 
2265 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
2266 {
2267 	u32 val;
2268 	int i;
2269 
2270 	/* stop the dma engine */
2271 	spin_lock_bh(&eth->page_lock);
2272 	val = mtk_r32(eth, glo_cfg);
2273 	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
2274 		glo_cfg);
2275 	spin_unlock_bh(&eth->page_lock);
2276 
2277 	/* wait for dma stop */
2278 	for (i = 0; i < 10; i++) {
2279 		val = mtk_r32(eth, glo_cfg);
2280 		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
2281 			msleep(20);
2282 			continue;
2283 		}
2284 		break;
2285 	}
2286 }
2287 
2288 static int mtk_stop(struct net_device *dev)
2289 {
2290 	struct mtk_mac *mac = netdev_priv(dev);
2291 	struct mtk_eth *eth = mac->hw;
2292 
2293 	phylink_stop(mac->phylink);
2294 
2295 	netif_tx_disable(dev);
2296 
2297 	phylink_disconnect_phy(mac->phylink);
2298 
2299 	/* only shutdown DMA if this is the last user */
2300 	if (!refcount_dec_and_test(&eth->dma_refcnt))
2301 		return 0;
2302 
2303 	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
2304 
2305 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2306 	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
2307 	napi_disable(&eth->tx_napi);
2308 	napi_disable(&eth->rx_napi);
2309 
2310 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2311 		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
2312 	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
2313 
2314 	mtk_dma_free(eth);
2315 
2316 	return 0;
2317 }
2318 
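/* Assert the given reset bits in the ethsys reset control register, hold
 * them for at least 1ms, then deassert and give the blocks 10ms to settle.
 */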
2319 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
2320 {
2321 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2322 			   reset_bits,
2323 			   reset_bits);
2324 
2325 	usleep_range(1000, 1100);
2326 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2327 			   reset_bits,
2328 			   ~reset_bits);
2329 	mdelay(10);
2330 }
2331 
2332 static void mtk_clk_disable(struct mtk_eth *eth)
2333 {
2334 	int clk;
2335 
2336 	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
2337 		clk_disable_unprepare(eth->clks[clk]);
2338 }
2339 
2340 static int mtk_clk_enable(struct mtk_eth *eth)
2341 {
2342 	int clk, ret;
2343 
	for (clk = 0; clk < MTK_CLK_MAX; clk++) {
2345 		ret = clk_prepare_enable(eth->clks[clk]);
2346 		if (ret)
2347 			goto err_disable_clks;
2348 	}
2349 
2350 	return 0;
2351 
2352 err_disable_clks:
2353 	while (--clk >= 0)
2354 		clk_disable_unprepare(eth->clks[clk]);
2355 
2356 	return ret;
2357 }
2358 
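/* One-time hardware bring-up: enable runtime PM and the clocks, reset the
 * frame engine (or the whole MAC block on MT7628, which returns early),
 * apply optional pad driving/slew settings, force every GMAC link-down and
 * program the interrupt delay and grouping defaults.
 */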
2359 static int mtk_hw_init(struct mtk_eth *eth)
2360 {
2361 	int i, val, ret;
2362 
2363 	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
2364 		return 0;
2365 
2366 	pm_runtime_enable(eth->dev);
2367 	pm_runtime_get_sync(eth->dev);
2368 
2369 	ret = mtk_clk_enable(eth);
2370 	if (ret)
2371 		goto err_disable_pm;
2372 
2373 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2374 		ret = device_reset(eth->dev);
2375 		if (ret) {
2376 			dev_err(eth->dev, "MAC reset failed!\n");
2377 			goto err_disable_pm;
2378 		}
2379 
2380 		/* enable interrupt delay for RX */
2381 		mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
2382 
2383 		/* disable delay and normal interrupt */
2384 		mtk_tx_irq_disable(eth, ~0);
2385 		mtk_rx_irq_disable(eth, ~0);
2386 
2387 		return 0;
2388 	}
2389 
2390 	/* Non-MT7628 handling... */
2391 	ethsys_reset(eth, RSTCTRL_FE);
2392 	ethsys_reset(eth, RSTCTRL_PPE);
2393 
2394 	if (eth->pctl) {
2395 		/* Set GE2 driving and slew rate */
2396 		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
2397 
2398 		/* set GE2 TDSEL */
2399 		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
2400 
2401 		/* set GE2 TUNE */
2402 		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
2403 	}
2404 
	/* Set each GMAC to force link-down by default. The per-MAC MCR is
	 * programmed with a more appropriate value when mtk_mac_config()
	 * is invoked.
	 */
2409 	for (i = 0; i < MTK_MAC_COUNT; i++)
2410 		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
2411 
	/* Tell the CDM to parse the MTK special tag from CPU-originated
	 * packets; this also works for untagged packets.
	 */
2415 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
2416 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
2417 
	/* Enable RX VLAN offloading */
2419 	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2420 
2421 	/* enable interrupt delay for RX */
2422 	mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
2423 
2424 	/* disable delay and normal interrupt */
2425 	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
2426 	mtk_tx_irq_disable(eth, ~0);
2427 	mtk_rx_irq_disable(eth, ~0);
2428 
2429 	/* FE int grouping */
2430 	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
2431 	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
2432 	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
2433 	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
2434 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
2435 
2436 	return 0;
2437 
2438 err_disable_pm:
2439 	pm_runtime_put_sync(eth->dev);
2440 	pm_runtime_disable(eth->dev);
2441 
2442 	return ret;
2443 }
2444 
2445 static int mtk_hw_deinit(struct mtk_eth *eth)
2446 {
2447 	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
2448 		return 0;
2449 
2450 	mtk_clk_disable(eth);
2451 
2452 	pm_runtime_put_sync(eth->dev);
2453 	pm_runtime_disable(eth->dev);
2454 
2455 	return 0;
2456 }
2457 
2458 static int __init mtk_init(struct net_device *dev)
2459 {
2460 	struct mtk_mac *mac = netdev_priv(dev);
2461 	struct mtk_eth *eth = mac->hw;
2462 	const char *mac_addr;
2463 
2464 	mac_addr = of_get_mac_address(mac->of_node);
2465 	if (!IS_ERR(mac_addr))
2466 		ether_addr_copy(dev->dev_addr, mac_addr);
2467 
	/* If the MAC address is invalid, use a random MAC address */
2469 	if (!is_valid_ether_addr(dev->dev_addr)) {
2470 		eth_hw_addr_random(dev);
2471 		dev_err(eth->dev, "generated random MAC address %pM\n",
2472 			dev->dev_addr);
2473 	}
2474 
2475 	return 0;
2476 }
2477 
2478 static void mtk_uninit(struct net_device *dev)
2479 {
2480 	struct mtk_mac *mac = netdev_priv(dev);
2481 	struct mtk_eth *eth = mac->hw;
2482 
2483 	phylink_disconnect_phy(mac->phylink);
2484 	mtk_tx_irq_disable(eth, ~0);
2485 	mtk_rx_irq_disable(eth, ~0);
2486 }
2487 
2488 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2489 {
2490 	struct mtk_mac *mac = netdev_priv(dev);
2491 
2492 	switch (cmd) {
2493 	case SIOCGMIIPHY:
2494 	case SIOCGMIIREG:
2495 	case SIOCSMIIREG:
2496 		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
2497 	default:
2498 		break;
2499 	}
2500 
2501 	return -EOPNOTSUPP;
2502 }
2503 
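/* Reset worker scheduled from the TX timeout handler: under rtnl, stop all
 * netdevs, re-initialize the hardware and bring the interfaces that were
 * running back up again.
 */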
2504 static void mtk_pending_work(struct work_struct *work)
2505 {
2506 	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
2507 	int err, i;
2508 	unsigned long restart = 0;
2509 
2510 	rtnl_lock();
2511 
2512 	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
2513 
2514 	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
2515 		cpu_relax();
2516 
2517 	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
2518 	/* stop all devices to make sure that dma is properly shut down */
2519 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2520 		if (!eth->netdev[i])
2521 			continue;
2522 		mtk_stop(eth->netdev[i]);
2523 		__set_bit(i, &restart);
2524 	}
2525 	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
2526 
2527 	/* restart underlying hardware such as power, clock, pin mux
2528 	 * and the connected phy
2529 	 */
2530 	mtk_hw_deinit(eth);
2531 
2532 	if (eth->dev->pins)
2533 		pinctrl_select_state(eth->dev->pins->p,
2534 				     eth->dev->pins->default_state);
2535 	mtk_hw_init(eth);
2536 
2537 	/* restart DMA and enable IRQs */
2538 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2539 		if (!test_bit(i, &restart))
2540 			continue;
2541 		err = mtk_open(eth->netdev[i]);
2542 		if (err) {
2543 			netif_alert(eth, ifup, eth->netdev[i],
2544 			      "Driver up/down cycle failed, closing device.\n");
2545 			dev_close(eth->netdev[i]);
2546 		}
2547 	}
2548 
2549 	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
2550 
2551 	clear_bit_unlock(MTK_RESETTING, &eth->state);
2552 
2553 	rtnl_unlock();
2554 }
2555 
2556 static int mtk_free_dev(struct mtk_eth *eth)
2557 {
2558 	int i;
2559 
2560 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2561 		if (!eth->netdev[i])
2562 			continue;
2563 		free_netdev(eth->netdev[i]);
2564 	}
2565 
2566 	return 0;
2567 }
2568 
2569 static int mtk_unreg_dev(struct mtk_eth *eth)
2570 {
2571 	int i;
2572 
2573 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2574 		if (!eth->netdev[i])
2575 			continue;
2576 		unregister_netdev(eth->netdev[i]);
2577 	}
2578 
2579 	return 0;
2580 }
2581 
2582 static int mtk_cleanup(struct mtk_eth *eth)
2583 {
2584 	mtk_unreg_dev(eth);
2585 	mtk_free_dev(eth);
2586 	cancel_work_sync(&eth->pending_work);
2587 
2588 	return 0;
2589 }
2590 
2591 static int mtk_get_link_ksettings(struct net_device *ndev,
2592 				  struct ethtool_link_ksettings *cmd)
2593 {
2594 	struct mtk_mac *mac = netdev_priv(ndev);
2595 
2596 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2597 		return -EBUSY;
2598 
2599 	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
2600 }
2601 
2602 static int mtk_set_link_ksettings(struct net_device *ndev,
2603 				  const struct ethtool_link_ksettings *cmd)
2604 {
2605 	struct mtk_mac *mac = netdev_priv(ndev);
2606 
2607 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2608 		return -EBUSY;
2609 
2610 	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
2611 }
2612 
2613 static void mtk_get_drvinfo(struct net_device *dev,
2614 			    struct ethtool_drvinfo *info)
2615 {
2616 	struct mtk_mac *mac = netdev_priv(dev);
2617 
2618 	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
2619 	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
2620 	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
2621 }
2622 
2623 static u32 mtk_get_msglevel(struct net_device *dev)
2624 {
2625 	struct mtk_mac *mac = netdev_priv(dev);
2626 
2627 	return mac->hw->msg_enable;
2628 }
2629 
2630 static void mtk_set_msglevel(struct net_device *dev, u32 value)
2631 {
2632 	struct mtk_mac *mac = netdev_priv(dev);
2633 
2634 	mac->hw->msg_enable = value;
2635 }
2636 
2637 static int mtk_nway_reset(struct net_device *dev)
2638 {
2639 	struct mtk_mac *mac = netdev_priv(dev);
2640 
2641 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2642 		return -EBUSY;
2643 
2644 	if (!mac->phylink)
2645 		return -ENOTSUPP;
2646 
2647 	return phylink_ethtool_nway_reset(mac->phylink);
2648 }
2649 
2650 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2651 {
2652 	int i;
2653 
2654 	switch (stringset) {
2655 	case ETH_SS_STATS:
2656 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
2657 			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
2658 			data += ETH_GSTRING_LEN;
2659 		}
2660 		break;
2661 	}
2662 }
2663 
2664 static int mtk_get_sset_count(struct net_device *dev, int sset)
2665 {
2666 	switch (sset) {
2667 	case ETH_SS_STATS:
2668 		return ARRAY_SIZE(mtk_ethtool_stats);
2669 	default:
2670 		return -EOPNOTSUPP;
2671 	}
2672 }
2673 
2674 static void mtk_get_ethtool_stats(struct net_device *dev,
2675 				  struct ethtool_stats *stats, u64 *data)
2676 {
2677 	struct mtk_mac *mac = netdev_priv(dev);
2678 	struct mtk_hw_stats *hwstats = mac->hw_stats;
2679 	u64 *data_src, *data_dst;
2680 	unsigned int start;
2681 	int i;
2682 
2683 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2684 		return;
2685 
2686 	if (netif_running(dev) && netif_device_present(dev)) {
2687 		if (spin_trylock_bh(&hwstats->stats_lock)) {
2688 			mtk_stats_update_mac(mac);
2689 			spin_unlock_bh(&hwstats->stats_lock);
2690 		}
2691 	}
2692 
2693 	data_src = (u64 *)hwstats;
2694 
2695 	do {
2696 		data_dst = data;
2697 		start = u64_stats_fetch_begin_irq(&hwstats->syncp);
2698 
2699 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
2700 			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
2701 	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
2702 }
2703 
2704 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2705 			 u32 *rule_locs)
2706 {
2707 	int ret = -EOPNOTSUPP;
2708 
2709 	switch (cmd->cmd) {
2710 	case ETHTOOL_GRXRINGS:
2711 		if (dev->hw_features & NETIF_F_LRO) {
2712 			cmd->data = MTK_MAX_RX_RING_NUM;
2713 			ret = 0;
2714 		}
2715 		break;
2716 	case ETHTOOL_GRXCLSRLCNT:
2717 		if (dev->hw_features & NETIF_F_LRO) {
2718 			struct mtk_mac *mac = netdev_priv(dev);
2719 
2720 			cmd->rule_cnt = mac->hwlro_ip_cnt;
2721 			ret = 0;
2722 		}
2723 		break;
2724 	case ETHTOOL_GRXCLSRULE:
2725 		if (dev->hw_features & NETIF_F_LRO)
2726 			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
2727 		break;
2728 	case ETHTOOL_GRXCLSRLALL:
2729 		if (dev->hw_features & NETIF_F_LRO)
2730 			ret = mtk_hwlro_get_fdir_all(dev, cmd,
2731 						     rule_locs);
2732 		break;
2733 	default:
2734 		break;
2735 	}
2736 
2737 	return ret;
2738 }
2739 
2740 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2741 {
2742 	int ret = -EOPNOTSUPP;
2743 
2744 	switch (cmd->cmd) {
2745 	case ETHTOOL_SRXCLSRLINS:
2746 		if (dev->hw_features & NETIF_F_LRO)
2747 			ret = mtk_hwlro_add_ipaddr(dev, cmd);
2748 		break;
2749 	case ETHTOOL_SRXCLSRLDEL:
2750 		if (dev->hw_features & NETIF_F_LRO)
2751 			ret = mtk_hwlro_del_ipaddr(dev, cmd);
2752 		break;
2753 	default:
2754 		break;
2755 	}
2756 
2757 	return ret;
2758 }
2759 
2760 static const struct ethtool_ops mtk_ethtool_ops = {
2761 	.get_link_ksettings	= mtk_get_link_ksettings,
2762 	.set_link_ksettings	= mtk_set_link_ksettings,
2763 	.get_drvinfo		= mtk_get_drvinfo,
2764 	.get_msglevel		= mtk_get_msglevel,
2765 	.set_msglevel		= mtk_set_msglevel,
2766 	.nway_reset		= mtk_nway_reset,
2767 	.get_link		= ethtool_op_get_link,
2768 	.get_strings		= mtk_get_strings,
2769 	.get_sset_count		= mtk_get_sset_count,
2770 	.get_ethtool_stats	= mtk_get_ethtool_stats,
2771 	.get_rxnfc		= mtk_get_rxnfc,
2772 	.set_rxnfc              = mtk_set_rxnfc,
2773 };
2774 
2775 static const struct net_device_ops mtk_netdev_ops = {
2776 	.ndo_init		= mtk_init,
2777 	.ndo_uninit		= mtk_uninit,
2778 	.ndo_open		= mtk_open,
2779 	.ndo_stop		= mtk_stop,
2780 	.ndo_start_xmit		= mtk_start_xmit,
2781 	.ndo_set_mac_address	= mtk_set_mac_address,
2782 	.ndo_validate_addr	= eth_validate_addr,
2783 	.ndo_do_ioctl		= mtk_do_ioctl,
2784 	.ndo_tx_timeout		= mtk_tx_timeout,
2785 	.ndo_get_stats64        = mtk_get_stats64,
2786 	.ndo_fix_features	= mtk_fix_features,
2787 	.ndo_set_features	= mtk_set_features,
2788 #ifdef CONFIG_NET_POLL_CONTROLLER
2789 	.ndo_poll_controller	= mtk_poll_controller,
2790 #endif
2791 };
2792 
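/* Create one MAC/netdev from a "mediatek,eth-mac" device tree node: read
 * the MAC id and phy-mode, allocate the netdev and per-MAC statistics,
 * create the phylink instance and set up the netdev/ethtool ops and
 * feature flags.
 */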
2793 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
2794 {
2795 	const __be32 *_id = of_get_property(np, "reg", NULL);
2796 	phy_interface_t phy_mode;
2797 	struct phylink *phylink;
2798 	struct mtk_mac *mac;
2799 	int id, err;
2800 
2801 	if (!_id) {
2802 		dev_err(eth->dev, "missing mac id\n");
2803 		return -EINVAL;
2804 	}
2805 
2806 	id = be32_to_cpup(_id);
2807 	if (id >= MTK_MAC_COUNT) {
2808 		dev_err(eth->dev, "%d is not a valid mac id\n", id);
2809 		return -EINVAL;
2810 	}
2811 
2812 	if (eth->netdev[id]) {
2813 		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
2814 		return -EINVAL;
2815 	}
2816 
2817 	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
2818 	if (!eth->netdev[id]) {
2819 		dev_err(eth->dev, "alloc_etherdev failed\n");
2820 		return -ENOMEM;
2821 	}
2822 	mac = netdev_priv(eth->netdev[id]);
2823 	eth->mac[id] = mac;
2824 	mac->id = id;
2825 	mac->hw = eth;
2826 	mac->of_node = np;
2827 
2828 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
2829 	mac->hwlro_ip_cnt = 0;
2830 
2831 	mac->hw_stats = devm_kzalloc(eth->dev,
2832 				     sizeof(*mac->hw_stats),
2833 				     GFP_KERNEL);
2834 	if (!mac->hw_stats) {
2835 		dev_err(eth->dev, "failed to allocate counter memory\n");
2836 		err = -ENOMEM;
2837 		goto free_netdev;
2838 	}
2839 	spin_lock_init(&mac->hw_stats->stats_lock);
2840 	u64_stats_init(&mac->hw_stats->syncp);
2841 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
2842 
2843 	/* phylink create */
2844 	err = of_get_phy_mode(np, &phy_mode);
2845 	if (err) {
2846 		dev_err(eth->dev, "incorrect phy-mode\n");
2847 		goto free_netdev;
2848 	}
2849 
2850 	/* mac config is not set */
2851 	mac->interface = PHY_INTERFACE_MODE_NA;
2852 	mac->mode = MLO_AN_PHY;
2853 	mac->speed = SPEED_UNKNOWN;
2854 
2855 	mac->phylink_config.dev = &eth->netdev[id]->dev;
2856 	mac->phylink_config.type = PHYLINK_NETDEV;
2857 
2858 	phylink = phylink_create(&mac->phylink_config,
2859 				 of_fwnode_handle(mac->of_node),
2860 				 phy_mode, &mtk_phylink_ops);
2861 	if (IS_ERR(phylink)) {
2862 		err = PTR_ERR(phylink);
2863 		goto free_netdev;
2864 	}
2865 
2866 	mac->phylink = phylink;
2867 
2868 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
2869 	eth->netdev[id]->watchdog_timeo = 5 * HZ;
2870 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
2871 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
2872 
2873 	eth->netdev[id]->hw_features = eth->soc->hw_features;
2874 	if (eth->hwlro)
2875 		eth->netdev[id]->hw_features |= NETIF_F_LRO;
2876 
2877 	eth->netdev[id]->vlan_features = eth->soc->hw_features &
2878 		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
2879 	eth->netdev[id]->features |= eth->soc->hw_features;
2880 	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
2881 
2882 	eth->netdev[id]->irq = eth->irq[0];
2883 	eth->netdev[id]->dev.of_node = np;
2884 
2885 	return 0;
2886 
2887 free_netdev:
2888 	free_netdev(eth->netdev[id]);
2889 	return err;
2890 }
2891 
2892 static int mtk_probe(struct platform_device *pdev)
2893 {
2894 	struct device_node *mac_np;
2895 	struct mtk_eth *eth;
2896 	int err, i;
2897 
2898 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
2899 	if (!eth)
2900 		return -ENOMEM;
2901 
2902 	eth->soc = of_device_get_match_data(&pdev->dev);
2903 
2904 	eth->dev = &pdev->dev;
2905 	eth->base = devm_platform_ioremap_resource(pdev, 0);
2906 	if (IS_ERR(eth->base))
2907 		return PTR_ERR(eth->base);
2908 
2909 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2910 		eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
2911 		eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
2912 	} else {
2913 		eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
2914 		eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
2915 	}
2916 
2917 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2918 		eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
2919 		eth->ip_align = NET_IP_ALIGN;
2920 	} else {
2921 		eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
2922 	}
2923 
2924 	spin_lock_init(&eth->page_lock);
2925 	spin_lock_init(&eth->tx_irq_lock);
2926 	spin_lock_init(&eth->rx_irq_lock);
2927 
2928 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2929 		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2930 							      "mediatek,ethsys");
2931 		if (IS_ERR(eth->ethsys)) {
2932 			dev_err(&pdev->dev, "no ethsys regmap found\n");
2933 			return PTR_ERR(eth->ethsys);
2934 		}
2935 	}
2936 
2937 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
2938 		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2939 							     "mediatek,infracfg");
2940 		if (IS_ERR(eth->infra)) {
2941 			dev_err(&pdev->dev, "no infracfg regmap found\n");
2942 			return PTR_ERR(eth->infra);
2943 		}
2944 	}
2945 
2946 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
2947 		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
2948 					  GFP_KERNEL);
2949 		if (!eth->sgmii)
2950 			return -ENOMEM;
2951 
2952 		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
2953 				     eth->soc->ana_rgc3);
2954 
2955 		if (err)
2956 			return err;
2957 	}
2958 
2959 	if (eth->soc->required_pctl) {
2960 		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2961 							    "mediatek,pctl");
2962 		if (IS_ERR(eth->pctl)) {
2963 			dev_err(&pdev->dev, "no pctl regmap found\n");
2964 			return PTR_ERR(eth->pctl);
2965 		}
2966 	}
2967 
2968 	for (i = 0; i < 3; i++) {
2969 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
2970 			eth->irq[i] = eth->irq[0];
2971 		else
2972 			eth->irq[i] = platform_get_irq(pdev, i);
2973 		if (eth->irq[i] < 0) {
2974 			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
2975 			return -ENXIO;
2976 		}
2977 	}
2978 	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
2979 		eth->clks[i] = devm_clk_get(eth->dev,
2980 					    mtk_clks_source_name[i]);
2981 		if (IS_ERR(eth->clks[i])) {
2982 			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
2983 				return -EPROBE_DEFER;
2984 			if (eth->soc->required_clks & BIT(i)) {
2985 				dev_err(&pdev->dev, "clock %s not found\n",
2986 					mtk_clks_source_name[i]);
2987 				return -EINVAL;
2988 			}
2989 			eth->clks[i] = NULL;
2990 		}
2991 	}
2992 
2993 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
2994 	INIT_WORK(&eth->pending_work, mtk_pending_work);
2995 
2996 	err = mtk_hw_init(eth);
2997 	if (err)
2998 		return err;
2999 
3000 	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
3001 
3002 	for_each_child_of_node(pdev->dev.of_node, mac_np) {
3003 		if (!of_device_is_compatible(mac_np,
3004 					     "mediatek,eth-mac"))
3005 			continue;
3006 
3007 		if (!of_device_is_available(mac_np))
3008 			continue;
3009 
3010 		err = mtk_add_mac(eth, mac_np);
3011 		if (err) {
3012 			of_node_put(mac_np);
3013 			goto err_deinit_hw;
3014 		}
3015 	}
3016 
3017 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
3018 		err = devm_request_irq(eth->dev, eth->irq[0],
3019 				       mtk_handle_irq, 0,
3020 				       dev_name(eth->dev), eth);
3021 	} else {
3022 		err = devm_request_irq(eth->dev, eth->irq[1],
3023 				       mtk_handle_irq_tx, 0,
3024 				       dev_name(eth->dev), eth);
3025 		if (err)
3026 			goto err_free_dev;
3027 
3028 		err = devm_request_irq(eth->dev, eth->irq[2],
3029 				       mtk_handle_irq_rx, 0,
3030 				       dev_name(eth->dev), eth);
3031 	}
3032 	if (err)
3033 		goto err_free_dev;
3034 
3035 	/* No MT7628/88 support yet */
3036 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3037 		err = mtk_mdio_init(eth);
3038 		if (err)
3039 			goto err_free_dev;
3040 	}
3041 
3042 	for (i = 0; i < MTK_MAX_DEVS; i++) {
3043 		if (!eth->netdev[i])
3044 			continue;
3045 
3046 		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		}
		netif_info(eth, probe, eth->netdev[i],
			   "mediatek frame engine at 0x%08lx, irq %d\n",
			   eth->netdev[i]->base_addr, eth->irq[0]);
3054 	}
3055 
3056 	/* we run 2 devices on the same DMA ring so we need a dummy device
3057 	 * for NAPI to work
3058 	 */
3059 	init_dummy_netdev(&eth->dummy_dev);
3060 	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
3061 		       MTK_NAPI_WEIGHT);
3062 	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
3063 		       MTK_NAPI_WEIGHT);
3064 
3065 	platform_set_drvdata(pdev, eth);
3066 
3067 	return 0;
3068 
3069 err_deinit_mdio:
3070 	mtk_mdio_cleanup(eth);
3071 err_free_dev:
3072 	mtk_free_dev(eth);
3073 err_deinit_hw:
3074 	mtk_hw_deinit(eth);
3075 
3076 	return err;
3077 }
3078 
3079 static int mtk_remove(struct platform_device *pdev)
3080 {
3081 	struct mtk_eth *eth = platform_get_drvdata(pdev);
3082 	struct mtk_mac *mac;
3083 	int i;
3084 
3085 	/* stop all devices to make sure that dma is properly shut down */
3086 	for (i = 0; i < MTK_MAC_COUNT; i++) {
3087 		if (!eth->netdev[i])
3088 			continue;
3089 		mtk_stop(eth->netdev[i]);
3090 		mac = netdev_priv(eth->netdev[i]);
3091 		phylink_disconnect_phy(mac->phylink);
3092 	}
3093 
3094 	mtk_hw_deinit(eth);
3095 
3096 	netif_napi_del(&eth->tx_napi);
3097 	netif_napi_del(&eth->rx_napi);
3098 	mtk_cleanup(eth);
3099 	mtk_mdio_cleanup(eth);
3100 
3101 	return 0;
3102 }
3103 
3104 static const struct mtk_soc_data mt2701_data = {
3105 	.caps = MT7623_CAPS | MTK_HWLRO,
3106 	.hw_features = MTK_HW_FEATURES,
3107 	.required_clks = MT7623_CLKS_BITMAP,
3108 	.required_pctl = true,
3109 };
3110 
3111 static const struct mtk_soc_data mt7621_data = {
3112 	.caps = MT7621_CAPS,
3113 	.hw_features = MTK_HW_FEATURES,
3114 	.required_clks = MT7621_CLKS_BITMAP,
3115 	.required_pctl = false,
3116 };
3117 
3118 static const struct mtk_soc_data mt7622_data = {
3119 	.ana_rgc3 = 0x2028,
3120 	.caps = MT7622_CAPS | MTK_HWLRO,
3121 	.hw_features = MTK_HW_FEATURES,
3122 	.required_clks = MT7622_CLKS_BITMAP,
3123 	.required_pctl = false,
3124 };
3125 
3126 static const struct mtk_soc_data mt7623_data = {
3127 	.caps = MT7623_CAPS | MTK_HWLRO,
3128 	.hw_features = MTK_HW_FEATURES,
3129 	.required_clks = MT7623_CLKS_BITMAP,
3130 	.required_pctl = true,
3131 };
3132 
3133 static const struct mtk_soc_data mt7629_data = {
3134 	.ana_rgc3 = 0x128,
3135 	.caps = MT7629_CAPS | MTK_HWLRO,
3136 	.hw_features = MTK_HW_FEATURES,
3137 	.required_clks = MT7629_CLKS_BITMAP,
3138 	.required_pctl = false,
3139 };
3140 
3141 static const struct mtk_soc_data rt5350_data = {
3142 	.caps = MT7628_CAPS,
3143 	.hw_features = MTK_HW_FEATURES_MT7628,
3144 	.required_clks = MT7628_CLKS_BITMAP,
3145 	.required_pctl = false,
3146 };
3147 
3148 const struct of_device_id of_mtk_match[] = {
3149 	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
3150 	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
3151 	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
3152 	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
3153 	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
3154 	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
3155 	{},
3156 };
3157 MODULE_DEVICE_TABLE(of, of_mtk_match);
3158 
3159 static struct platform_driver mtk_driver = {
3160 	.probe = mtk_probe,
3161 	.remove = mtk_remove,
3162 	.driver = {
3163 		.name = "mtk_soc_eth",
3164 		.of_match_table = of_mtk_match,
3165 	},
3166 };
3167 
3168 module_platform_driver(mtk_driver);
3169 
3170 MODULE_LICENSE("GPL");
3171 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
3172 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
3173