1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5  *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6  *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7  */
8 
9 #include <linux/of_device.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/mfd/syscon.h>
13 #include <linux/regmap.h>
14 #include <linux/clk.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/if_vlan.h>
17 #include <linux/reset.h>
18 #include <linux/tcp.h>
19 #include <linux/interrupt.h>
20 #include <linux/pinctrl/devinfo.h>
21 
22 #include "mtk_eth_soc.h"
23 
24 static int mtk_msg_level = -1;
25 module_param_named(msg_level, mtk_msg_level, int, 0);
26 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
27 
28 #define MTK_ETHTOOL_STAT(x) { #x, \
29 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
30 
31 /* strings used by ethtool */
32 static const struct mtk_ethtool_stats {
33 	char str[ETH_GSTRING_LEN];
34 	u32 offset;
35 } mtk_ethtool_stats[] = {
36 	MTK_ETHTOOL_STAT(tx_bytes),
37 	MTK_ETHTOOL_STAT(tx_packets),
38 	MTK_ETHTOOL_STAT(tx_skip),
39 	MTK_ETHTOOL_STAT(tx_collisions),
40 	MTK_ETHTOOL_STAT(rx_bytes),
41 	MTK_ETHTOOL_STAT(rx_packets),
42 	MTK_ETHTOOL_STAT(rx_overflow),
43 	MTK_ETHTOOL_STAT(rx_fcs_errors),
44 	MTK_ETHTOOL_STAT(rx_short_errors),
45 	MTK_ETHTOOL_STAT(rx_long_errors),
46 	MTK_ETHTOOL_STAT(rx_checksum_errors),
47 	MTK_ETHTOOL_STAT(rx_flow_control_packets),
48 };
49 
50 static const char * const mtk_clks_source_name[] = {
51 	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
52 	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
53 	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
54 	"sgmii_ck", "eth2pll",
55 };
56 
57 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
58 {
59 	__raw_writel(val, eth->base + reg);
60 }
61 
62 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
63 {
64 	return __raw_readl(eth->base + reg);
65 }
66 
67 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
68 {
69 	unsigned long t_start = jiffies;
70 
71 	while (1) {
72 		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
73 			return 0;
74 		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
75 			break;
76 		usleep_range(10, 20);
77 	}
78 
79 	dev_err(eth->dev, "mdio: MDIO timeout\n");
80 	return -1;
81 }
82 
83 static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
84 			   u32 phy_register, u32 write_data)
85 {
86 	if (mtk_mdio_busy_wait(eth))
87 		return -1;
88 
89 	write_data &= 0xffff;
90 
91 	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
92 		(phy_register << PHY_IAC_REG_SHIFT) |
93 		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
94 		MTK_PHY_IAC);
95 
96 	if (mtk_mdio_busy_wait(eth))
97 		return -1;
98 
99 	return 0;
100 }
101 
102 static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
103 {
104 	u32 d;
105 
106 	if (mtk_mdio_busy_wait(eth))
107 		return 0xffff;
108 
109 	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
110 		(phy_reg << PHY_IAC_REG_SHIFT) |
111 		(phy_addr << PHY_IAC_ADDR_SHIFT),
112 		MTK_PHY_IAC);
113 
114 	if (mtk_mdio_busy_wait(eth))
115 		return 0xffff;
116 
117 	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
118 
119 	return d;
120 }
121 
122 static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
123 			  int phy_reg, u16 val)
124 {
125 	struct mtk_eth *eth = bus->priv;
126 
127 	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
128 }
129 
130 static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
131 {
132 	struct mtk_eth *eth = bus->priv;
133 
134 	return _mtk_mdio_read(eth, phy_addr, phy_reg);
135 }
136 
137 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
138 				     phy_interface_t interface)
139 {
140 	u32 val;
141 
142 	/* Check DDR memory type.
143 	 * Currently TRGMII mode with DDR2 memory is not supported.
144 	 */
145 	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
146 	if (interface == PHY_INTERFACE_MODE_TRGMII &&
147 	    val & SYSCFG_DRAM_TYPE_DDR2) {
148 		dev_err(eth->dev,
149 			"TRGMII mode with DDR2 memory is not supported!\n");
150 		return -EOPNOTSUPP;
151 	}
152 
153 	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
154 		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
155 
156 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
157 			   ETHSYS_TRGMII_MT7621_MASK, val);
158 
159 	return 0;
160 }
161 
162 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
163 {
164 	u32 val;
165 	int ret;
166 
167 	val = (speed == SPEED_1000) ?
168 		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
169 	mtk_w32(eth, val, INTF_MODE);
170 
171 	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
172 			   ETHSYS_TRGMII_CLK_SEL362_5,
173 			   ETHSYS_TRGMII_CLK_SEL362_5);
174 
175 	val = (speed == SPEED_1000) ? 250000000 : 500000000;
176 	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
177 	if (ret)
178 		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
179 
180 	val = (speed == SPEED_1000) ?
181 		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
182 	mtk_w32(eth, val, TRGMII_RCK_CTRL);
183 
184 	val = (speed == SPEED_1000) ?
185 		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
186 	mtk_w32(eth, val, TRGMII_TCK_CTRL);
187 }
188 
189 static void mtk_phy_link_adjust(struct net_device *dev)
190 {
191 	struct mtk_mac *mac = netdev_priv(dev);
192 	u16 lcl_adv = 0, rmt_adv = 0;
193 	u8 flowctrl;
194 	u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
195 		  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
196 		  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
197 		  MAC_MCR_BACKPR_EN;
198 
199 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
200 		return;
201 
202 	switch (dev->phydev->speed) {
203 	case SPEED_1000:
204 		mcr |= MAC_MCR_SPEED_1000;
205 		break;
206 	case SPEED_100:
207 		mcr |= MAC_MCR_SPEED_100;
208 		break;
209 	}
210 
211 	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) && !mac->id) {
212 		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
213 			if (mt7621_gmac0_rgmii_adjust(mac->hw,
214 						      dev->phydev->interface))
215 				return;
216 		} else {
217 			if (!mac->trgmii)
218 				mtk_gmac0_rgmii_adjust(mac->hw,
219 						       dev->phydev->speed);
220 		}
221 	}
222 
223 	if (dev->phydev->link)
224 		mcr |= MAC_MCR_FORCE_LINK;
225 
226 	if (dev->phydev->duplex) {
227 		mcr |= MAC_MCR_FORCE_DPX;
228 
229 		if (dev->phydev->pause)
230 			rmt_adv = LPA_PAUSE_CAP;
231 		if (dev->phydev->asym_pause)
232 			rmt_adv |= LPA_PAUSE_ASYM;
233 
234 		lcl_adv = linkmode_adv_to_lcl_adv_t(dev->phydev->advertising);
235 		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
236 
237 		if (flowctrl & FLOW_CTRL_TX)
238 			mcr |= MAC_MCR_FORCE_TX_FC;
239 		if (flowctrl & FLOW_CTRL_RX)
240 			mcr |= MAC_MCR_FORCE_RX_FC;
241 
242 		netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
243 			  flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
244 			  flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
245 	}
246 
247 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
248 
249 	if (!of_phy_is_fixed_link(mac->of_node))
250 		phy_print_status(dev->phydev);
251 }
252 
253 static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
254 				struct device_node *phy_node)
255 {
256 	struct phy_device *phydev;
257 	int phy_mode;
258 
259 	phy_mode = of_get_phy_mode(phy_node);
260 	if (phy_mode < 0) {
261 		dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
262 		return -EINVAL;
263 	}
264 
265 	phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
266 				mtk_phy_link_adjust, 0, phy_mode);
267 	if (!phydev) {
268 		dev_err(eth->dev, "could not connect to PHY\n");
269 		return -ENODEV;
270 	}
271 
272 	dev_info(eth->dev,
273 		 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
274 		 mac->id, phydev_name(phydev), phydev->phy_id,
275 		 phydev->drv->name);
276 
277 	return 0;
278 }
279 
280 static int mtk_phy_connect(struct net_device *dev)
281 {
282 	struct mtk_mac *mac = netdev_priv(dev);
283 	struct mtk_eth *eth;
284 	struct device_node *np;
285 	u32 val;
286 	int err;
287 
288 	eth = mac->hw;
289 	np = of_parse_phandle(mac->of_node, "phy-handle", 0);
290 	if (!np && of_phy_is_fixed_link(mac->of_node))
291 		if (!of_phy_register_fixed_link(mac->of_node))
292 			np = of_node_get(mac->of_node);
293 	if (!np)
294 		return -ENODEV;
295 
296 	err = mtk_setup_hw_path(eth, mac->id, of_get_phy_mode(np));
297 	if (err)
298 		goto err_phy;
299 
300 	mac->ge_mode = 0;
301 	switch (of_get_phy_mode(np)) {
302 	case PHY_INTERFACE_MODE_TRGMII:
303 		mac->trgmii = true;
304 	case PHY_INTERFACE_MODE_RGMII_TXID:
305 	case PHY_INTERFACE_MODE_RGMII_RXID:
306 	case PHY_INTERFACE_MODE_RGMII_ID:
307 	case PHY_INTERFACE_MODE_RGMII:
308 	case PHY_INTERFACE_MODE_SGMII:
309 		break;
310 	case PHY_INTERFACE_MODE_MII:
311 	case PHY_INTERFACE_MODE_GMII:
312 		mac->ge_mode = 1;
313 		break;
314 	case PHY_INTERFACE_MODE_REVMII:
315 		mac->ge_mode = 2;
316 		break;
317 	case PHY_INTERFACE_MODE_RMII:
318 		if (!mac->id)
319 			goto err_phy;
320 		mac->ge_mode = 3;
321 		break;
322 	default:
323 		goto err_phy;
324 	}
325 
326 	/* put the gmac into the right mode */
327 	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
328 	val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
329 	val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
330 	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
331 
332 	/* couple phydev to net_device */
333 	if (mtk_phy_connect_node(eth, mac, np))
334 		goto err_phy;
335 
336 	of_node_put(np);
337 
338 	return 0;
339 
340 err_phy:
341 	if (of_phy_is_fixed_link(mac->of_node))
342 		of_phy_deregister_fixed_link(mac->of_node);
343 	of_node_put(np);
344 	dev_err(eth->dev, "%s: invalid phy\n", __func__);
345 	return -EINVAL;
346 }
347 
348 static int mtk_mdio_init(struct mtk_eth *eth)
349 {
350 	struct device_node *mii_np;
351 	int ret;
352 
353 	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
354 	if (!mii_np) {
355 		dev_err(eth->dev, "no %s child node found", "mdio-bus");
356 		return -ENODEV;
357 	}
358 
359 	if (!of_device_is_available(mii_np)) {
360 		ret = -ENODEV;
361 		goto err_put_node;
362 	}
363 
364 	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
365 	if (!eth->mii_bus) {
366 		ret = -ENOMEM;
367 		goto err_put_node;
368 	}
369 
370 	eth->mii_bus->name = "mdio";
371 	eth->mii_bus->read = mtk_mdio_read;
372 	eth->mii_bus->write = mtk_mdio_write;
373 	eth->mii_bus->priv = eth;
374 	eth->mii_bus->parent = eth->dev;
375 
376 	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
377 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
378 
379 err_put_node:
380 	of_node_put(mii_np);
381 	return ret;
382 }
383 
384 static void mtk_mdio_cleanup(struct mtk_eth *eth)
385 {
386 	if (!eth->mii_bus)
387 		return;
388 
389 	mdiobus_unregister(eth->mii_bus);
390 }
391 
392 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
393 {
394 	unsigned long flags;
395 	u32 val;
396 
397 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
398 	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
399 	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
400 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
401 }
402 
403 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
404 {
405 	unsigned long flags;
406 	u32 val;
407 
408 	spin_lock_irqsave(&eth->tx_irq_lock, flags);
409 	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
410 	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
411 	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
412 }
413 
414 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
415 {
416 	unsigned long flags;
417 	u32 val;
418 
419 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
420 	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
421 	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
422 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
423 }
424 
425 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
426 {
427 	unsigned long flags;
428 	u32 val;
429 
430 	spin_lock_irqsave(&eth->rx_irq_lock, flags);
431 	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
432 	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
433 	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
434 }
435 
436 static int mtk_set_mac_address(struct net_device *dev, void *p)
437 {
438 	int ret = eth_mac_addr(dev, p);
439 	struct mtk_mac *mac = netdev_priv(dev);
440 	const char *macaddr = dev->dev_addr;
441 
442 	if (ret)
443 		return ret;
444 
445 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
446 		return -EBUSY;
447 
448 	spin_lock_bh(&mac->hw->page_lock);
449 	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
450 		MTK_GDMA_MAC_ADRH(mac->id));
451 	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
452 		(macaddr[4] << 8) | macaddr[5],
453 		MTK_GDMA_MAC_ADRL(mac->id));
454 	spin_unlock_bh(&mac->hw->page_lock);
455 
456 	return 0;
457 }
458 
459 void mtk_stats_update_mac(struct mtk_mac *mac)
460 {
461 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
462 	unsigned int base = MTK_GDM1_TX_GBCNT;
463 	u64 stats;
464 
465 	base += hw_stats->reg_offset;
466 
467 	u64_stats_update_begin(&hw_stats->syncp);
468 
469 	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
470 	stats =  mtk_r32(mac->hw, base + 0x04);
471 	if (stats)
472 		hw_stats->rx_bytes += (stats << 32);
473 	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
474 	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
475 	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
476 	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
477 	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
478 	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
479 	hw_stats->rx_flow_control_packets +=
480 					mtk_r32(mac->hw, base + 0x24);
481 	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
482 	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
483 	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
484 	stats =  mtk_r32(mac->hw, base + 0x34);
485 	if (stats)
486 		hw_stats->tx_bytes += (stats << 32);
487 	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
488 	u64_stats_update_end(&hw_stats->syncp);
489 }
490 
491 static void mtk_stats_update(struct mtk_eth *eth)
492 {
493 	int i;
494 
495 	for (i = 0; i < MTK_MAC_COUNT; i++) {
496 		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
497 			continue;
498 		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
499 			mtk_stats_update_mac(eth->mac[i]);
500 			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
501 		}
502 	}
503 }
504 
505 static void mtk_get_stats64(struct net_device *dev,
506 			    struct rtnl_link_stats64 *storage)
507 {
508 	struct mtk_mac *mac = netdev_priv(dev);
509 	struct mtk_hw_stats *hw_stats = mac->hw_stats;
510 	unsigned int start;
511 
512 	if (netif_running(dev) && netif_device_present(dev)) {
513 		if (spin_trylock_bh(&hw_stats->stats_lock)) {
514 			mtk_stats_update_mac(mac);
515 			spin_unlock_bh(&hw_stats->stats_lock);
516 		}
517 	}
518 
519 	do {
520 		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
521 		storage->rx_packets = hw_stats->rx_packets;
522 		storage->tx_packets = hw_stats->tx_packets;
523 		storage->rx_bytes = hw_stats->rx_bytes;
524 		storage->tx_bytes = hw_stats->tx_bytes;
525 		storage->collisions = hw_stats->tx_collisions;
526 		storage->rx_length_errors = hw_stats->rx_short_errors +
527 			hw_stats->rx_long_errors;
528 		storage->rx_over_errors = hw_stats->rx_overflow;
529 		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
530 		storage->rx_errors = hw_stats->rx_checksum_errors;
531 		storage->tx_aborted_errors = hw_stats->tx_skip;
532 	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
533 
534 	storage->tx_errors = dev->stats.tx_errors;
535 	storage->rx_dropped = dev->stats.rx_dropped;
536 	storage->tx_dropped = dev->stats.tx_dropped;
537 }
538 
539 static inline int mtk_max_frag_size(int mtu)
540 {
541 	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
542 	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
543 		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
544 
545 	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
546 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
547 }
548 
549 static inline int mtk_max_buf_size(int frag_size)
550 {
551 	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
552 		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
553 
554 	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
555 
556 	return buf_size;
557 }
558 
559 static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
560 				   struct mtk_rx_dma *dma_rxd)
561 {
562 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
563 	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
564 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
565 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
566 }
567 
568 /* the qdma core needs scratch memory to be setup */
569 static int mtk_init_fq_dma(struct mtk_eth *eth)
570 {
571 	dma_addr_t phy_ring_tail;
572 	int cnt = MTK_DMA_SIZE;
573 	dma_addr_t dma_addr;
574 	int i;
575 
576 	eth->scratch_ring = dma_alloc_coherent(eth->dev,
577 					       cnt * sizeof(struct mtk_tx_dma),
578 					       &eth->phy_scratch_ring,
579 					       GFP_ATOMIC);
580 	if (unlikely(!eth->scratch_ring))
581 		return -ENOMEM;
582 
583 	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
584 				    GFP_KERNEL);
585 	if (unlikely(!eth->scratch_head))
586 		return -ENOMEM;
587 
588 	dma_addr = dma_map_single(eth->dev,
589 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
590 				  DMA_FROM_DEVICE);
591 	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
592 		return -ENOMEM;
593 
594 	phy_ring_tail = eth->phy_scratch_ring +
595 			(sizeof(struct mtk_tx_dma) * (cnt - 1));
596 
597 	for (i = 0; i < cnt; i++) {
598 		eth->scratch_ring[i].txd1 =
599 					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
600 		if (i < cnt - 1)
601 			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
602 				((i + 1) * sizeof(struct mtk_tx_dma)));
603 		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
604 	}
605 
606 	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
607 	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
608 	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
609 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
610 
611 	return 0;
612 }
613 
614 static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
615 {
616 	void *ret = ring->dma;
617 
618 	return ret + (desc - ring->phys);
619 }
620 
621 static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
622 						    struct mtk_tx_dma *txd)
623 {
624 	int idx = txd - ring->dma;
625 
626 	return &ring->buf[idx];
627 }
628 
629 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
630 {
631 	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
632 		dma_unmap_single(eth->dev,
633 				 dma_unmap_addr(tx_buf, dma_addr0),
634 				 dma_unmap_len(tx_buf, dma_len0),
635 				 DMA_TO_DEVICE);
636 	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
637 		dma_unmap_page(eth->dev,
638 			       dma_unmap_addr(tx_buf, dma_addr0),
639 			       dma_unmap_len(tx_buf, dma_len0),
640 			       DMA_TO_DEVICE);
641 	}
642 	tx_buf->flags = 0;
643 	if (tx_buf->skb &&
644 	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
645 		dev_kfree_skb_any(tx_buf->skb);
646 	tx_buf->skb = NULL;
647 }
648 
649 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
650 		      int tx_num, struct mtk_tx_ring *ring, bool gso)
651 {
652 	struct mtk_mac *mac = netdev_priv(dev);
653 	struct mtk_eth *eth = mac->hw;
654 	struct mtk_tx_dma *itxd, *txd;
655 	struct mtk_tx_buf *itx_buf, *tx_buf;
656 	dma_addr_t mapped_addr;
657 	unsigned int nr_frags;
658 	int i, n_desc = 1;
659 	u32 txd4 = 0, fport;
660 
661 	itxd = ring->next_free;
662 	if (itxd == ring->last_free)
663 		return -ENOMEM;
664 
665 	/* set the forward port */
666 	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
667 	txd4 |= fport;
668 
669 	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
670 	memset(itx_buf, 0, sizeof(*itx_buf));
671 
672 	if (gso)
673 		txd4 |= TX_DMA_TSO;
674 
675 	/* TX Checksum offload */
676 	if (skb->ip_summed == CHECKSUM_PARTIAL)
677 		txd4 |= TX_DMA_CHKSUM;
678 
679 	/* VLAN header offload */
680 	if (skb_vlan_tag_present(skb))
681 		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
682 
683 	mapped_addr = dma_map_single(eth->dev, skb->data,
684 				     skb_headlen(skb), DMA_TO_DEVICE);
685 	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
686 		return -ENOMEM;
687 
688 	WRITE_ONCE(itxd->txd1, mapped_addr);
689 	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
690 	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
691 			  MTK_TX_FLAGS_FPORT1;
692 	dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
693 	dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
694 
695 	/* TX SG offload */
696 	txd = itxd;
697 	nr_frags = skb_shinfo(skb)->nr_frags;
698 	for (i = 0; i < nr_frags; i++) {
699 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
700 		unsigned int offset = 0;
701 		int frag_size = skb_frag_size(frag);
702 
703 		while (frag_size) {
704 			bool last_frag = false;
705 			unsigned int frag_map_size;
706 
707 			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
708 			if (txd == ring->last_free)
709 				goto err_dma;
710 
711 			n_desc++;
712 			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
713 			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
714 						       frag_map_size,
715 						       DMA_TO_DEVICE);
716 			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
717 				goto err_dma;
718 
719 			if (i == nr_frags - 1 &&
720 			    (frag_size - frag_map_size) == 0)
721 				last_frag = true;
722 
723 			WRITE_ONCE(txd->txd1, mapped_addr);
724 			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
725 					       TX_DMA_PLEN0(frag_map_size) |
726 					       last_frag * TX_DMA_LS0));
727 			WRITE_ONCE(txd->txd4, fport);
728 
729 			tx_buf = mtk_desc_to_tx_buf(ring, txd);
730 			memset(tx_buf, 0, sizeof(*tx_buf));
731 			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
732 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
733 			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
734 					 MTK_TX_FLAGS_FPORT1;
735 
736 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
737 			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
738 			frag_size -= frag_map_size;
739 			offset += frag_map_size;
740 		}
741 	}
742 
743 	/* store skb to cleanup */
744 	itx_buf->skb = skb;
745 
746 	WRITE_ONCE(itxd->txd4, txd4);
747 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
748 				(!nr_frags * TX_DMA_LS0)));
749 
750 	netdev_sent_queue(dev, skb->len);
751 	skb_tx_timestamp(skb);
752 
753 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
754 	atomic_sub(n_desc, &ring->free_count);
755 
756 	/* make sure that all changes to the dma ring are flushed before we
757 	 * continue
758 	 */
759 	wmb();
760 
761 	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
762 	    !netdev_xmit_more())
763 		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
764 
765 	return 0;
766 
767 err_dma:
768 	do {
769 		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
770 
771 		/* unmap dma */
772 		mtk_tx_unmap(eth, tx_buf);
773 
774 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
775 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
776 	} while (itxd != txd);
777 
778 	return -ENOMEM;
779 }
780 
781 static inline int mtk_cal_txd_req(struct sk_buff *skb)
782 {
783 	int i, nfrags;
784 	struct skb_frag_struct *frag;
785 
786 	nfrags = 1;
787 	if (skb_is_gso(skb)) {
788 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
789 			frag = &skb_shinfo(skb)->frags[i];
790 			nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
791 		}
792 	} else {
793 		nfrags += skb_shinfo(skb)->nr_frags;
794 	}
795 
796 	return nfrags;
797 }
798 
799 static int mtk_queue_stopped(struct mtk_eth *eth)
800 {
801 	int i;
802 
803 	for (i = 0; i < MTK_MAC_COUNT; i++) {
804 		if (!eth->netdev[i])
805 			continue;
806 		if (netif_queue_stopped(eth->netdev[i]))
807 			return 1;
808 	}
809 
810 	return 0;
811 }
812 
813 static void mtk_wake_queue(struct mtk_eth *eth)
814 {
815 	int i;
816 
817 	for (i = 0; i < MTK_MAC_COUNT; i++) {
818 		if (!eth->netdev[i])
819 			continue;
820 		netif_wake_queue(eth->netdev[i]);
821 	}
822 }
823 
824 static void mtk_stop_queue(struct mtk_eth *eth)
825 {
826 	int i;
827 
828 	for (i = 0; i < MTK_MAC_COUNT; i++) {
829 		if (!eth->netdev[i])
830 			continue;
831 		netif_stop_queue(eth->netdev[i]);
832 	}
833 }
834 
835 static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
836 {
837 	struct mtk_mac *mac = netdev_priv(dev);
838 	struct mtk_eth *eth = mac->hw;
839 	struct mtk_tx_ring *ring = &eth->tx_ring;
840 	struct net_device_stats *stats = &dev->stats;
841 	bool gso = false;
842 	int tx_num;
843 
844 	/* normally we can rely on the stack not calling this more than once,
845 	 * however we have 2 queues running on the same ring so we need to lock
846 	 * the ring access
847 	 */
848 	spin_lock(&eth->page_lock);
849 
850 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
851 		goto drop;
852 
853 	tx_num = mtk_cal_txd_req(skb);
854 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
855 		mtk_stop_queue(eth);
856 		netif_err(eth, tx_queued, dev,
857 			  "Tx Ring full when queue awake!\n");
858 		spin_unlock(&eth->page_lock);
859 		return NETDEV_TX_BUSY;
860 	}
861 
862 	/* TSO: fill MSS info in tcp checksum field */
863 	if (skb_is_gso(skb)) {
864 		if (skb_cow_head(skb, 0)) {
865 			netif_warn(eth, tx_err, dev,
866 				   "GSO expand head fail.\n");
867 			goto drop;
868 		}
869 
870 		if (skb_shinfo(skb)->gso_type &
871 				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
872 			gso = true;
873 			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
874 		}
875 	}
876 
877 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
878 		goto drop;
879 
880 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
881 		mtk_stop_queue(eth);
882 
883 	spin_unlock(&eth->page_lock);
884 
885 	return NETDEV_TX_OK;
886 
887 drop:
888 	spin_unlock(&eth->page_lock);
889 	stats->tx_dropped++;
890 	dev_kfree_skb_any(skb);
891 	return NETDEV_TX_OK;
892 }
893 
894 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
895 {
896 	int i;
897 	struct mtk_rx_ring *ring;
898 	int idx;
899 
900 	if (!eth->hwlro)
901 		return &eth->rx_ring[0];
902 
903 	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
904 		ring = &eth->rx_ring[i];
905 		idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
906 		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
907 			ring->calc_idx_update = true;
908 			return ring;
909 		}
910 	}
911 
912 	return NULL;
913 }
914 
915 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
916 {
917 	struct mtk_rx_ring *ring;
918 	int i;
919 
920 	if (!eth->hwlro) {
921 		ring = &eth->rx_ring[0];
922 		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
923 	} else {
924 		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
925 			ring = &eth->rx_ring[i];
926 			if (ring->calc_idx_update) {
927 				ring->calc_idx_update = false;
928 				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
929 			}
930 		}
931 	}
932 }
933 
934 static int mtk_poll_rx(struct napi_struct *napi, int budget,
935 		       struct mtk_eth *eth)
936 {
937 	struct mtk_rx_ring *ring;
938 	int idx;
939 	struct sk_buff *skb;
940 	u8 *data, *new_data;
941 	struct mtk_rx_dma *rxd, trxd;
942 	int done = 0;
943 
944 	while (done < budget) {
945 		struct net_device *netdev;
946 		unsigned int pktlen;
947 		dma_addr_t dma_addr;
948 		int mac = 0;
949 
950 		ring = mtk_get_rx_ring(eth);
951 		if (unlikely(!ring))
952 			goto rx_done;
953 
954 		idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
955 		rxd = &ring->dma[idx];
956 		data = ring->data[idx];
957 
958 		mtk_rx_get_desc(&trxd, rxd);
959 		if (!(trxd.rxd2 & RX_DMA_DONE))
960 			break;
961 
962 		/* find out which mac the packet come from. values start at 1 */
963 		mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
964 		      RX_DMA_FPORT_MASK;
965 		mac--;
966 
967 		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
968 			     !eth->netdev[mac]))
969 			goto release_desc;
970 
971 		netdev = eth->netdev[mac];
972 
973 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
974 			goto release_desc;
975 
976 		/* alloc new buffer */
977 		new_data = napi_alloc_frag(ring->frag_size);
978 		if (unlikely(!new_data)) {
979 			netdev->stats.rx_dropped++;
980 			goto release_desc;
981 		}
982 		dma_addr = dma_map_single(eth->dev,
983 					  new_data + NET_SKB_PAD,
984 					  ring->buf_size,
985 					  DMA_FROM_DEVICE);
986 		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
987 			skb_free_frag(new_data);
988 			netdev->stats.rx_dropped++;
989 			goto release_desc;
990 		}
991 
992 		/* receive data */
993 		skb = build_skb(data, ring->frag_size);
994 		if (unlikely(!skb)) {
995 			skb_free_frag(new_data);
996 			netdev->stats.rx_dropped++;
997 			goto release_desc;
998 		}
999 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1000 
1001 		dma_unmap_single(eth->dev, trxd.rxd1,
1002 				 ring->buf_size, DMA_FROM_DEVICE);
1003 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1004 		skb->dev = netdev;
1005 		skb_put(skb, pktlen);
1006 		if (trxd.rxd4 & RX_DMA_L4_VALID)
1007 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1008 		else
1009 			skb_checksum_none_assert(skb);
1010 		skb->protocol = eth_type_trans(skb, netdev);
1011 
1012 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
1013 		    RX_DMA_VID(trxd.rxd3))
1014 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1015 					       RX_DMA_VID(trxd.rxd3));
1016 		skb_record_rx_queue(skb, 0);
1017 		napi_gro_receive(napi, skb);
1018 
1019 		ring->data[idx] = new_data;
1020 		rxd->rxd1 = (unsigned int)dma_addr;
1021 
1022 release_desc:
1023 		rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
1024 
1025 		ring->calc_idx = idx;
1026 
1027 		done++;
1028 	}
1029 
1030 rx_done:
1031 	if (done) {
1032 		/* make sure that all changes to the dma ring are flushed before
1033 		 * we continue
1034 		 */
1035 		wmb();
1036 		mtk_update_rx_cpu_idx(eth);
1037 	}
1038 
1039 	return done;
1040 }
1041 
1042 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1043 {
1044 	struct mtk_tx_ring *ring = &eth->tx_ring;
1045 	struct mtk_tx_dma *desc;
1046 	struct sk_buff *skb;
1047 	struct mtk_tx_buf *tx_buf;
1048 	unsigned int done[MTK_MAX_DEVS];
1049 	unsigned int bytes[MTK_MAX_DEVS];
1050 	u32 cpu, dma;
1051 	int total = 0, i;
1052 
1053 	memset(done, 0, sizeof(done));
1054 	memset(bytes, 0, sizeof(bytes));
1055 
1056 	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
1057 	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1058 
1059 	desc = mtk_qdma_phys_to_virt(ring, cpu);
1060 
1061 	while ((cpu != dma) && budget) {
1062 		u32 next_cpu = desc->txd2;
1063 		int mac = 0;
1064 
1065 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1066 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1067 			break;
1068 
1069 		tx_buf = mtk_desc_to_tx_buf(ring, desc);
1070 		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
1071 			mac = 1;
1072 
1073 		skb = tx_buf->skb;
1074 		if (!skb)
1075 			break;
1076 
1077 		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1078 			bytes[mac] += skb->len;
1079 			done[mac]++;
1080 			budget--;
1081 		}
1082 		mtk_tx_unmap(eth, tx_buf);
1083 
1084 		ring->last_free = desc;
1085 		atomic_inc(&ring->free_count);
1086 
1087 		cpu = next_cpu;
1088 	}
1089 
1090 	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
1091 
1092 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1093 		if (!eth->netdev[i] || !done[i])
1094 			continue;
1095 		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1096 		total += done[i];
1097 	}
1098 
1099 	if (mtk_queue_stopped(eth) &&
1100 	    (atomic_read(&ring->free_count) > ring->thresh))
1101 		mtk_wake_queue(eth);
1102 
1103 	return total;
1104 }
1105 
1106 static void mtk_handle_status_irq(struct mtk_eth *eth)
1107 {
1108 	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
1109 
1110 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
1111 		mtk_stats_update(eth);
1112 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
1113 			MTK_INT_STATUS2);
1114 	}
1115 }
1116 
1117 static int mtk_napi_tx(struct napi_struct *napi, int budget)
1118 {
1119 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
1120 	u32 status, mask;
1121 	int tx_done = 0;
1122 
1123 	mtk_handle_status_irq(eth);
1124 	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
1125 	tx_done = mtk_poll_tx(eth, budget);
1126 
1127 	if (unlikely(netif_msg_intr(eth))) {
1128 		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
1129 		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
1130 		dev_info(eth->dev,
1131 			 "done tx %d, intr 0x%08x/0x%x\n",
1132 			 tx_done, status, mask);
1133 	}
1134 
1135 	if (tx_done == budget)
1136 		return budget;
1137 
1138 	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
1139 	if (status & MTK_TX_DONE_INT)
1140 		return budget;
1141 
1142 	napi_complete(napi);
1143 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
1144 
1145 	return tx_done;
1146 }
1147 
1148 static int mtk_napi_rx(struct napi_struct *napi, int budget)
1149 {
1150 	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
1151 	u32 status, mask;
1152 	int rx_done = 0;
1153 	int remain_budget = budget;
1154 
1155 	mtk_handle_status_irq(eth);
1156 
1157 poll_again:
1158 	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
1159 	rx_done = mtk_poll_rx(napi, remain_budget, eth);
1160 
1161 	if (unlikely(netif_msg_intr(eth))) {
1162 		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1163 		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
1164 		dev_info(eth->dev,
1165 			 "done rx %d, intr 0x%08x/0x%x\n",
1166 			 rx_done, status, mask);
1167 	}
1168 	if (rx_done == remain_budget)
1169 		return budget;
1170 
1171 	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1172 	if (status & MTK_RX_DONE_INT) {
1173 		remain_budget -= rx_done;
1174 		goto poll_again;
1175 	}
1176 	napi_complete(napi);
1177 	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
1178 
1179 	return rx_done + budget - remain_budget;
1180 }
1181 
1182 static int mtk_tx_alloc(struct mtk_eth *eth)
1183 {
1184 	struct mtk_tx_ring *ring = &eth->tx_ring;
1185 	int i, sz = sizeof(*ring->dma);
1186 
1187 	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
1188 			       GFP_KERNEL);
1189 	if (!ring->buf)
1190 		goto no_tx_mem;
1191 
1192 	ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1193 				       &ring->phys, GFP_ATOMIC);
1194 	if (!ring->dma)
1195 		goto no_tx_mem;
1196 
1197 	for (i = 0; i < MTK_DMA_SIZE; i++) {
1198 		int next = (i + 1) % MTK_DMA_SIZE;
1199 		u32 next_ptr = ring->phys + next * sz;
1200 
1201 		ring->dma[i].txd2 = next_ptr;
1202 		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1203 	}
1204 
1205 	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
1206 	ring->next_free = &ring->dma[0];
1207 	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
1208 	ring->thresh = MAX_SKB_FRAGS;
1209 
1210 	/* make sure that all changes to the dma ring are flushed before we
1211 	 * continue
1212 	 */
1213 	wmb();
1214 
1215 	mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1216 	mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1217 	mtk_w32(eth,
1218 		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1219 		MTK_QTX_CRX_PTR);
1220 	mtk_w32(eth,
1221 		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1222 		MTK_QTX_DRX_PTR);
1223 	mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
1224 
1225 	return 0;
1226 
1227 no_tx_mem:
1228 	return -ENOMEM;
1229 }
1230 
1231 static void mtk_tx_clean(struct mtk_eth *eth)
1232 {
1233 	struct mtk_tx_ring *ring = &eth->tx_ring;
1234 	int i;
1235 
1236 	if (ring->buf) {
1237 		for (i = 0; i < MTK_DMA_SIZE; i++)
1238 			mtk_tx_unmap(eth, &ring->buf[i]);
1239 		kfree(ring->buf);
1240 		ring->buf = NULL;
1241 	}
1242 
1243 	if (ring->dma) {
1244 		dma_free_coherent(eth->dev,
1245 				  MTK_DMA_SIZE * sizeof(*ring->dma),
1246 				  ring->dma,
1247 				  ring->phys);
1248 		ring->dma = NULL;
1249 	}
1250 }
1251 
1252 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1253 {
1254 	struct mtk_rx_ring *ring;
1255 	int rx_data_len, rx_dma_size;
1256 	int i;
1257 	u32 offset = 0;
1258 
1259 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
1260 		if (ring_no)
1261 			return -EINVAL;
1262 		ring = &eth->rx_ring_qdma;
1263 		offset = 0x1000;
1264 	} else {
1265 		ring = &eth->rx_ring[ring_no];
1266 	}
1267 
1268 	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
1269 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
1270 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
1271 	} else {
1272 		rx_data_len = ETH_DATA_LEN;
1273 		rx_dma_size = MTK_DMA_SIZE;
1274 	}
1275 
1276 	ring->frag_size = mtk_max_frag_size(rx_data_len);
1277 	ring->buf_size = mtk_max_buf_size(ring->frag_size);
1278 	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
1279 			     GFP_KERNEL);
1280 	if (!ring->data)
1281 		return -ENOMEM;
1282 
1283 	for (i = 0; i < rx_dma_size; i++) {
1284 		ring->data[i] = netdev_alloc_frag(ring->frag_size);
1285 		if (!ring->data[i])
1286 			return -ENOMEM;
1287 	}
1288 
1289 	ring->dma = dma_alloc_coherent(eth->dev,
1290 				       rx_dma_size * sizeof(*ring->dma),
1291 				       &ring->phys, GFP_ATOMIC);
1292 	if (!ring->dma)
1293 		return -ENOMEM;
1294 
1295 	for (i = 0; i < rx_dma_size; i++) {
1296 		dma_addr_t dma_addr = dma_map_single(eth->dev,
1297 				ring->data[i] + NET_SKB_PAD,
1298 				ring->buf_size,
1299 				DMA_FROM_DEVICE);
1300 		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1301 			return -ENOMEM;
1302 		ring->dma[i].rxd1 = (unsigned int)dma_addr;
1303 
1304 		ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
1305 	}
1306 	ring->dma_size = rx_dma_size;
1307 	ring->calc_idx_update = false;
1308 	ring->calc_idx = rx_dma_size - 1;
1309 	ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
1310 	/* make sure that all changes to the dma ring are flushed before we
1311 	 * continue
1312 	 */
1313 	wmb();
1314 
1315 	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
1316 	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
1317 	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
1318 	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
1319 
1320 	return 0;
1321 }
1322 
1323 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
1324 {
1325 	int i;
1326 
1327 	if (ring->data && ring->dma) {
1328 		for (i = 0; i < ring->dma_size; i++) {
1329 			if (!ring->data[i])
1330 				continue;
1331 			if (!ring->dma[i].rxd1)
1332 				continue;
1333 			dma_unmap_single(eth->dev,
1334 					 ring->dma[i].rxd1,
1335 					 ring->buf_size,
1336 					 DMA_FROM_DEVICE);
1337 			skb_free_frag(ring->data[i]);
1338 		}
1339 		kfree(ring->data);
1340 		ring->data = NULL;
1341 	}
1342 
1343 	if (ring->dma) {
1344 		dma_free_coherent(eth->dev,
1345 				  ring->dma_size * sizeof(*ring->dma),
1346 				  ring->dma,
1347 				  ring->phys);
1348 		ring->dma = NULL;
1349 	}
1350 }
1351 
1352 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
1353 {
1354 	int i;
1355 	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
1356 	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
1357 
1358 	/* set LRO rings to auto-learn modes */
1359 	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
1360 
1361 	/* validate LRO ring */
1362 	ring_ctrl_dw2 |= MTK_RING_VLD;
1363 
1364 	/* set AGE timer (unit: 20us) */
1365 	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
1366 	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
1367 
1368 	/* set max AGG timer (unit: 20us) */
1369 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
1370 
1371 	/* set max LRO AGG count */
1372 	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
1373 	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
1374 
1375 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
1376 		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
1377 		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
1378 		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
1379 	}
1380 
1381 	/* IPv4 checksum update enable */
1382 	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
1383 
1384 	/* switch priority comparison to packet count mode */
1385 	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
1386 
1387 	/* bandwidth threshold setting */
1388 	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
1389 
1390 	/* auto-learn score delta setting */
1391 	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
1392 
1393 	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
1394 	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
1395 		MTK_PDMA_LRO_ALT_REFRESH_TIMER);
1396 
1397 	/* set HW LRO mode & the max aggregation count for rx packets */
1398 	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
1399 
1400 	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
1401 	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
1402 
1403 	/* enable HW LRO */
1404 	lro_ctrl_dw0 |= MTK_LRO_EN;
1405 
1406 	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
1407 	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
1408 
1409 	return 0;
1410 }
1411 
1412 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
1413 {
1414 	int i;
1415 	u32 val;
1416 
1417 	/* relinquish lro rings, flush aggregated packets */
1418 	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
1419 
1420 	/* wait for relinquishments done */
1421 	for (i = 0; i < 10; i++) {
1422 		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
1423 		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
1424 			msleep(20);
1425 			continue;
1426 		}
1427 		break;
1428 	}
1429 
1430 	/* invalidate lro rings */
1431 	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
1432 		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
1433 
1434 	/* disable HW LRO */
1435 	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
1436 }
1437 
1438 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
1439 {
1440 	u32 reg_val;
1441 
1442 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1443 
1444 	/* invalidate the IP setting */
1445 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1446 
1447 	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
1448 
1449 	/* validate the IP setting */
1450 	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1451 }
1452 
1453 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
1454 {
1455 	u32 reg_val;
1456 
1457 	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1458 
1459 	/* invalidate the IP setting */
1460 	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1461 
1462 	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
1463 }
1464 
1465 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
1466 {
1467 	int cnt = 0;
1468 	int i;
1469 
1470 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1471 		if (mac->hwlro_ip[i])
1472 			cnt++;
1473 	}
1474 
1475 	return cnt;
1476 }
1477 
1478 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
1479 				struct ethtool_rxnfc *cmd)
1480 {
1481 	struct ethtool_rx_flow_spec *fsp =
1482 		(struct ethtool_rx_flow_spec *)&cmd->fs;
1483 	struct mtk_mac *mac = netdev_priv(dev);
1484 	struct mtk_eth *eth = mac->hw;
1485 	int hwlro_idx;
1486 
1487 	if ((fsp->flow_type != TCP_V4_FLOW) ||
1488 	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
1489 	    (fsp->location > 1))
1490 		return -EINVAL;
1491 
1492 	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
1493 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1494 
1495 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1496 
1497 	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
1498 
1499 	return 0;
1500 }
1501 
1502 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
1503 				struct ethtool_rxnfc *cmd)
1504 {
1505 	struct ethtool_rx_flow_spec *fsp =
1506 		(struct ethtool_rx_flow_spec *)&cmd->fs;
1507 	struct mtk_mac *mac = netdev_priv(dev);
1508 	struct mtk_eth *eth = mac->hw;
1509 	int hwlro_idx;
1510 
1511 	if (fsp->location > 1)
1512 		return -EINVAL;
1513 
1514 	mac->hwlro_ip[fsp->location] = 0;
1515 	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1516 
1517 	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1518 
1519 	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1520 
1521 	return 0;
1522 }
1523 
1524 static void mtk_hwlro_netdev_disable(struct net_device *dev)
1525 {
1526 	struct mtk_mac *mac = netdev_priv(dev);
1527 	struct mtk_eth *eth = mac->hw;
1528 	int i, hwlro_idx;
1529 
1530 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1531 		mac->hwlro_ip[i] = 0;
1532 		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
1533 
1534 		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1535 	}
1536 
1537 	mac->hwlro_ip_cnt = 0;
1538 }
1539 
1540 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
1541 				    struct ethtool_rxnfc *cmd)
1542 {
1543 	struct mtk_mac *mac = netdev_priv(dev);
1544 	struct ethtool_rx_flow_spec *fsp =
1545 		(struct ethtool_rx_flow_spec *)&cmd->fs;
1546 
1547 	/* only tcp dst ipv4 is meaningful, others are meaningless */
1548 	fsp->flow_type = TCP_V4_FLOW;
1549 	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
1550 	fsp->m_u.tcp_ip4_spec.ip4dst = 0;
1551 
1552 	fsp->h_u.tcp_ip4_spec.ip4src = 0;
1553 	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
1554 	fsp->h_u.tcp_ip4_spec.psrc = 0;
1555 	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
1556 	fsp->h_u.tcp_ip4_spec.pdst = 0;
1557 	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
1558 	fsp->h_u.tcp_ip4_spec.tos = 0;
1559 	fsp->m_u.tcp_ip4_spec.tos = 0xff;
1560 
1561 	return 0;
1562 }
1563 
1564 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
1565 				  struct ethtool_rxnfc *cmd,
1566 				  u32 *rule_locs)
1567 {
1568 	struct mtk_mac *mac = netdev_priv(dev);
1569 	int cnt = 0;
1570 	int i;
1571 
1572 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1573 		if (mac->hwlro_ip[i]) {
1574 			rule_locs[cnt] = i;
1575 			cnt++;
1576 		}
1577 	}
1578 
1579 	cmd->rule_cnt = cnt;
1580 
1581 	return 0;
1582 }
1583 
1584 static netdev_features_t mtk_fix_features(struct net_device *dev,
1585 					  netdev_features_t features)
1586 {
1587 	if (!(features & NETIF_F_LRO)) {
1588 		struct mtk_mac *mac = netdev_priv(dev);
1589 		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1590 
1591 		if (ip_cnt) {
1592 			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
1593 
1594 			features |= NETIF_F_LRO;
1595 		}
1596 	}
1597 
1598 	return features;
1599 }
1600 
1601 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
1602 {
1603 	int err = 0;
1604 
1605 	if (!((dev->features ^ features) & NETIF_F_LRO))
1606 		return 0;
1607 
1608 	if (!(features & NETIF_F_LRO))
1609 		mtk_hwlro_netdev_disable(dev);
1610 
1611 	return err;
1612 }
1613 
1614 /* wait for DMA to finish whatever it is doing before we start using it again */
1615 static int mtk_dma_busy_wait(struct mtk_eth *eth)
1616 {
1617 	unsigned long t_start = jiffies;
1618 
1619 	while (1) {
1620 		if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
1621 		      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
1622 			return 0;
1623 		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
1624 			break;
1625 	}
1626 
1627 	dev_err(eth->dev, "DMA init timeout\n");
1628 	return -1;
1629 }
1630 
1631 static int mtk_dma_init(struct mtk_eth *eth)
1632 {
1633 	int err;
1634 	u32 i;
1635 
1636 	if (mtk_dma_busy_wait(eth))
1637 		return -EBUSY;
1638 
1639 	/* QDMA needs scratch memory for internal reordering of the
1640 	 * descriptors
1641 	 */
1642 	err = mtk_init_fq_dma(eth);
1643 	if (err)
1644 		return err;
1645 
1646 	err = mtk_tx_alloc(eth);
1647 	if (err)
1648 		return err;
1649 
1650 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
1651 	if (err)
1652 		return err;
1653 
1654 	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
1655 	if (err)
1656 		return err;
1657 
1658 	if (eth->hwlro) {
1659 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
1660 			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
1661 			if (err)
1662 				return err;
1663 		}
1664 		err = mtk_hwlro_rx_init(eth);
1665 		if (err)
1666 			return err;
1667 	}
1668 
1669 	/* Enable random early drop and set drop threshold automatically */
1670 	mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
1671 		MTK_QDMA_FC_THRES);
1672 	mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
1673 
1674 	return 0;
1675 }
1676 
1677 static void mtk_dma_free(struct mtk_eth *eth)
1678 {
1679 	int i;
1680 
1681 	for (i = 0; i < MTK_MAC_COUNT; i++)
1682 		if (eth->netdev[i])
1683 			netdev_reset_queue(eth->netdev[i]);
1684 	if (eth->scratch_ring) {
1685 		dma_free_coherent(eth->dev,
1686 				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
1687 				  eth->scratch_ring,
1688 				  eth->phy_scratch_ring);
1689 		eth->scratch_ring = NULL;
1690 		eth->phy_scratch_ring = 0;
1691 	}
1692 	mtk_tx_clean(eth);
1693 	mtk_rx_clean(eth, &eth->rx_ring[0]);
1694 	mtk_rx_clean(eth, &eth->rx_ring_qdma);
1695 
1696 	if (eth->hwlro) {
1697 		mtk_hwlro_rx_uninit(eth);
1698 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
1699 			mtk_rx_clean(eth, &eth->rx_ring[i]);
1700 	}
1701 
1702 	kfree(eth->scratch_head);
1703 }
1704 
1705 static void mtk_tx_timeout(struct net_device *dev)
1706 {
1707 	struct mtk_mac *mac = netdev_priv(dev);
1708 	struct mtk_eth *eth = mac->hw;
1709 
1710 	eth->netdev[mac->id]->stats.tx_errors++;
1711 	netif_err(eth, tx_err, dev,
1712 		  "transmit timed out\n");
1713 	schedule_work(&eth->pending_work);
1714 }
1715 
1716 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
1717 {
1718 	struct mtk_eth *eth = _eth;
1719 
1720 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
1721 		__napi_schedule(&eth->rx_napi);
1722 		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
1723 	}
1724 
1725 	return IRQ_HANDLED;
1726 }
1727 
1728 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
1729 {
1730 	struct mtk_eth *eth = _eth;
1731 
1732 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
1733 		__napi_schedule(&eth->tx_napi);
1734 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
1735 	}
1736 
1737 	return IRQ_HANDLED;
1738 }
1739 
1740 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
1741 {
1742 	struct mtk_eth *eth = _eth;
1743 
1744 	if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
1745 		if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
1746 			mtk_handle_irq_rx(irq, _eth);
1747 	}
1748 	if (mtk_r32(eth, MTK_QDMA_INT_MASK) & MTK_TX_DONE_INT) {
1749 		if (mtk_r32(eth, MTK_QMTK_INT_STATUS) & MTK_TX_DONE_INT)
1750 			mtk_handle_irq_tx(irq, _eth);
1751 	}
1752 
1753 	return IRQ_HANDLED;
1754 }
1755 
1756 #ifdef CONFIG_NET_POLL_CONTROLLER
1757 static void mtk_poll_controller(struct net_device *dev)
1758 {
1759 	struct mtk_mac *mac = netdev_priv(dev);
1760 	struct mtk_eth *eth = mac->hw;
1761 
1762 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
1763 	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
1764 	mtk_handle_irq_rx(eth->irq[2], dev);
1765 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
1766 	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
1767 }
1768 #endif
1769 
1770 static int mtk_start_dma(struct mtk_eth *eth)
1771 {
1772 	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
1773 	int err;
1774 
1775 	err = mtk_dma_init(eth);
1776 	if (err) {
1777 		mtk_dma_free(eth);
1778 		return err;
1779 	}
1780 
1781 	mtk_w32(eth,
1782 		MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
1783 		MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
1784 		MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
1785 		MTK_RX_BT_32DWORDS,
1786 		MTK_QDMA_GLO_CFG);
1787 
1788 	mtk_w32(eth,
1789 		MTK_RX_DMA_EN | rx_2b_offset |
1790 		MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
1791 		MTK_PDMA_GLO_CFG);
1792 
1793 	return 0;
1794 }
1795 
1796 static int mtk_open(struct net_device *dev)
1797 {
1798 	struct mtk_mac *mac = netdev_priv(dev);
1799 	struct mtk_eth *eth = mac->hw;
1800 
1801 	/* we run 2 netdevs on the same dma ring so we only bring it up once */
1802 	if (!refcount_read(&eth->dma_refcnt)) {
1803 		int err = mtk_start_dma(eth);
1804 
1805 		if (err)
1806 			return err;
1807 
1808 		napi_enable(&eth->tx_napi);
1809 		napi_enable(&eth->rx_napi);
1810 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
1811 		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
1812 		refcount_set(&eth->dma_refcnt, 1);
1813 	}
1814 	else
1815 		refcount_inc(&eth->dma_refcnt);
1816 
1817 	phy_start(dev->phydev);
1818 	netif_start_queue(dev);
1819 
1820 	return 0;
1821 }
1822 
1823 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
1824 {
1825 	u32 val;
1826 	int i;
1827 
1828 	/* stop the dma engine */
1829 	spin_lock_bh(&eth->page_lock);
1830 	val = mtk_r32(eth, glo_cfg);
1831 	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
1832 		glo_cfg);
1833 	spin_unlock_bh(&eth->page_lock);
1834 
1835 	/* wait for dma stop */
1836 	for (i = 0; i < 10; i++) {
1837 		val = mtk_r32(eth, glo_cfg);
1838 		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
1839 			msleep(20);
1840 			continue;
1841 		}
1842 		break;
1843 	}
1844 }
1845 
1846 static int mtk_stop(struct net_device *dev)
1847 {
1848 	struct mtk_mac *mac = netdev_priv(dev);
1849 	struct mtk_eth *eth = mac->hw;
1850 
1851 	netif_tx_disable(dev);
1852 	phy_stop(dev->phydev);
1853 
1854 	/* only shutdown DMA if this is the last user */
1855 	if (!refcount_dec_and_test(&eth->dma_refcnt))
1856 		return 0;
1857 
1858 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
1859 	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
1860 	napi_disable(&eth->tx_napi);
1861 	napi_disable(&eth->rx_napi);
1862 
1863 	mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
1864 	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
1865 
1866 	mtk_dma_free(eth);
1867 
1868 	return 0;
1869 }
1870 
1871 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
1872 {
1873 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
1874 			   reset_bits,
1875 			   reset_bits);
1876 
1877 	usleep_range(1000, 1100);
1878 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
1879 			   reset_bits,
1880 			   ~reset_bits);
1881 	mdelay(10);
1882 }
1883 
1884 static void mtk_clk_disable(struct mtk_eth *eth)
1885 {
1886 	int clk;
1887 
1888 	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
1889 		clk_disable_unprepare(eth->clks[clk]);
1890 }
1891 
1892 static int mtk_clk_enable(struct mtk_eth *eth)
1893 {
1894 	int clk, ret;
1895 
1896 	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
1897 		ret = clk_prepare_enable(eth->clks[clk]);
1898 		if (ret)
1899 			goto err_disable_clks;
1900 	}
1901 
1902 	return 0;
1903 
1904 err_disable_clks:
1905 	while (--clk >= 0)
1906 		clk_disable_unprepare(eth->clks[clk]);
1907 
1908 	return ret;
1909 }
1910 
1911 static int mtk_hw_init(struct mtk_eth *eth)
1912 {
1913 	int i, val, ret;
1914 
1915 	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
1916 		return 0;
1917 
1918 	pm_runtime_enable(eth->dev);
1919 	pm_runtime_get_sync(eth->dev);
1920 
1921 	ret = mtk_clk_enable(eth);
1922 	if (ret)
1923 		goto err_disable_pm;
1924 
1925 	ethsys_reset(eth, RSTCTRL_FE);
1926 	ethsys_reset(eth, RSTCTRL_PPE);
1927 
1928 	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
1929 	for (i = 0; i < MTK_MAC_COUNT; i++) {
1930 		if (!eth->mac[i])
1931 			continue;
1932 		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
1933 		val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
1934 	}
1935 	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
1936 
1937 	if (eth->pctl) {
1938 		/* Set GE2 driving and slew rate */
1939 		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
1940 
1941 		/* set GE2 TDSEL */
1942 		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
1943 
1944 		/* set GE2 TUNE */
1945 		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
1946 	}
1947 
1948 	/* Set linkdown as the default for each GMAC. Its own MCR would be set
1949 	 * up with the more appropriate value when mtk_phy_link_adjust call is
1950 	 * being invoked.
1951 	 */
1952 	for (i = 0; i < MTK_MAC_COUNT; i++)
1953 		mtk_w32(eth, 0, MTK_MAC_MCR(i));
1954 
1955 	/* Indicates CDM to parse the MTK special tag from CPU
1956 	 * which also is working out for untag packets.
1957 	 */
1958 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
1959 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
1960 
1961 	/* Enable RX VLan Offloading */
1962 	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
1963 
1964 	/* enable interrupt delay for RX */
1965 	mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
1966 
1967 	/* disable delay and normal interrupt */
1968 	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
1969 	mtk_tx_irq_disable(eth, ~0);
1970 	mtk_rx_irq_disable(eth, ~0);
1971 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
1972 	mtk_w32(eth, 0, MTK_RST_GL);
1973 
1974 	/* FE int grouping */
1975 	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
1976 	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
1977 	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
1978 	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
1979 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
1980 
1981 	for (i = 0; i < 2; i++) {
1982 		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
1983 
1984 		/* setup the forward port to send frame to PDMA */
1985 		val &= ~0xffff;
1986 
1987 		/* Enable RX checksum */
1988 		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
1989 
1990 		/* setup the mac dma */
1991 		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
1992 	}
1993 
1994 	return 0;
1995 
1996 err_disable_pm:
1997 	pm_runtime_put_sync(eth->dev);
1998 	pm_runtime_disable(eth->dev);
1999 
2000 	return ret;
2001 }
2002 
2003 static int mtk_hw_deinit(struct mtk_eth *eth)
2004 {
2005 	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
2006 		return 0;
2007 
2008 	mtk_clk_disable(eth);
2009 
2010 	pm_runtime_put_sync(eth->dev);
2011 	pm_runtime_disable(eth->dev);
2012 
2013 	return 0;
2014 }
2015 
2016 static int __init mtk_init(struct net_device *dev)
2017 {
2018 	struct mtk_mac *mac = netdev_priv(dev);
2019 	struct mtk_eth *eth = mac->hw;
2020 	const char *mac_addr;
2021 
2022 	mac_addr = of_get_mac_address(mac->of_node);
2023 	if (!IS_ERR(mac_addr))
2024 		ether_addr_copy(dev->dev_addr, mac_addr);
2025 
2026 	/* If the mac address is invalid, use random mac address  */
2027 	if (!is_valid_ether_addr(dev->dev_addr)) {
2028 		eth_hw_addr_random(dev);
2029 		dev_err(eth->dev, "generated random MAC address %pM\n",
2030 			dev->dev_addr);
2031 	}
2032 
2033 	return mtk_phy_connect(dev);
2034 }
2035 
2036 static void mtk_uninit(struct net_device *dev)
2037 {
2038 	struct mtk_mac *mac = netdev_priv(dev);
2039 	struct mtk_eth *eth = mac->hw;
2040 
2041 	phy_disconnect(dev->phydev);
2042 	if (of_phy_is_fixed_link(mac->of_node))
2043 		of_phy_deregister_fixed_link(mac->of_node);
2044 	mtk_tx_irq_disable(eth, ~0);
2045 	mtk_rx_irq_disable(eth, ~0);
2046 }
2047 
2048 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2049 {
2050 	switch (cmd) {
2051 	case SIOCGMIIPHY:
2052 	case SIOCGMIIREG:
2053 	case SIOCSMIIREG:
2054 		return phy_mii_ioctl(dev->phydev, ifr, cmd);
2055 	default:
2056 		break;
2057 	}
2058 
2059 	return -EOPNOTSUPP;
2060 }
2061 
2062 static void mtk_pending_work(struct work_struct *work)
2063 {
2064 	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
2065 	int err, i;
2066 	unsigned long restart = 0;
2067 
2068 	rtnl_lock();
2069 
2070 	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
2071 
2072 	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
2073 		cpu_relax();
2074 
2075 	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
2076 	/* stop all devices to make sure that dma is properly shut down */
2077 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2078 		if (!eth->netdev[i])
2079 			continue;
2080 		mtk_stop(eth->netdev[i]);
2081 		__set_bit(i, &restart);
2082 	}
2083 	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
2084 
2085 	/* restart underlying hardware such as power, clock, pin mux
2086 	 * and the connected phy
2087 	 */
2088 	mtk_hw_deinit(eth);
2089 
2090 	if (eth->dev->pins)
2091 		pinctrl_select_state(eth->dev->pins->p,
2092 				     eth->dev->pins->default_state);
2093 	mtk_hw_init(eth);
2094 
2095 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2096 		if (!eth->mac[i] ||
2097 		    of_phy_is_fixed_link(eth->mac[i]->of_node))
2098 			continue;
2099 		err = phy_init_hw(eth->netdev[i]->phydev);
2100 		if (err)
2101 			dev_err(eth->dev, "%s: PHY init failed.\n",
2102 				eth->netdev[i]->name);
2103 	}
2104 
2105 	/* restart DMA and enable IRQs */
2106 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2107 		if (!test_bit(i, &restart))
2108 			continue;
2109 		err = mtk_open(eth->netdev[i]);
2110 		if (err) {
2111 			netif_alert(eth, ifup, eth->netdev[i],
2112 			      "Driver up/down cycle failed, closing device.\n");
2113 			dev_close(eth->netdev[i]);
2114 		}
2115 	}
2116 
2117 	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
2118 
2119 	clear_bit_unlock(MTK_RESETTING, &eth->state);
2120 
2121 	rtnl_unlock();
2122 }
2123 
2124 static int mtk_free_dev(struct mtk_eth *eth)
2125 {
2126 	int i;
2127 
2128 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2129 		if (!eth->netdev[i])
2130 			continue;
2131 		free_netdev(eth->netdev[i]);
2132 	}
2133 
2134 	return 0;
2135 }
2136 
2137 static int mtk_unreg_dev(struct mtk_eth *eth)
2138 {
2139 	int i;
2140 
2141 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2142 		if (!eth->netdev[i])
2143 			continue;
2144 		unregister_netdev(eth->netdev[i]);
2145 	}
2146 
2147 	return 0;
2148 }
2149 
2150 static int mtk_cleanup(struct mtk_eth *eth)
2151 {
2152 	mtk_unreg_dev(eth);
2153 	mtk_free_dev(eth);
2154 	cancel_work_sync(&eth->pending_work);
2155 
2156 	return 0;
2157 }
2158 
2159 static int mtk_get_link_ksettings(struct net_device *ndev,
2160 				  struct ethtool_link_ksettings *cmd)
2161 {
2162 	struct mtk_mac *mac = netdev_priv(ndev);
2163 
2164 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2165 		return -EBUSY;
2166 
2167 	phy_ethtool_ksettings_get(ndev->phydev, cmd);
2168 
2169 	return 0;
2170 }
2171 
2172 static int mtk_set_link_ksettings(struct net_device *ndev,
2173 				  const struct ethtool_link_ksettings *cmd)
2174 {
2175 	struct mtk_mac *mac = netdev_priv(ndev);
2176 
2177 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2178 		return -EBUSY;
2179 
2180 	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
2181 }
2182 
2183 static void mtk_get_drvinfo(struct net_device *dev,
2184 			    struct ethtool_drvinfo *info)
2185 {
2186 	struct mtk_mac *mac = netdev_priv(dev);
2187 
2188 	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
2189 	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
2190 	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
2191 }
2192 
2193 static u32 mtk_get_msglevel(struct net_device *dev)
2194 {
2195 	struct mtk_mac *mac = netdev_priv(dev);
2196 
2197 	return mac->hw->msg_enable;
2198 }
2199 
2200 static void mtk_set_msglevel(struct net_device *dev, u32 value)
2201 {
2202 	struct mtk_mac *mac = netdev_priv(dev);
2203 
2204 	mac->hw->msg_enable = value;
2205 }
2206 
2207 static int mtk_nway_reset(struct net_device *dev)
2208 {
2209 	struct mtk_mac *mac = netdev_priv(dev);
2210 
2211 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2212 		return -EBUSY;
2213 
2214 	return genphy_restart_aneg(dev->phydev);
2215 }
2216 
2217 static u32 mtk_get_link(struct net_device *dev)
2218 {
2219 	struct mtk_mac *mac = netdev_priv(dev);
2220 	int err;
2221 
2222 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2223 		return -EBUSY;
2224 
2225 	err = genphy_update_link(dev->phydev);
2226 	if (err)
2227 		return ethtool_op_get_link(dev);
2228 
2229 	return dev->phydev->link;
2230 }
2231 
2232 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2233 {
2234 	int i;
2235 
2236 	switch (stringset) {
2237 	case ETH_SS_STATS:
2238 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
2239 			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
2240 			data += ETH_GSTRING_LEN;
2241 		}
2242 		break;
2243 	}
2244 }
2245 
2246 static int mtk_get_sset_count(struct net_device *dev, int sset)
2247 {
2248 	switch (sset) {
2249 	case ETH_SS_STATS:
2250 		return ARRAY_SIZE(mtk_ethtool_stats);
2251 	default:
2252 		return -EOPNOTSUPP;
2253 	}
2254 }
2255 
2256 static void mtk_get_ethtool_stats(struct net_device *dev,
2257 				  struct ethtool_stats *stats, u64 *data)
2258 {
2259 	struct mtk_mac *mac = netdev_priv(dev);
2260 	struct mtk_hw_stats *hwstats = mac->hw_stats;
2261 	u64 *data_src, *data_dst;
2262 	unsigned int start;
2263 	int i;
2264 
2265 	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2266 		return;
2267 
2268 	if (netif_running(dev) && netif_device_present(dev)) {
2269 		if (spin_trylock_bh(&hwstats->stats_lock)) {
2270 			mtk_stats_update_mac(mac);
2271 			spin_unlock_bh(&hwstats->stats_lock);
2272 		}
2273 	}
2274 
2275 	data_src = (u64 *)hwstats;
2276 
2277 	do {
2278 		data_dst = data;
2279 		start = u64_stats_fetch_begin_irq(&hwstats->syncp);
2280 
2281 		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
2282 			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
2283 	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
2284 }
2285 
2286 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2287 			 u32 *rule_locs)
2288 {
2289 	int ret = -EOPNOTSUPP;
2290 
2291 	switch (cmd->cmd) {
2292 	case ETHTOOL_GRXRINGS:
2293 		if (dev->hw_features & NETIF_F_LRO) {
2294 			cmd->data = MTK_MAX_RX_RING_NUM;
2295 			ret = 0;
2296 		}
2297 		break;
2298 	case ETHTOOL_GRXCLSRLCNT:
2299 		if (dev->hw_features & NETIF_F_LRO) {
2300 			struct mtk_mac *mac = netdev_priv(dev);
2301 
2302 			cmd->rule_cnt = mac->hwlro_ip_cnt;
2303 			ret = 0;
2304 		}
2305 		break;
2306 	case ETHTOOL_GRXCLSRULE:
2307 		if (dev->hw_features & NETIF_F_LRO)
2308 			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
2309 		break;
2310 	case ETHTOOL_GRXCLSRLALL:
2311 		if (dev->hw_features & NETIF_F_LRO)
2312 			ret = mtk_hwlro_get_fdir_all(dev, cmd,
2313 						     rule_locs);
2314 		break;
2315 	default:
2316 		break;
2317 	}
2318 
2319 	return ret;
2320 }
2321 
2322 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2323 {
2324 	int ret = -EOPNOTSUPP;
2325 
2326 	switch (cmd->cmd) {
2327 	case ETHTOOL_SRXCLSRLINS:
2328 		if (dev->hw_features & NETIF_F_LRO)
2329 			ret = mtk_hwlro_add_ipaddr(dev, cmd);
2330 		break;
2331 	case ETHTOOL_SRXCLSRLDEL:
2332 		if (dev->hw_features & NETIF_F_LRO)
2333 			ret = mtk_hwlro_del_ipaddr(dev, cmd);
2334 		break;
2335 	default:
2336 		break;
2337 	}
2338 
2339 	return ret;
2340 }
2341 
2342 static const struct ethtool_ops mtk_ethtool_ops = {
2343 	.get_link_ksettings	= mtk_get_link_ksettings,
2344 	.set_link_ksettings	= mtk_set_link_ksettings,
2345 	.get_drvinfo		= mtk_get_drvinfo,
2346 	.get_msglevel		= mtk_get_msglevel,
2347 	.set_msglevel		= mtk_set_msglevel,
2348 	.nway_reset		= mtk_nway_reset,
2349 	.get_link		= mtk_get_link,
2350 	.get_strings		= mtk_get_strings,
2351 	.get_sset_count		= mtk_get_sset_count,
2352 	.get_ethtool_stats	= mtk_get_ethtool_stats,
2353 	.get_rxnfc		= mtk_get_rxnfc,
2354 	.set_rxnfc              = mtk_set_rxnfc,
2355 };
2356 
2357 static const struct net_device_ops mtk_netdev_ops = {
2358 	.ndo_init		= mtk_init,
2359 	.ndo_uninit		= mtk_uninit,
2360 	.ndo_open		= mtk_open,
2361 	.ndo_stop		= mtk_stop,
2362 	.ndo_start_xmit		= mtk_start_xmit,
2363 	.ndo_set_mac_address	= mtk_set_mac_address,
2364 	.ndo_validate_addr	= eth_validate_addr,
2365 	.ndo_do_ioctl		= mtk_do_ioctl,
2366 	.ndo_tx_timeout		= mtk_tx_timeout,
2367 	.ndo_get_stats64        = mtk_get_stats64,
2368 	.ndo_fix_features	= mtk_fix_features,
2369 	.ndo_set_features	= mtk_set_features,
2370 #ifdef CONFIG_NET_POLL_CONTROLLER
2371 	.ndo_poll_controller	= mtk_poll_controller,
2372 #endif
2373 };
2374 
2375 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
2376 {
2377 	struct mtk_mac *mac;
2378 	const __be32 *_id = of_get_property(np, "reg", NULL);
2379 	int id, err;
2380 
2381 	if (!_id) {
2382 		dev_err(eth->dev, "missing mac id\n");
2383 		return -EINVAL;
2384 	}
2385 
2386 	id = be32_to_cpup(_id);
2387 	if (id >= MTK_MAC_COUNT) {
2388 		dev_err(eth->dev, "%d is not a valid mac id\n", id);
2389 		return -EINVAL;
2390 	}
2391 
2392 	if (eth->netdev[id]) {
2393 		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
2394 		return -EINVAL;
2395 	}
2396 
2397 	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
2398 	if (!eth->netdev[id]) {
2399 		dev_err(eth->dev, "alloc_etherdev failed\n");
2400 		return -ENOMEM;
2401 	}
2402 	mac = netdev_priv(eth->netdev[id]);
2403 	eth->mac[id] = mac;
2404 	mac->id = id;
2405 	mac->hw = eth;
2406 	mac->of_node = np;
2407 
2408 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
2409 	mac->hwlro_ip_cnt = 0;
2410 
2411 	mac->hw_stats = devm_kzalloc(eth->dev,
2412 				     sizeof(*mac->hw_stats),
2413 				     GFP_KERNEL);
2414 	if (!mac->hw_stats) {
2415 		dev_err(eth->dev, "failed to allocate counter memory\n");
2416 		err = -ENOMEM;
2417 		goto free_netdev;
2418 	}
2419 	spin_lock_init(&mac->hw_stats->stats_lock);
2420 	u64_stats_init(&mac->hw_stats->syncp);
2421 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
2422 
2423 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
2424 	eth->netdev[id]->watchdog_timeo = 5 * HZ;
2425 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
2426 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
2427 
2428 	eth->netdev[id]->hw_features = MTK_HW_FEATURES;
2429 	if (eth->hwlro)
2430 		eth->netdev[id]->hw_features |= NETIF_F_LRO;
2431 
2432 	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
2433 		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
2434 	eth->netdev[id]->features |= MTK_HW_FEATURES;
2435 	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
2436 
2437 	eth->netdev[id]->irq = eth->irq[0];
2438 	eth->netdev[id]->dev.of_node = np;
2439 
2440 	return 0;
2441 
2442 free_netdev:
2443 	free_netdev(eth->netdev[id]);
2444 	return err;
2445 }
2446 
2447 static int mtk_probe(struct platform_device *pdev)
2448 {
2449 	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2450 	struct device_node *mac_np;
2451 	struct mtk_eth *eth;
2452 	int err;
2453 	int i;
2454 
2455 	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
2456 	if (!eth)
2457 		return -ENOMEM;
2458 
2459 	eth->soc = of_device_get_match_data(&pdev->dev);
2460 
2461 	eth->dev = &pdev->dev;
2462 	eth->base = devm_ioremap_resource(&pdev->dev, res);
2463 	if (IS_ERR(eth->base))
2464 		return PTR_ERR(eth->base);
2465 
2466 	spin_lock_init(&eth->page_lock);
2467 	spin_lock_init(&eth->tx_irq_lock);
2468 	spin_lock_init(&eth->rx_irq_lock);
2469 
2470 	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2471 						      "mediatek,ethsys");
2472 	if (IS_ERR(eth->ethsys)) {
2473 		dev_err(&pdev->dev, "no ethsys regmap found\n");
2474 		return PTR_ERR(eth->ethsys);
2475 	}
2476 
2477 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
2478 		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2479 							     "mediatek,infracfg");
2480 		if (IS_ERR(eth->infra)) {
2481 			dev_err(&pdev->dev, "no infracfg regmap found\n");
2482 			return PTR_ERR(eth->infra);
2483 		}
2484 	}
2485 
2486 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
2487 		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
2488 					  GFP_KERNEL);
2489 		if (!eth->sgmii)
2490 			return -ENOMEM;
2491 
2492 		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
2493 				     eth->soc->ana_rgc3);
2494 
2495 		if (err)
2496 			return err;
2497 	}
2498 
2499 	if (eth->soc->required_pctl) {
2500 		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2501 							    "mediatek,pctl");
2502 		if (IS_ERR(eth->pctl)) {
2503 			dev_err(&pdev->dev, "no pctl regmap found\n");
2504 			return PTR_ERR(eth->pctl);
2505 		}
2506 	}
2507 
2508 	for (i = 0; i < 3; i++) {
2509 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
2510 			eth->irq[i] = eth->irq[0];
2511 		else
2512 			eth->irq[i] = platform_get_irq(pdev, i);
2513 		if (eth->irq[i] < 0) {
2514 			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
2515 			return -ENXIO;
2516 		}
2517 	}
2518 	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
2519 		eth->clks[i] = devm_clk_get(eth->dev,
2520 					    mtk_clks_source_name[i]);
2521 		if (IS_ERR(eth->clks[i])) {
2522 			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
2523 				return -EPROBE_DEFER;
2524 			if (eth->soc->required_clks & BIT(i)) {
2525 				dev_err(&pdev->dev, "clock %s not found\n",
2526 					mtk_clks_source_name[i]);
2527 				return -EINVAL;
2528 			}
2529 			eth->clks[i] = NULL;
2530 		}
2531 	}
2532 
2533 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
2534 	INIT_WORK(&eth->pending_work, mtk_pending_work);
2535 
2536 	err = mtk_hw_init(eth);
2537 	if (err)
2538 		return err;
2539 
2540 	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
2541 
2542 	for_each_child_of_node(pdev->dev.of_node, mac_np) {
2543 		if (!of_device_is_compatible(mac_np,
2544 					     "mediatek,eth-mac"))
2545 			continue;
2546 
2547 		if (!of_device_is_available(mac_np))
2548 			continue;
2549 
2550 		err = mtk_add_mac(eth, mac_np);
2551 		if (err) {
2552 			of_node_put(mac_np);
2553 			goto err_deinit_hw;
2554 		}
2555 	}
2556 
2557 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
2558 		err = devm_request_irq(eth->dev, eth->irq[0],
2559 				       mtk_handle_irq, 0,
2560 				       dev_name(eth->dev), eth);
2561 	} else {
2562 		err = devm_request_irq(eth->dev, eth->irq[1],
2563 				       mtk_handle_irq_tx, 0,
2564 				       dev_name(eth->dev), eth);
2565 		if (err)
2566 			goto err_free_dev;
2567 
2568 		err = devm_request_irq(eth->dev, eth->irq[2],
2569 				       mtk_handle_irq_rx, 0,
2570 				       dev_name(eth->dev), eth);
2571 	}
2572 	if (err)
2573 		goto err_free_dev;
2574 
2575 	err = mtk_mdio_init(eth);
2576 	if (err)
2577 		goto err_free_dev;
2578 
2579 	for (i = 0; i < MTK_MAX_DEVS; i++) {
2580 		if (!eth->netdev[i])
2581 			continue;
2582 
2583 		err = register_netdev(eth->netdev[i]);
2584 		if (err) {
2585 			dev_err(eth->dev, "error bringing up device\n");
2586 			goto err_deinit_mdio;
2587 		} else
2588 			netif_info(eth, probe, eth->netdev[i],
2589 				   "mediatek frame engine at 0x%08lx, irq %d\n",
2590 				   eth->netdev[i]->base_addr, eth->irq[0]);
2591 	}
2592 
2593 	/* we run 2 devices on the same DMA ring so we need a dummy device
2594 	 * for NAPI to work
2595 	 */
2596 	init_dummy_netdev(&eth->dummy_dev);
2597 	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
2598 		       MTK_NAPI_WEIGHT);
2599 	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
2600 		       MTK_NAPI_WEIGHT);
2601 
2602 	platform_set_drvdata(pdev, eth);
2603 
2604 	return 0;
2605 
2606 err_deinit_mdio:
2607 	mtk_mdio_cleanup(eth);
2608 err_free_dev:
2609 	mtk_free_dev(eth);
2610 err_deinit_hw:
2611 	mtk_hw_deinit(eth);
2612 
2613 	return err;
2614 }
2615 
2616 static int mtk_remove(struct platform_device *pdev)
2617 {
2618 	struct mtk_eth *eth = platform_get_drvdata(pdev);
2619 	int i;
2620 
2621 	/* stop all devices to make sure that dma is properly shut down */
2622 	for (i = 0; i < MTK_MAC_COUNT; i++) {
2623 		if (!eth->netdev[i])
2624 			continue;
2625 		mtk_stop(eth->netdev[i]);
2626 	}
2627 
2628 	mtk_hw_deinit(eth);
2629 
2630 	netif_napi_del(&eth->tx_napi);
2631 	netif_napi_del(&eth->rx_napi);
2632 	mtk_cleanup(eth);
2633 	mtk_mdio_cleanup(eth);
2634 
2635 	return 0;
2636 }
2637 
2638 static const struct mtk_soc_data mt2701_data = {
2639 	.caps = MT7623_CAPS | MTK_HWLRO,
2640 	.required_clks = MT7623_CLKS_BITMAP,
2641 	.required_pctl = true,
2642 };
2643 
2644 static const struct mtk_soc_data mt7621_data = {
2645 	.caps = MT7621_CAPS,
2646 	.required_clks = MT7621_CLKS_BITMAP,
2647 	.required_pctl = false,
2648 };
2649 
2650 static const struct mtk_soc_data mt7622_data = {
2651 	.ana_rgc3 = 0x2028,
2652 	.caps = MT7622_CAPS | MTK_HWLRO,
2653 	.required_clks = MT7622_CLKS_BITMAP,
2654 	.required_pctl = false,
2655 };
2656 
2657 static const struct mtk_soc_data mt7623_data = {
2658 	.caps = MT7623_CAPS | MTK_HWLRO,
2659 	.required_clks = MT7623_CLKS_BITMAP,
2660 	.required_pctl = true,
2661 };
2662 
2663 static const struct mtk_soc_data mt7629_data = {
2664 	.ana_rgc3 = 0x128,
2665 	.caps = MT7629_CAPS | MTK_HWLRO,
2666 	.required_clks = MT7629_CLKS_BITMAP,
2667 	.required_pctl = false,
2668 };
2669 
2670 const struct of_device_id of_mtk_match[] = {
2671 	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
2672 	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
2673 	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
2674 	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
2675 	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
2676 	{},
2677 };
2678 MODULE_DEVICE_TABLE(of, of_mtk_match);
2679 
2680 static struct platform_driver mtk_driver = {
2681 	.probe = mtk_probe,
2682 	.remove = mtk_remove,
2683 	.driver = {
2684 		.name = "mtk_soc_eth",
2685 		.of_match_table = of_mtk_match,
2686 	},
2687 };
2688 
2689 module_platform_driver(mtk_driver);
2690 
2691 MODULE_LICENSE("GPL");
2692 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
2693 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
2694