1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Broadcom BCM7xxx System Port Ethernet MAC driver
4  *
5  * Copyright (C) 2014 Broadcom Corporation
6  */
7 
8 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
9 
10 #include <linux/init.h>
11 #include <linux/interrupt.h>
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/netdevice.h>
15 #include <linux/dsa/brcm.h>
16 #include <linux/etherdevice.h>
17 #include <linux/platform_device.h>
18 #include <linux/of.h>
19 #include <linux/of_net.h>
20 #include <linux/of_mdio.h>
21 #include <linux/phy.h>
22 #include <linux/phy_fixed.h>
23 #include <net/dsa.h>
24 #include <linux/clk.h>
25 #include <net/ip.h>
26 #include <net/ipv6.h>
27 
28 #include "bcmsysport.h"
29 
30 /* I/O accessors register helpers */
31 #define BCM_SYSPORT_IO_MACRO(name, offset) \
32 static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
33 {									\
34 	u32 reg = readl_relaxed(priv->base + offset + off);		\
35 	return reg;							\
36 }									\
37 static inline void name##_writel(struct bcm_sysport_priv *priv,		\
38 				  u32 val, u32 off)			\
39 {									\
40 	writel_relaxed(val, priv->base + offset + off);			\
41 }									\
42 
43 BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
44 BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
45 BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
46 BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
47 BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
48 BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
49 BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
50 BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
51 BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
52 BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
53 
54 /* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
55  * same layout, except it has been moved by 4 bytes up, *sigh*
56  */
57 static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
58 {
59 	if (priv->is_lite && off >= RDMA_STATUS)
60 		off += 4;
61 	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
62 }
63 
64 static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
65 {
66 	if (priv->is_lite && off >= RDMA_STATUS)
67 		off += 4;
68 	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
69 }
70 
71 static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
72 {
73 	if (!priv->is_lite) {
74 		return BIT(bit);
75 	} else {
76 		if (bit >= ACB_ALGO)
77 			return BIT(bit + 1);
78 		else
79 			return BIT(bit);
80 	}
81 }
82 
83 /* L2-interrupt masking/unmasking helpers, does automatic saving of the applied
84  * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
85   */
86 #define BCM_SYSPORT_INTR_L2(which)	\
87 static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
88 						u32 mask)		\
89 {									\
90 	priv->irq##which##_mask &= ~(mask);				\
91 	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
92 }									\
93 static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
94 						u32 mask)		\
95 {									\
96 	intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
97 	priv->irq##which##_mask |= (mask);				\
98 }									\
99 
100 BCM_SYSPORT_INTR_L2(0)
101 BCM_SYSPORT_INTR_L2(1)
102 
103 /* Register accesses to GISB/RBUS registers are expensive (few hundred
104  * nanoseconds), so keep the check for 64-bits explicit here to save
105  * one register write per-packet on 32-bits platforms.
106  */
107 static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
108 				     void __iomem *d,
109 				     dma_addr_t addr)
110 {
111 #ifdef CONFIG_PHYS_ADDR_T_64BIT
112 	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
113 		     d + DESC_ADDR_HI_STATUS_LEN);
114 #endif
115 	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
116 }
117 
118 /* Ethtool operations */
119 static void bcm_sysport_set_rx_csum(struct net_device *dev,
120 				    netdev_features_t wanted)
121 {
122 	struct bcm_sysport_priv *priv = netdev_priv(dev);
123 	u32 reg;
124 
125 	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
126 	reg = rxchk_readl(priv, RXCHK_CONTROL);
127 	/* Clear L2 header checks, which would prevent BPDUs
128 	 * from being received.
129 	 */
130 	reg &= ~RXCHK_L2_HDR_DIS;
131 	if (priv->rx_chk_en)
132 		reg |= RXCHK_EN;
133 	else
134 		reg &= ~RXCHK_EN;
135 
136 	/* If UniMAC forwards CRC, we need to skip over it to get
137 	 * a valid CHK bit to be set in the per-packet status word
138 	 */
139 	if (priv->rx_chk_en && priv->crc_fwd)
140 		reg |= RXCHK_SKIP_FCS;
141 	else
142 		reg &= ~RXCHK_SKIP_FCS;
143 
144 	/* If Broadcom tags are enabled (e.g: using a switch), make
145 	 * sure we tell the RXCHK hardware to expect a 4-bytes Broadcom
146 	 * tag after the Ethernet MAC Source Address.
147 	 */
148 	if (netdev_uses_dsa(dev))
149 		reg |= RXCHK_BRCM_TAG_EN;
150 	else
151 		reg &= ~RXCHK_BRCM_TAG_EN;
152 
153 	rxchk_writel(priv, reg, RXCHK_CONTROL);
154 }
155 
156 static void bcm_sysport_set_tx_csum(struct net_device *dev,
157 				    netdev_features_t wanted)
158 {
159 	struct bcm_sysport_priv *priv = netdev_priv(dev);
160 	u32 reg;
161 
162 	/* Hardware transmit checksum requires us to enable the Transmit status
163 	 * block prepended to the packet contents
164 	 */
165 	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
166 				    NETIF_F_HW_VLAN_CTAG_TX));
167 	reg = tdma_readl(priv, TDMA_CONTROL);
168 	if (priv->tsb_en)
169 		reg |= tdma_control_bit(priv, TSB_EN);
170 	else
171 		reg &= ~tdma_control_bit(priv, TSB_EN);
172 	/* Indicating that software inserts Broadcom tags is needed for the TX
173 	 * checksum to be computed correctly when using VLAN HW acceleration,
174 	 * else it has no effect, so it can always be turned on.
175 	 */
176 	if (netdev_uses_dsa(dev))
177 		reg |= tdma_control_bit(priv, SW_BRCM_TAG);
178 	else
179 		reg &= ~tdma_control_bit(priv, SW_BRCM_TAG);
180 	tdma_writel(priv, reg, TDMA_CONTROL);
181 
182 	/* Default TPID is ETH_P_8021AD, change to ETH_P_8021Q */
183 	if (wanted & NETIF_F_HW_VLAN_CTAG_TX)
184 		tdma_writel(priv, ETH_P_8021Q, TDMA_TPID);
185 }
186 
187 static int bcm_sysport_set_features(struct net_device *dev,
188 				    netdev_features_t features)
189 {
190 	struct bcm_sysport_priv *priv = netdev_priv(dev);
191 	int ret;
192 
193 	ret = clk_prepare_enable(priv->clk);
194 	if (ret)
195 		return ret;
196 
197 	/* Read CRC forward */
198 	if (!priv->is_lite)
199 		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
200 	else
201 		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
202 				  GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
203 
204 	bcm_sysport_set_rx_csum(dev, features);
205 	bcm_sysport_set_tx_csum(dev, features);
206 
207 	clk_disable_unprepare(priv->clk);
208 
209 	return 0;
210 }
211 
212 /* Hardware counters must be kept in sync because the order/offset
213  * is important here (order in structure declaration = order in hardware)
214  */
215 static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
216 	/* general stats */
217 	STAT_NETDEV64(rx_packets),
218 	STAT_NETDEV64(tx_packets),
219 	STAT_NETDEV64(rx_bytes),
220 	STAT_NETDEV64(tx_bytes),
221 	STAT_NETDEV(rx_errors),
222 	STAT_NETDEV(tx_errors),
223 	STAT_NETDEV(rx_dropped),
224 	STAT_NETDEV(tx_dropped),
225 	STAT_NETDEV(multicast),
226 	/* UniMAC RSV counters */
227 	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
228 	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
229 	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
230 	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
231 	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
232 	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
233 	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
234 	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
235 	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
236 	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
237 	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
238 	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
239 	STAT_MIB_RX("rx_multicast", mib.rx.mca),
240 	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
241 	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
242 	STAT_MIB_RX("rx_control", mib.rx.cf),
243 	STAT_MIB_RX("rx_pause", mib.rx.pf),
244 	STAT_MIB_RX("rx_unknown", mib.rx.uo),
245 	STAT_MIB_RX("rx_align", mib.rx.aln),
246 	STAT_MIB_RX("rx_outrange", mib.rx.flr),
247 	STAT_MIB_RX("rx_code", mib.rx.cde),
248 	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
249 	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
250 	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
251 	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
252 	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
253 	STAT_MIB_RX("rx_unicast", mib.rx.uc),
254 	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
255 	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
256 	/* UniMAC TSV counters */
257 	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
258 	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
259 	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
260 	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
261 	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
262 	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
263 	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
264 	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
265 	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
266 	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
267 	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
268 	STAT_MIB_TX("tx_multicast", mib.tx.mca),
269 	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
270 	STAT_MIB_TX("tx_pause", mib.tx.pf),
271 	STAT_MIB_TX("tx_control", mib.tx.cf),
272 	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
273 	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
274 	STAT_MIB_TX("tx_defer", mib.tx.drf),
275 	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
276 	STAT_MIB_TX("tx_single_col", mib.tx.scl),
277 	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
278 	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
279 	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
280 	STAT_MIB_TX("tx_frags", mib.tx.frg),
281 	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
282 	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
283 	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
284 	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
285 	STAT_MIB_TX("tx_unicast", mib.tx.uc),
286 	/* UniMAC RUNT counters */
287 	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
288 	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
289 	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
290 	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
291 	/* RXCHK misc statistics */
292 	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
293 	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
294 		   RXCHK_OTHER_DISC_CNTR),
295 	/* RBUF misc statistics */
296 	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
297 	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
298 	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
299 	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
300 	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
301 	STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
302 	STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
303 	/* Per TX-queue statistics are dynamically appended */
304 };
305 
306 #define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)
307 
308 static void bcm_sysport_get_drvinfo(struct net_device *dev,
309 				    struct ethtool_drvinfo *info)
310 {
311 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
312 	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
313 }
314 
315 static u32 bcm_sysport_get_msglvl(struct net_device *dev)
316 {
317 	struct bcm_sysport_priv *priv = netdev_priv(dev);
318 
319 	return priv->msg_enable;
320 }
321 
322 static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
323 {
324 	struct bcm_sysport_priv *priv = netdev_priv(dev);
325 
326 	priv->msg_enable = enable;
327 }
328 
329 static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
330 {
331 	switch (type) {
332 	case BCM_SYSPORT_STAT_NETDEV:
333 	case BCM_SYSPORT_STAT_NETDEV64:
334 	case BCM_SYSPORT_STAT_RXCHK:
335 	case BCM_SYSPORT_STAT_RBUF:
336 	case BCM_SYSPORT_STAT_SOFT:
337 		return true;
338 	default:
339 		return false;
340 	}
341 }
342 
343 static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
344 {
345 	struct bcm_sysport_priv *priv = netdev_priv(dev);
346 	const struct bcm_sysport_stats *s;
347 	unsigned int i, j;
348 
349 	switch (string_set) {
350 	case ETH_SS_STATS:
351 		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
352 			s = &bcm_sysport_gstrings_stats[i];
353 			if (priv->is_lite &&
354 			    !bcm_sysport_lite_stat_valid(s->type))
355 				continue;
356 			j++;
357 		}
358 		/* Include per-queue statistics */
359 		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
360 	default:
361 		return -EOPNOTSUPP;
362 	}
363 }
364 
365 static void bcm_sysport_get_strings(struct net_device *dev,
366 				    u32 stringset, u8 *data)
367 {
368 	struct bcm_sysport_priv *priv = netdev_priv(dev);
369 	const struct bcm_sysport_stats *s;
370 	char buf[128];
371 	int i, j;
372 
373 	switch (stringset) {
374 	case ETH_SS_STATS:
375 		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
376 			s = &bcm_sysport_gstrings_stats[i];
377 			if (priv->is_lite &&
378 			    !bcm_sysport_lite_stat_valid(s->type))
379 				continue;
380 
381 			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
382 			       ETH_GSTRING_LEN);
383 			j++;
384 		}
385 
386 		for (i = 0; i < dev->num_tx_queues; i++) {
387 			snprintf(buf, sizeof(buf), "txq%d_packets", i);
388 			memcpy(data + j * ETH_GSTRING_LEN, buf,
389 			       ETH_GSTRING_LEN);
390 			j++;
391 
392 			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
393 			memcpy(data + j * ETH_GSTRING_LEN, buf,
394 			       ETH_GSTRING_LEN);
395 			j++;
396 		}
397 		break;
398 	default:
399 		break;
400 	}
401 }
402 
403 static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
404 {
405 	int i, j = 0;
406 
407 	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
408 		const struct bcm_sysport_stats *s;
409 		u8 offset = 0;
410 		u32 val = 0;
411 		char *p;
412 
413 		s = &bcm_sysport_gstrings_stats[i];
414 		switch (s->type) {
415 		case BCM_SYSPORT_STAT_NETDEV:
416 		case BCM_SYSPORT_STAT_NETDEV64:
417 		case BCM_SYSPORT_STAT_SOFT:
418 			continue;
419 		case BCM_SYSPORT_STAT_MIB_RX:
420 		case BCM_SYSPORT_STAT_MIB_TX:
421 		case BCM_SYSPORT_STAT_RUNT:
422 			if (priv->is_lite)
423 				continue;
424 
425 			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
426 				offset = UMAC_MIB_STAT_OFFSET;
427 			val = umac_readl(priv, UMAC_MIB_START + j + offset);
428 			break;
429 		case BCM_SYSPORT_STAT_RXCHK:
430 			val = rxchk_readl(priv, s->reg_offset);
431 			if (val == ~0)
432 				rxchk_writel(priv, 0, s->reg_offset);
433 			break;
434 		case BCM_SYSPORT_STAT_RBUF:
435 			val = rbuf_readl(priv, s->reg_offset);
436 			if (val == ~0)
437 				rbuf_writel(priv, 0, s->reg_offset);
438 			break;
439 		}
440 
441 		j += s->stat_sizeof;
442 		p = (char *)priv + s->stat_offset;
443 		*(u32 *)p = val;
444 	}
445 
446 	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
447 }
448 
449 static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
450 					u64 *tx_bytes, u64 *tx_packets)
451 {
452 	struct bcm_sysport_tx_ring *ring;
453 	u64 bytes = 0, packets = 0;
454 	unsigned int start;
455 	unsigned int q;
456 
457 	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
458 		ring = &priv->tx_rings[q];
459 		do {
460 			start = u64_stats_fetch_begin_irq(&priv->syncp);
461 			bytes = ring->bytes;
462 			packets = ring->packets;
463 		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
464 
465 		*tx_bytes += bytes;
466 		*tx_packets += packets;
467 	}
468 }
469 
470 static void bcm_sysport_get_stats(struct net_device *dev,
471 				  struct ethtool_stats *stats, u64 *data)
472 {
473 	struct bcm_sysport_priv *priv = netdev_priv(dev);
474 	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
475 	struct u64_stats_sync *syncp = &priv->syncp;
476 	struct bcm_sysport_tx_ring *ring;
477 	u64 tx_bytes = 0, tx_packets = 0;
478 	unsigned int start;
479 	int i, j;
480 
481 	if (netif_running(dev)) {
482 		bcm_sysport_update_mib_counters(priv);
483 		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
484 		stats64->tx_bytes = tx_bytes;
485 		stats64->tx_packets = tx_packets;
486 	}
487 
488 	for (i =  0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
489 		const struct bcm_sysport_stats *s;
490 		char *p;
491 
492 		s = &bcm_sysport_gstrings_stats[i];
493 		if (s->type == BCM_SYSPORT_STAT_NETDEV)
494 			p = (char *)&dev->stats;
495 		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
496 			p = (char *)stats64;
497 		else
498 			p = (char *)priv;
499 
500 		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
501 			continue;
502 		p += s->stat_offset;
503 
504 		if (s->stat_sizeof == sizeof(u64) &&
505 		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
506 			do {
507 				start = u64_stats_fetch_begin_irq(syncp);
508 				data[i] = *(u64 *)p;
509 			} while (u64_stats_fetch_retry_irq(syncp, start));
510 		} else
511 			data[i] = *(u32 *)p;
512 		j++;
513 	}
514 
515 	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
516 	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
517 	 * needs to point to how many total statistics we have minus the
518 	 * number of per TX queue statistics
519 	 */
520 	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
521 	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
522 
523 	for (i = 0; i < dev->num_tx_queues; i++) {
524 		ring = &priv->tx_rings[i];
525 		data[j] = ring->packets;
526 		j++;
527 		data[j] = ring->bytes;
528 		j++;
529 	}
530 }
531 
532 static void bcm_sysport_get_wol(struct net_device *dev,
533 				struct ethtool_wolinfo *wol)
534 {
535 	struct bcm_sysport_priv *priv = netdev_priv(dev);
536 
537 	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
538 	wol->wolopts = priv->wolopts;
539 
540 	if (!(priv->wolopts & WAKE_MAGICSECURE))
541 		return;
542 
543 	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
544 }
545 
546 static int bcm_sysport_set_wol(struct net_device *dev,
547 			       struct ethtool_wolinfo *wol)
548 {
549 	struct bcm_sysport_priv *priv = netdev_priv(dev);
550 	struct device *kdev = &priv->pdev->dev;
551 	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
552 
553 	if (!device_can_wakeup(kdev))
554 		return -ENOTSUPP;
555 
556 	if (wol->wolopts & ~supported)
557 		return -EINVAL;
558 
559 	if (wol->wolopts & WAKE_MAGICSECURE)
560 		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
561 
562 	/* Flag the device and relevant IRQ as wakeup capable */
563 	if (wol->wolopts) {
564 		device_set_wakeup_enable(kdev, 1);
565 		if (priv->wol_irq_disabled)
566 			enable_irq_wake(priv->wol_irq);
567 		priv->wol_irq_disabled = 0;
568 	} else {
569 		device_set_wakeup_enable(kdev, 0);
570 		/* Avoid unbalanced disable_irq_wake calls */
571 		if (!priv->wol_irq_disabled)
572 			disable_irq_wake(priv->wol_irq);
573 		priv->wol_irq_disabled = 1;
574 	}
575 
576 	priv->wolopts = wol->wolopts;
577 
578 	return 0;
579 }
580 
581 static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
582 					u32 usecs, u32 pkts)
583 {
584 	u32 reg;
585 
586 	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
587 	reg &= ~(RDMA_INTR_THRESH_MASK |
588 		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
589 	reg |= pkts;
590 	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
591 	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
592 }
593 
594 static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
595 					struct ethtool_coalesce *ec)
596 {
597 	struct bcm_sysport_priv *priv = ring->priv;
598 	u32 reg;
599 
600 	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
601 	reg &= ~(RING_INTR_THRESH_MASK |
602 		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
603 	reg |= ec->tx_max_coalesced_frames;
604 	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
605 			    RING_TIMEOUT_SHIFT;
606 	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
607 }
608 
609 static int bcm_sysport_get_coalesce(struct net_device *dev,
610 				    struct ethtool_coalesce *ec,
611 				    struct kernel_ethtool_coalesce *kernel_coal,
612 				    struct netlink_ext_ack *extack)
613 {
614 	struct bcm_sysport_priv *priv = netdev_priv(dev);
615 	u32 reg;
616 
617 	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
618 
619 	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
620 	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
621 
622 	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
623 
624 	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
625 	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
626 	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;
627 
628 	return 0;
629 }
630 
631 static int bcm_sysport_set_coalesce(struct net_device *dev,
632 				    struct ethtool_coalesce *ec,
633 				    struct kernel_ethtool_coalesce *kernel_coal,
634 				    struct netlink_ext_ack *extack)
635 {
636 	struct bcm_sysport_priv *priv = netdev_priv(dev);
637 	struct dim_cq_moder moder;
638 	u32 usecs, pkts;
639 	unsigned int i;
640 
641 	/* Base system clock is 125Mhz, DMA timeout is this reference clock
642 	 * divided by 1024, which yield roughly 8.192 us, our maximum value has
643 	 * to fit in the RING_TIMEOUT_MASK (16 bits).
644 	 */
645 	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
646 	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
647 	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
648 	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
649 		return -EINVAL;
650 
651 	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
652 	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
653 		return -EINVAL;
654 
655 	for (i = 0; i < dev->num_tx_queues; i++)
656 		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);
657 
658 	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
659 	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
660 	usecs = priv->rx_coalesce_usecs;
661 	pkts = priv->rx_max_coalesced_frames;
662 
663 	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
664 		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
665 		usecs = moder.usec;
666 		pkts = moder.pkts;
667 	}
668 
669 	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;
670 
671 	/* Apply desired coalescing parameters */
672 	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
673 
674 	return 0;
675 }
676 
677 static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
678 {
679 	dev_consume_skb_any(cb->skb);
680 	cb->skb = NULL;
681 	dma_unmap_addr_set(cb, dma_addr, 0);
682 }
683 
684 static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
685 					     struct bcm_sysport_cb *cb)
686 {
687 	struct device *kdev = &priv->pdev->dev;
688 	struct net_device *ndev = priv->netdev;
689 	struct sk_buff *skb, *rx_skb;
690 	dma_addr_t mapping;
691 
692 	/* Allocate a new SKB for a new packet */
693 	skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
694 				 GFP_ATOMIC | __GFP_NOWARN);
695 	if (!skb) {
696 		priv->mib.alloc_rx_buff_failed++;
697 		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
698 		return NULL;
699 	}
700 
701 	mapping = dma_map_single(kdev, skb->data,
702 				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
703 	if (dma_mapping_error(kdev, mapping)) {
704 		priv->mib.rx_dma_failed++;
705 		dev_kfree_skb_any(skb);
706 		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
707 		return NULL;
708 	}
709 
710 	/* Grab the current SKB on the ring */
711 	rx_skb = cb->skb;
712 	if (likely(rx_skb))
713 		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
714 				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
715 
716 	/* Put the new SKB on the ring */
717 	cb->skb = skb;
718 	dma_unmap_addr_set(cb, dma_addr, mapping);
719 	dma_desc_set_addr(priv, cb->bd_addr, mapping);
720 
721 	netif_dbg(priv, rx_status, ndev, "RX refill\n");
722 
723 	/* Return the current SKB to the caller */
724 	return rx_skb;
725 }
726 
727 static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
728 {
729 	struct bcm_sysport_cb *cb;
730 	struct sk_buff *skb;
731 	unsigned int i;
732 
733 	for (i = 0; i < priv->num_rx_bds; i++) {
734 		cb = &priv->rx_cbs[i];
735 		skb = bcm_sysport_rx_refill(priv, cb);
736 		dev_kfree_skb(skb);
737 		if (!cb->skb)
738 			return -ENOMEM;
739 	}
740 
741 	return 0;
742 }
743 
744 /* Poll the hardware for up to budget packets to process */
745 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
746 					unsigned int budget)
747 {
748 	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
749 	struct net_device *ndev = priv->netdev;
750 	unsigned int processed = 0, to_process;
751 	unsigned int processed_bytes = 0;
752 	struct bcm_sysport_cb *cb;
753 	struct sk_buff *skb;
754 	unsigned int p_index;
755 	u16 len, status;
756 	struct bcm_rsb *rsb;
757 
758 	/* Clear status before servicing to reduce spurious interrupts */
759 	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);
760 
761 	/* Determine how much we should process since last call, SYSTEMPORT Lite
762 	 * groups the producer and consumer indexes into the same 32-bit
763 	 * which we access using RDMA_CONS_INDEX
764 	 */
765 	if (!priv->is_lite)
766 		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
767 	else
768 		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
769 	p_index &= RDMA_PROD_INDEX_MASK;
770 
771 	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
772 
773 	netif_dbg(priv, rx_status, ndev,
774 		  "p_index=%d rx_c_index=%d to_process=%d\n",
775 		  p_index, priv->rx_c_index, to_process);
776 
777 	while ((processed < to_process) && (processed < budget)) {
778 		cb = &priv->rx_cbs[priv->rx_read_ptr];
779 		skb = bcm_sysport_rx_refill(priv, cb);
780 
781 
782 		/* We do not have a backing SKB, so we do not a corresponding
783 		 * DMA mapping for this incoming packet since
784 		 * bcm_sysport_rx_refill always either has both skb and mapping
785 		 * or none.
786 		 */
787 		if (unlikely(!skb)) {
788 			netif_err(priv, rx_err, ndev, "out of memory!\n");
789 			ndev->stats.rx_dropped++;
790 			ndev->stats.rx_errors++;
791 			goto next;
792 		}
793 
794 		/* Extract the Receive Status Block prepended */
795 		rsb = (struct bcm_rsb *)skb->data;
796 		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
797 		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
798 			  DESC_STATUS_MASK;
799 
800 		netif_dbg(priv, rx_status, ndev,
801 			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
802 			  p_index, priv->rx_c_index, priv->rx_read_ptr,
803 			  len, status);
804 
805 		if (unlikely(len > RX_BUF_LENGTH)) {
806 			netif_err(priv, rx_status, ndev, "oversized packet\n");
807 			ndev->stats.rx_length_errors++;
808 			ndev->stats.rx_errors++;
809 			dev_kfree_skb_any(skb);
810 			goto next;
811 		}
812 
813 		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
814 			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
815 			ndev->stats.rx_dropped++;
816 			ndev->stats.rx_errors++;
817 			dev_kfree_skb_any(skb);
818 			goto next;
819 		}
820 
821 		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
822 			netif_err(priv, rx_err, ndev, "error packet\n");
823 			if (status & RX_STATUS_OVFLOW)
824 				ndev->stats.rx_over_errors++;
825 			ndev->stats.rx_dropped++;
826 			ndev->stats.rx_errors++;
827 			dev_kfree_skb_any(skb);
828 			goto next;
829 		}
830 
831 		skb_put(skb, len);
832 
833 		/* Hardware validated our checksum */
834 		if (likely(status & DESC_L4_CSUM))
835 			skb->ip_summed = CHECKSUM_UNNECESSARY;
836 
837 		/* Hardware pre-pends packets with 2bytes before Ethernet
838 		 * header plus we have the Receive Status Block, strip off all
839 		 * of this from the SKB.
840 		 */
841 		skb_pull(skb, sizeof(*rsb) + 2);
842 		len -= (sizeof(*rsb) + 2);
843 		processed_bytes += len;
844 
845 		/* UniMAC may forward CRC */
846 		if (priv->crc_fwd) {
847 			skb_trim(skb, len - ETH_FCS_LEN);
848 			len -= ETH_FCS_LEN;
849 		}
850 
851 		skb->protocol = eth_type_trans(skb, ndev);
852 		ndev->stats.rx_packets++;
853 		ndev->stats.rx_bytes += len;
854 		u64_stats_update_begin(&priv->syncp);
855 		stats64->rx_packets++;
856 		stats64->rx_bytes += len;
857 		u64_stats_update_end(&priv->syncp);
858 
859 		napi_gro_receive(&priv->napi, skb);
860 next:
861 		processed++;
862 		priv->rx_read_ptr++;
863 
864 		if (priv->rx_read_ptr == priv->num_rx_bds)
865 			priv->rx_read_ptr = 0;
866 	}
867 
868 	priv->dim.packets = processed;
869 	priv->dim.bytes = processed_bytes;
870 
871 	return processed;
872 }
873 
874 static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
875 				       struct bcm_sysport_cb *cb,
876 				       unsigned int *bytes_compl,
877 				       unsigned int *pkts_compl)
878 {
879 	struct bcm_sysport_priv *priv = ring->priv;
880 	struct device *kdev = &priv->pdev->dev;
881 
882 	if (cb->skb) {
883 		*bytes_compl += cb->skb->len;
884 		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
885 				 dma_unmap_len(cb, dma_len),
886 				 DMA_TO_DEVICE);
887 		(*pkts_compl)++;
888 		bcm_sysport_free_cb(cb);
889 	/* SKB fragment */
890 	} else if (dma_unmap_addr(cb, dma_addr)) {
891 		*bytes_compl += dma_unmap_len(cb, dma_len);
892 		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
893 			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
894 		dma_unmap_addr_set(cb, dma_addr, 0);
895 	}
896 }
897 
898 /* Reclaim queued SKBs for transmission completion, lockless version */
899 static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
900 					     struct bcm_sysport_tx_ring *ring)
901 {
902 	unsigned int pkts_compl = 0, bytes_compl = 0;
903 	struct net_device *ndev = priv->netdev;
904 	unsigned int txbds_processed = 0;
905 	struct bcm_sysport_cb *cb;
906 	unsigned int txbds_ready;
907 	unsigned int c_index;
908 	u32 hw_ind;
909 
910 	/* Clear status before servicing to reduce spurious interrupts */
911 	if (!ring->priv->is_lite)
912 		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
913 	else
914 		intrl2_0_writel(ring->priv, BIT(ring->index +
915 				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);
916 
917 	/* Compute how many descriptors have been processed since last call */
918 	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
919 	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
920 	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
921 
922 	netif_dbg(priv, tx_done, ndev,
923 		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
924 		  ring->index, ring->c_index, c_index, txbds_ready);
925 
926 	while (txbds_processed < txbds_ready) {
927 		cb = &ring->cbs[ring->clean_index];
928 		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
929 
930 		ring->desc_count++;
931 		txbds_processed++;
932 
933 		if (likely(ring->clean_index < ring->size - 1))
934 			ring->clean_index++;
935 		else
936 			ring->clean_index = 0;
937 	}
938 
939 	u64_stats_update_begin(&priv->syncp);
940 	ring->packets += pkts_compl;
941 	ring->bytes += bytes_compl;
942 	u64_stats_update_end(&priv->syncp);
943 
944 	ring->c_index = c_index;
945 
946 	netif_dbg(priv, tx_done, ndev,
947 		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
948 		  ring->index, ring->c_index, pkts_compl, bytes_compl);
949 
950 	return pkts_compl;
951 }
952 
953 /* Locked version of the per-ring TX reclaim routine */
954 static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
955 					   struct bcm_sysport_tx_ring *ring)
956 {
957 	struct netdev_queue *txq;
958 	unsigned int released;
959 	unsigned long flags;
960 
961 	txq = netdev_get_tx_queue(priv->netdev, ring->index);
962 
963 	spin_lock_irqsave(&ring->lock, flags);
964 	released = __bcm_sysport_tx_reclaim(priv, ring);
965 	if (released)
966 		netif_tx_wake_queue(txq);
967 
968 	spin_unlock_irqrestore(&ring->lock, flags);
969 
970 	return released;
971 }
972 
973 /* Locked version of the per-ring TX reclaim, but does not wake the queue */
974 static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
975 				 struct bcm_sysport_tx_ring *ring)
976 {
977 	unsigned long flags;
978 
979 	spin_lock_irqsave(&ring->lock, flags);
980 	__bcm_sysport_tx_reclaim(priv, ring);
981 	spin_unlock_irqrestore(&ring->lock, flags);
982 }
983 
984 static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
985 {
986 	struct bcm_sysport_tx_ring *ring =
987 		container_of(napi, struct bcm_sysport_tx_ring, napi);
988 	unsigned int work_done = 0;
989 
990 	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
991 
992 	if (work_done == 0) {
993 		napi_complete(napi);
994 		/* re-enable TX interrupt */
995 		if (!ring->priv->is_lite)
996 			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
997 		else
998 			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
999 					    INTRL2_0_TDMA_MBDONE_SHIFT));
1000 
1001 		return 0;
1002 	}
1003 
1004 	return budget;
1005 }
1006 
1007 static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
1008 {
1009 	unsigned int q;
1010 
1011 	for (q = 0; q < priv->netdev->num_tx_queues; q++)
1012 		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
1013 }
1014 
1015 static int bcm_sysport_poll(struct napi_struct *napi, int budget)
1016 {
1017 	struct bcm_sysport_priv *priv =
1018 		container_of(napi, struct bcm_sysport_priv, napi);
1019 	struct dim_sample dim_sample = {};
1020 	unsigned int work_done = 0;
1021 
1022 	work_done = bcm_sysport_desc_rx(priv, budget);
1023 
1024 	priv->rx_c_index += work_done;
1025 	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
1026 
1027 	/* SYSTEMPORT Lite groups the producer/consumer index, producer is
1028 	 * maintained by HW, but writes to it will be ignore while RDMA
1029 	 * is active
1030 	 */
1031 	if (!priv->is_lite)
1032 		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
1033 	else
1034 		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
1035 
1036 	if (work_done < budget) {
1037 		napi_complete_done(napi, work_done);
1038 		/* re-enable RX interrupts */
1039 		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
1040 	}
1041 
1042 	if (priv->dim.use_dim) {
1043 		dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
1044 				  priv->dim.bytes, &dim_sample);
1045 		net_dim(&priv->dim.dim, dim_sample);
1046 	}
1047 
1048 	return work_done;
1049 }
1050 
1051 static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
1052 {
1053 	u32 reg, bit;
1054 
1055 	reg = umac_readl(priv, UMAC_MPD_CTRL);
1056 	if (enable)
1057 		reg |= MPD_EN;
1058 	else
1059 		reg &= ~MPD_EN;
1060 	umac_writel(priv, reg, UMAC_MPD_CTRL);
1061 
1062 	if (priv->is_lite)
1063 		bit = RBUF_ACPI_EN_LITE;
1064 	else
1065 		bit = RBUF_ACPI_EN;
1066 
1067 	reg = rbuf_readl(priv, RBUF_CONTROL);
1068 	if (enable)
1069 		reg |= bit;
1070 	else
1071 		reg &= ~bit;
1072 	rbuf_writel(priv, reg, RBUF_CONTROL);
1073 }
1074 
1075 static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
1076 {
1077 	unsigned int index;
1078 	u32 reg;
1079 
1080 	/* Disable RXCHK, active filters and Broadcom tag matching */
1081 	reg = rxchk_readl(priv, RXCHK_CONTROL);
1082 	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
1083 		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
1084 	rxchk_writel(priv, reg, RXCHK_CONTROL);
1085 
1086 	/* Make sure we restore correct CID index in case HW lost
1087 	 * its context during deep idle state
1088 	 */
1089 	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
1090 		rxchk_writel(priv, priv->filters_loc[index] <<
1091 			     RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
1092 		rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
1093 	}
1094 
1095 	/* Clear the MagicPacket detection logic */
1096 	mpd_enable_set(priv, false);
1097 
1098 	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
1099 	if (reg & INTRL2_0_MPD)
1100 		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
1101 
1102 	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
1103 		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
1104 				  RXCHK_BRCM_TAG_MATCH_MASK;
1105 		netdev_info(priv->netdev,
1106 			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
1107 	}
1108 
1109 	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
1110 }
1111 
1112 static void bcm_sysport_dim_work(struct work_struct *work)
1113 {
1114 	struct dim *dim = container_of(work, struct dim, work);
1115 	struct bcm_sysport_net_dim *ndim =
1116 			container_of(dim, struct bcm_sysport_net_dim, dim);
1117 	struct bcm_sysport_priv *priv =
1118 			container_of(ndim, struct bcm_sysport_priv, dim);
1119 	struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
1120 								    dim->profile_ix);
1121 
1122 	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
1123 	dim->state = DIM_START_MEASURE;
1124 }
1125 
1126 /* RX and misc interrupt routine */
1127 static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
1128 {
1129 	struct net_device *dev = dev_id;
1130 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1131 	struct bcm_sysport_tx_ring *txr;
1132 	unsigned int ring, ring_bit;
1133 
1134 	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
1135 			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
1136 	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
1137 
1138 	if (unlikely(priv->irq0_stat == 0)) {
1139 		netdev_warn(priv->netdev, "spurious RX interrupt\n");
1140 		return IRQ_NONE;
1141 	}
1142 
1143 	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
1144 		priv->dim.event_ctr++;
1145 		if (likely(napi_schedule_prep(&priv->napi))) {
1146 			/* disable RX interrupts */
1147 			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
1148 			__napi_schedule_irqoff(&priv->napi);
1149 		}
1150 	}
1151 
1152 	/* TX ring is full, perform a full reclaim since we do not know
1153 	 * which one would trigger this interrupt
1154 	 */
1155 	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
1156 		bcm_sysport_tx_reclaim_all(priv);
1157 
1158 	if (!priv->is_lite)
1159 		goto out;
1160 
1161 	for (ring = 0; ring < dev->num_tx_queues; ring++) {
1162 		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
1163 		if (!(priv->irq0_stat & ring_bit))
1164 			continue;
1165 
1166 		txr = &priv->tx_rings[ring];
1167 
1168 		if (likely(napi_schedule_prep(&txr->napi))) {
1169 			intrl2_0_mask_set(priv, ring_bit);
1170 			__napi_schedule(&txr->napi);
1171 		}
1172 	}
1173 out:
1174 	return IRQ_HANDLED;
1175 }
1176 
1177 /* TX interrupt service routine */
1178 static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
1179 {
1180 	struct net_device *dev = dev_id;
1181 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1182 	struct bcm_sysport_tx_ring *txr;
1183 	unsigned int ring;
1184 
1185 	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
1186 				~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
1187 	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1188 
1189 	if (unlikely(priv->irq1_stat == 0)) {
1190 		netdev_warn(priv->netdev, "spurious TX interrupt\n");
1191 		return IRQ_NONE;
1192 	}
1193 
1194 	for (ring = 0; ring < dev->num_tx_queues; ring++) {
1195 		if (!(priv->irq1_stat & BIT(ring)))
1196 			continue;
1197 
1198 		txr = &priv->tx_rings[ring];
1199 
1200 		if (likely(napi_schedule_prep(&txr->napi))) {
1201 			intrl2_1_mask_set(priv, BIT(ring));
1202 			__napi_schedule_irqoff(&txr->napi);
1203 		}
1204 	}
1205 
1206 	return IRQ_HANDLED;
1207 }
1208 
1209 static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
1210 {
1211 	struct bcm_sysport_priv *priv = dev_id;
1212 
1213 	pm_wakeup_event(&priv->pdev->dev, 0);
1214 
1215 	return IRQ_HANDLED;
1216 }
1217 
1218 #ifdef CONFIG_NET_POLL_CONTROLLER
1219 static void bcm_sysport_poll_controller(struct net_device *dev)
1220 {
1221 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1222 
1223 	disable_irq(priv->irq0);
1224 	bcm_sysport_rx_isr(priv->irq0, priv);
1225 	enable_irq(priv->irq0);
1226 
1227 	if (!priv->is_lite) {
1228 		disable_irq(priv->irq1);
1229 		bcm_sysport_tx_isr(priv->irq1, priv);
1230 		enable_irq(priv->irq1);
1231 	}
1232 }
1233 #endif
1234 
1235 static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
1236 					      struct net_device *dev)
1237 {
1238 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1239 	struct sk_buff *nskb;
1240 	struct bcm_tsb *tsb;
1241 	u32 csum_info;
1242 	u8 ip_proto;
1243 	u16 csum_start;
1244 	__be16 ip_ver;
1245 
1246 	/* Re-allocate SKB if needed */
1247 	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
1248 		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
1249 		if (!nskb) {
1250 			dev_kfree_skb_any(skb);
1251 			priv->mib.tx_realloc_tsb_failed++;
1252 			dev->stats.tx_errors++;
1253 			dev->stats.tx_dropped++;
1254 			return NULL;
1255 		}
1256 		dev_consume_skb_any(skb);
1257 		skb = nskb;
1258 		priv->mib.tx_realloc_tsb++;
1259 	}
1260 
1261 	tsb = skb_push(skb, sizeof(*tsb));
1262 	/* Zero-out TSB by default */
1263 	memset(tsb, 0, sizeof(*tsb));
1264 
1265 	if (skb_vlan_tag_present(skb)) {
1266 		tsb->pcp_dei_vid = skb_vlan_tag_get_prio(skb) & PCP_DEI_MASK;
1267 		tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT;
1268 	}
1269 
1270 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1271 		ip_ver = skb->protocol;
1272 		switch (ip_ver) {
1273 		case htons(ETH_P_IP):
1274 			ip_proto = ip_hdr(skb)->protocol;
1275 			break;
1276 		case htons(ETH_P_IPV6):
1277 			ip_proto = ipv6_hdr(skb)->nexthdr;
1278 			break;
1279 		default:
1280 			return skb;
1281 		}
1282 
1283 		/* Get the checksum offset and the L4 (transport) offset */
1284 		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
1285 		/* Account for the HW inserted VLAN tag */
1286 		if (skb_vlan_tag_present(skb))
1287 			csum_start += VLAN_HLEN;
1288 		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
1289 		csum_info |= (csum_start << L4_PTR_SHIFT);
1290 
1291 		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
1292 			csum_info |= L4_LENGTH_VALID;
1293 			if (ip_proto == IPPROTO_UDP &&
1294 			    ip_ver == htons(ETH_P_IP))
1295 				csum_info |= L4_UDP;
1296 		} else {
1297 			csum_info = 0;
1298 		}
1299 
1300 		tsb->l4_ptr_dest_map = csum_info;
1301 	}
1302 
1303 	return skb;
1304 }
1305 
1306 static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
1307 				    struct net_device *dev)
1308 {
1309 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1310 	struct device *kdev = &priv->pdev->dev;
1311 	struct bcm_sysport_tx_ring *ring;
1312 	struct bcm_sysport_cb *cb;
1313 	struct netdev_queue *txq;
1314 	u32 len_status, addr_lo;
1315 	unsigned int skb_len;
1316 	unsigned long flags;
1317 	dma_addr_t mapping;
1318 	u16 queue;
1319 	int ret;
1320 
1321 	queue = skb_get_queue_mapping(skb);
1322 	txq = netdev_get_tx_queue(dev, queue);
1323 	ring = &priv->tx_rings[queue];
1324 
1325 	/* lock against tx reclaim in BH context and TX ring full interrupt */
1326 	spin_lock_irqsave(&ring->lock, flags);
1327 	if (unlikely(ring->desc_count == 0)) {
1328 		netif_tx_stop_queue(txq);
1329 		netdev_err(dev, "queue %d awake and ring full!\n", queue);
1330 		ret = NETDEV_TX_BUSY;
1331 		goto out;
1332 	}
1333 
1334 	/* Insert TSB and checksum infos */
1335 	if (priv->tsb_en) {
1336 		skb = bcm_sysport_insert_tsb(skb, dev);
1337 		if (!skb) {
1338 			ret = NETDEV_TX_OK;
1339 			goto out;
1340 		}
1341 	}
1342 
1343 	skb_len = skb->len;
1344 
1345 	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
1346 	if (dma_mapping_error(kdev, mapping)) {
1347 		priv->mib.tx_dma_failed++;
1348 		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
1349 			  skb->data, skb_len);
1350 		ret = NETDEV_TX_OK;
1351 		goto out;
1352 	}
1353 
1354 	/* Remember the SKB for future freeing */
1355 	cb = &ring->cbs[ring->curr_desc];
1356 	cb->skb = skb;
1357 	dma_unmap_addr_set(cb, dma_addr, mapping);
1358 	dma_unmap_len_set(cb, dma_len, skb_len);
1359 
1360 	addr_lo = lower_32_bits(mapping);
1361 	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
1362 	len_status |= (skb_len << DESC_LEN_SHIFT);
1363 	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
1364 		       DESC_STATUS_SHIFT;
1365 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1366 		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
1367 	if (skb_vlan_tag_present(skb))
1368 		len_status |= (TX_STATUS_VLAN_VID_TSB << DESC_STATUS_SHIFT);
1369 
1370 	ring->curr_desc++;
1371 	if (ring->curr_desc == ring->size)
1372 		ring->curr_desc = 0;
1373 	ring->desc_count--;
1374 
1375 	/* Ports are latched, so write upper address first */
1376 	tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
1377 	tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
1378 
1379 	/* Check ring space and update SW control flow */
1380 	if (ring->desc_count == 0)
1381 		netif_tx_stop_queue(txq);
1382 
1383 	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
1384 		  ring->index, ring->desc_count, ring->curr_desc);
1385 
1386 	ret = NETDEV_TX_OK;
1387 out:
1388 	spin_unlock_irqrestore(&ring->lock, flags);
1389 	return ret;
1390 }
1391 
1392 static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue)
1393 {
1394 	netdev_warn(dev, "transmit timeout!\n");
1395 
1396 	netif_trans_update(dev);
1397 	dev->stats.tx_errors++;
1398 
1399 	netif_tx_wake_all_queues(dev);
1400 }
1401 
1402 /* phylib adjust link callback */
1403 static void bcm_sysport_adj_link(struct net_device *dev)
1404 {
1405 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1406 	struct phy_device *phydev = dev->phydev;
1407 	unsigned int changed = 0;
1408 	u32 cmd_bits = 0, reg;
1409 
1410 	if (priv->old_link != phydev->link) {
1411 		changed = 1;
1412 		priv->old_link = phydev->link;
1413 	}
1414 
1415 	if (priv->old_duplex != phydev->duplex) {
1416 		changed = 1;
1417 		priv->old_duplex = phydev->duplex;
1418 	}
1419 
1420 	if (priv->is_lite)
1421 		goto out;
1422 
1423 	switch (phydev->speed) {
1424 	case SPEED_2500:
1425 		cmd_bits = CMD_SPEED_2500;
1426 		break;
1427 	case SPEED_1000:
1428 		cmd_bits = CMD_SPEED_1000;
1429 		break;
1430 	case SPEED_100:
1431 		cmd_bits = CMD_SPEED_100;
1432 		break;
1433 	case SPEED_10:
1434 		cmd_bits = CMD_SPEED_10;
1435 		break;
1436 	default:
1437 		break;
1438 	}
1439 	cmd_bits <<= CMD_SPEED_SHIFT;
1440 
1441 	if (phydev->duplex == DUPLEX_HALF)
1442 		cmd_bits |= CMD_HD_EN;
1443 
1444 	if (priv->old_pause != phydev->pause) {
1445 		changed = 1;
1446 		priv->old_pause = phydev->pause;
1447 	}
1448 
1449 	if (!phydev->pause)
1450 		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
1451 
1452 	if (!changed)
1453 		return;
1454 
1455 	if (phydev->link) {
1456 		reg = umac_readl(priv, UMAC_CMD);
1457 		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
1458 			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
1459 			CMD_TX_PAUSE_IGNORE);
1460 		reg |= cmd_bits;
1461 		umac_writel(priv, reg, UMAC_CMD);
1462 	}
1463 out:
1464 	if (changed)
1465 		phy_print_status(phydev);
1466 }
1467 
1468 static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
1469 				 void (*cb)(struct work_struct *work))
1470 {
1471 	struct bcm_sysport_net_dim *dim = &priv->dim;
1472 
1473 	INIT_WORK(&dim->dim.work, cb);
1474 	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
1475 	dim->event_ctr = 0;
1476 	dim->packets = 0;
1477 	dim->bytes = 0;
1478 }
1479 
1480 static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
1481 {
1482 	struct bcm_sysport_net_dim *dim = &priv->dim;
1483 	struct dim_cq_moder moder;
1484 	u32 usecs, pkts;
1485 
1486 	usecs = priv->rx_coalesce_usecs;
1487 	pkts = priv->rx_max_coalesced_frames;
1488 
1489 	/* If DIM was enabled, re-apply default parameters */
1490 	if (dim->use_dim) {
1491 		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
1492 		usecs = moder.usec;
1493 		pkts = moder.pkts;
1494 	}
1495 
1496 	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
1497 }
1498 
1499 static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1500 				    unsigned int index)
1501 {
1502 	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1503 	size_t size;
1504 	u32 reg;
1505 
1506 	/* Simple descriptors partitioning for now */
1507 	size = 256;
1508 
1509 	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
1510 	if (!ring->cbs) {
1511 		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1512 		return -ENOMEM;
1513 	}
1514 
1515 	/* Initialize SW view of the ring */
1516 	spin_lock_init(&ring->lock);
1517 	ring->priv = priv;
1518 	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
1519 	ring->index = index;
1520 	ring->size = size;
1521 	ring->clean_index = 0;
1522 	ring->alloc_size = ring->size;
1523 	ring->desc_count = ring->size;
1524 	ring->curr_desc = 0;
1525 
1526 	/* Initialize HW ring */
1527 	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
1528 	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
1529 	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
1530 	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
1531 
1532 	/* Configure QID and port mapping */
1533 	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
1534 	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
1535 	if (ring->inspect) {
1536 		reg |= ring->switch_queue & RING_QID_MASK;
1537 		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
1538 	} else {
1539 		reg |= RING_IGNORE_STATUS;
1540 	}
1541 	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
1542 	reg = 0;
1543 	/* Adjust the packet size calculations if SYSTEMPORT is responsible
1544 	 * for HW insertion of VLAN tags
1545 	 */
1546 	if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
1547 		reg = VLAN_HLEN << RING_PKT_SIZE_ADJ_SHIFT;
1548 	tdma_writel(priv, reg, TDMA_DESC_RING_PCP_DEI_VID(index));
1549 
1550 	/* Enable ACB algorithm 2 */
1551 	reg = tdma_readl(priv, TDMA_CONTROL);
1552 	reg |= tdma_control_bit(priv, ACB_ALGO);
1553 	tdma_writel(priv, reg, TDMA_CONTROL);
1554 
1555 	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
1556 	 * with the original definition of ACB_ALGO
1557 	 */
1558 	reg = tdma_readl(priv, TDMA_CONTROL);
1559 	if (priv->is_lite)
1560 		reg &= ~BIT(TSB_SWAP1);
1561 	/* Set a correct TSB format based on host endian */
1562 	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1563 		reg |= tdma_control_bit(priv, TSB_SWAP0);
1564 	else
1565 		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
1566 	tdma_writel(priv, reg, TDMA_CONTROL);
1567 
1568 	/* Program the number of descriptors as MAX_THRESHOLD and half of
1569 	 * its size for the hysteresis trigger
1570 	 */
1571 	tdma_writel(priv, ring->size |
1572 			1 << RING_HYST_THRESH_SHIFT,
1573 			TDMA_DESC_RING_MAX_HYST(index));
1574 
1575 	/* Enable the ring queue in the arbiter */
1576 	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
1577 	reg |= (1 << index);
1578 	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
1579 
1580 	napi_enable(&ring->napi);
1581 
1582 	netif_dbg(priv, hw, priv->netdev,
1583 		  "TDMA cfg, size=%d, switch q=%d,port=%d\n",
1584 		  ring->size, ring->switch_queue,
1585 		  ring->switch_port);
1586 
1587 	return 0;
1588 }
1589 
1590 static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
1591 				     unsigned int index)
1592 {
1593 	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1594 	u32 reg;
1595 
1596 	/* Caller should stop the TDMA engine */
1597 	reg = tdma_readl(priv, TDMA_STATUS);
1598 	if (!(reg & TDMA_DISABLED))
1599 		netdev_warn(priv->netdev, "TDMA not stopped!\n");
1600 
1601 	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
1602 	 * fail, so by checking this pointer we know whether the TX ring was
1603 	 * fully initialized or not.
1604 	 */
1605 	if (!ring->cbs)
1606 		return;
1607 
1608 	napi_disable(&ring->napi);
1609 	netif_napi_del(&ring->napi);
1610 
1611 	bcm_sysport_tx_clean(priv, ring);
1612 
1613 	kfree(ring->cbs);
1614 	ring->cbs = NULL;
1615 	ring->size = 0;
1616 	ring->alloc_size = 0;
1617 
1618 	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
1619 }
1620 
1621 /* RDMA helper */
1622 static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
1623 				  unsigned int enable)
1624 {
1625 	unsigned int timeout = 1000;
1626 	u32 reg;
1627 
1628 	reg = rdma_readl(priv, RDMA_CONTROL);
1629 	if (enable)
1630 		reg |= RDMA_EN;
1631 	else
1632 		reg &= ~RDMA_EN;
1633 	rdma_writel(priv, reg, RDMA_CONTROL);
1634 
1635 	/* Poll for RMDA disabling completion */
1636 	do {
1637 		reg = rdma_readl(priv, RDMA_STATUS);
1638 		if (!!(reg & RDMA_DISABLED) == !enable)
1639 			return 0;
1640 		usleep_range(1000, 2000);
1641 	} while (timeout-- > 0);
1642 
1643 	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
1644 
1645 	return -ETIMEDOUT;
1646 }
1647 
1648 /* TDMA helper */
1649 static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
1650 				  unsigned int enable)
1651 {
1652 	unsigned int timeout = 1000;
1653 	u32 reg;
1654 
1655 	reg = tdma_readl(priv, TDMA_CONTROL);
1656 	if (enable)
1657 		reg |= tdma_control_bit(priv, TDMA_EN);
1658 	else
1659 		reg &= ~tdma_control_bit(priv, TDMA_EN);
1660 	tdma_writel(priv, reg, TDMA_CONTROL);
1661 
1662 	/* Poll for TMDA disabling completion */
1663 	do {
1664 		reg = tdma_readl(priv, TDMA_STATUS);
1665 		if (!!(reg & TDMA_DISABLED) == !enable)
1666 			return 0;
1667 
1668 		usleep_range(1000, 2000);
1669 	} while (timeout-- > 0);
1670 
1671 	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
1672 
1673 	return -ETIMEDOUT;
1674 }
1675 
1676 static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
1677 {
1678 	struct bcm_sysport_cb *cb;
1679 	u32 reg;
1680 	int ret;
1681 	int i;
1682 
1683 	/* Initialize SW view of the RX ring */
1684 	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
1685 	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
1686 	priv->rx_c_index = 0;
1687 	priv->rx_read_ptr = 0;
1688 	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
1689 				GFP_KERNEL);
1690 	if (!priv->rx_cbs) {
1691 		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1692 		return -ENOMEM;
1693 	}
1694 
1695 	for (i = 0; i < priv->num_rx_bds; i++) {
1696 		cb = priv->rx_cbs + i;
1697 		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
1698 	}
1699 
1700 	ret = bcm_sysport_alloc_rx_bufs(priv);
1701 	if (ret) {
1702 		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
1703 		return ret;
1704 	}
1705 
1706 	/* Initialize HW, ensure RDMA is disabled */
1707 	reg = rdma_readl(priv, RDMA_STATUS);
1708 	if (!(reg & RDMA_DISABLED))
1709 		rdma_enable_set(priv, 0);
1710 
1711 	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
1712 	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
1713 	rdma_writel(priv, 0, RDMA_PROD_INDEX);
1714 	rdma_writel(priv, 0, RDMA_CONS_INDEX);
1715 	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
1716 			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
1717 	/* Operate the queue in ring mode */
1718 	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
1719 	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
1720 	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
1721 	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
1722 
1723 	netif_dbg(priv, hw, priv->netdev,
1724 		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
1725 		  priv->num_rx_bds, priv->rx_bds);
1726 
1727 	return 0;
1728 }
1729 
1730 static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
1731 {
1732 	struct bcm_sysport_cb *cb;
1733 	unsigned int i;
1734 	u32 reg;
1735 
1736 	/* Caller should ensure RDMA is disabled */
1737 	reg = rdma_readl(priv, RDMA_STATUS);
1738 	if (!(reg & RDMA_DISABLED))
1739 		netdev_warn(priv->netdev, "RDMA not stopped!\n");
1740 
1741 	for (i = 0; i < priv->num_rx_bds; i++) {
1742 		cb = &priv->rx_cbs[i];
1743 		if (dma_unmap_addr(cb, dma_addr))
1744 			dma_unmap_single(&priv->pdev->dev,
1745 					 dma_unmap_addr(cb, dma_addr),
1746 					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
1747 		bcm_sysport_free_cb(cb);
1748 	}
1749 
1750 	kfree(priv->rx_cbs);
1751 	priv->rx_cbs = NULL;
1752 
1753 	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
1754 }
1755 
1756 static void bcm_sysport_set_rx_mode(struct net_device *dev)
1757 {
1758 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1759 	u32 reg;
1760 
1761 	if (priv->is_lite)
1762 		return;
1763 
1764 	reg = umac_readl(priv, UMAC_CMD);
1765 	if (dev->flags & IFF_PROMISC)
1766 		reg |= CMD_PROMISC;
1767 	else
1768 		reg &= ~CMD_PROMISC;
1769 	umac_writel(priv, reg, UMAC_CMD);
1770 
1771 	/* No support for ALLMULTI */
1772 	if (dev->flags & IFF_ALLMULTI)
1773 		return;
1774 }
1775 
1776 static inline void umac_enable_set(struct bcm_sysport_priv *priv,
1777 				   u32 mask, unsigned int enable)
1778 {
1779 	u32 reg;
1780 
1781 	if (!priv->is_lite) {
1782 		reg = umac_readl(priv, UMAC_CMD);
1783 		if (enable)
1784 			reg |= mask;
1785 		else
1786 			reg &= ~mask;
1787 		umac_writel(priv, reg, UMAC_CMD);
1788 	} else {
1789 		reg = gib_readl(priv, GIB_CONTROL);
1790 		if (enable)
1791 			reg |= mask;
1792 		else
1793 			reg &= ~mask;
1794 		gib_writel(priv, reg, GIB_CONTROL);
1795 	}
1796 
1797 	/* UniMAC stops on a packet boundary, wait for a full-sized packet
1798 	 * to be processed (1 msec).
1799 	 */
1800 	if (enable == 0)
1801 		usleep_range(1000, 2000);
1802 }
1803 
1804 static inline void umac_reset(struct bcm_sysport_priv *priv)
1805 {
1806 	u32 reg;
1807 
1808 	if (priv->is_lite)
1809 		return;
1810 
1811 	reg = umac_readl(priv, UMAC_CMD);
1812 	reg |= CMD_SW_RESET;
1813 	umac_writel(priv, reg, UMAC_CMD);
1814 	udelay(10);
1815 	reg = umac_readl(priv, UMAC_CMD);
1816 	reg &= ~CMD_SW_RESET;
1817 	umac_writel(priv, reg, UMAC_CMD);
1818 }
1819 
1820 static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
1821 			     const unsigned char *addr)
1822 {
1823 	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
1824 		    addr[3];
1825 	u32 mac1 = (addr[4] << 8) | addr[5];
1826 
1827 	if (!priv->is_lite) {
1828 		umac_writel(priv, mac0, UMAC_MAC0);
1829 		umac_writel(priv, mac1, UMAC_MAC1);
1830 	} else {
1831 		gib_writel(priv, mac0, GIB_MAC0);
1832 		gib_writel(priv, mac1, GIB_MAC1);
1833 	}
1834 }
1835 
1836 static void topctrl_flush(struct bcm_sysport_priv *priv)
1837 {
1838 	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
1839 	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
1840 	mdelay(1);
1841 	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
1842 	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
1843 }
1844 
1845 static int bcm_sysport_change_mac(struct net_device *dev, void *p)
1846 {
1847 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1848 	struct sockaddr *addr = p;
1849 
1850 	if (!is_valid_ether_addr(addr->sa_data))
1851 		return -EINVAL;
1852 
1853 	eth_hw_addr_set(dev, addr->sa_data);
1854 
1855 	/* interface is disabled, changes to MAC will be reflected on next
1856 	 * open call
1857 	 */
1858 	if (!netif_running(dev))
1859 		return 0;
1860 
1861 	umac_set_hw_addr(priv, dev->dev_addr);
1862 
1863 	return 0;
1864 }
1865 
1866 static void bcm_sysport_get_stats64(struct net_device *dev,
1867 				    struct rtnl_link_stats64 *stats)
1868 {
1869 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1870 	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
1871 	unsigned int start;
1872 
1873 	netdev_stats_to_stats64(stats, &dev->stats);
1874 
1875 	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
1876 				    &stats->tx_packets);
1877 
1878 	do {
1879 		start = u64_stats_fetch_begin_irq(&priv->syncp);
1880 		stats->rx_packets = stats64->rx_packets;
1881 		stats->rx_bytes = stats64->rx_bytes;
1882 	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
1883 }
1884 
1885 static void bcm_sysport_netif_start(struct net_device *dev)
1886 {
1887 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1888 
1889 	/* Enable NAPI */
1890 	bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
1891 	bcm_sysport_init_rx_coalesce(priv);
1892 	napi_enable(&priv->napi);
1893 
1894 	/* Enable RX interrupt and TX ring full interrupt */
1895 	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1896 
1897 	phy_start(dev->phydev);
1898 
1899 	/* Enable TX interrupts for the TXQs */
1900 	if (!priv->is_lite)
1901 		intrl2_1_mask_clear(priv, 0xffffffff);
1902 	else
1903 		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
1904 }
1905 
1906 static void rbuf_init(struct bcm_sysport_priv *priv)
1907 {
1908 	u32 reg;
1909 
1910 	reg = rbuf_readl(priv, RBUF_CONTROL);
1911 	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
1912 	/* Set a correct RSB format on SYSTEMPORT Lite */
1913 	if (priv->is_lite)
1914 		reg &= ~RBUF_RSB_SWAP1;
1915 
1916 	/* Set a correct RSB format based on host endian */
1917 	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1918 		reg |= RBUF_RSB_SWAP0;
1919 	else
1920 		reg &= ~RBUF_RSB_SWAP0;
1921 	rbuf_writel(priv, reg, RBUF_CONTROL);
1922 }
1923 
1924 static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
1925 {
1926 	intrl2_0_mask_set(priv, 0xffffffff);
1927 	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1928 	if (!priv->is_lite) {
1929 		intrl2_1_mask_set(priv, 0xffffffff);
1930 		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1931 	}
1932 }
1933 
1934 static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
1935 {
1936 	u32 reg;
1937 
1938 	reg = gib_readl(priv, GIB_CONTROL);
1939 	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
1940 	if (netdev_uses_dsa(priv->netdev)) {
1941 		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
1942 		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
1943 	}
1944 	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
1945 	reg |= 12 << GIB_IPG_LEN_SHIFT;
1946 	gib_writel(priv, reg, GIB_CONTROL);
1947 }
1948 
1949 static int bcm_sysport_open(struct net_device *dev)
1950 {
1951 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1952 	struct phy_device *phydev;
1953 	unsigned int i;
1954 	int ret;
1955 
1956 	clk_prepare_enable(priv->clk);
1957 
1958 	/* Reset UniMAC */
1959 	umac_reset(priv);
1960 
1961 	/* Flush TX and RX FIFOs at TOPCTRL level */
1962 	topctrl_flush(priv);
1963 
1964 	/* Disable the UniMAC RX/TX */
1965 	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
1966 
1967 	/* Enable RBUF 2bytes alignment and Receive Status Block */
1968 	rbuf_init(priv);
1969 
1970 	/* Set maximum frame length */
1971 	if (!priv->is_lite)
1972 		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1973 	else
1974 		gib_set_pad_extension(priv);
1975 
1976 	/* Apply features again in case we changed them while interface was
1977 	 * down
1978 	 */
1979 	bcm_sysport_set_features(dev, dev->features);
1980 
1981 	/* Set MAC address */
1982 	umac_set_hw_addr(priv, dev->dev_addr);
1983 
1984 	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
1985 				0, priv->phy_interface);
1986 	if (!phydev) {
1987 		netdev_err(dev, "could not attach to PHY\n");
1988 		ret = -ENODEV;
1989 		goto out_clk_disable;
1990 	}
1991 
1992 	/* Reset house keeping link status */
1993 	priv->old_duplex = -1;
1994 	priv->old_link = -1;
1995 	priv->old_pause = -1;
1996 
1997 	/* mask all interrupts and request them */
1998 	bcm_sysport_mask_all_intrs(priv);
1999 
2000 	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
2001 	if (ret) {
2002 		netdev_err(dev, "failed to request RX interrupt\n");
2003 		goto out_phy_disconnect;
2004 	}
2005 
2006 	if (!priv->is_lite) {
2007 		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
2008 				  dev->name, dev);
2009 		if (ret) {
2010 			netdev_err(dev, "failed to request TX interrupt\n");
2011 			goto out_free_irq0;
2012 		}
2013 	}
2014 
2015 	/* Initialize both hardware and software ring */
2016 	for (i = 0; i < dev->num_tx_queues; i++) {
2017 		ret = bcm_sysport_init_tx_ring(priv, i);
2018 		if (ret) {
2019 			netdev_err(dev, "failed to initialize TX ring %d\n",
2020 				   i);
2021 			goto out_free_tx_ring;
2022 		}
2023 	}
2024 
2025 	/* Initialize linked-list */
2026 	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
2027 
2028 	/* Initialize RX ring */
2029 	ret = bcm_sysport_init_rx_ring(priv);
2030 	if (ret) {
2031 		netdev_err(dev, "failed to initialize RX ring\n");
2032 		goto out_free_rx_ring;
2033 	}
2034 
2035 	/* Turn on RDMA */
2036 	ret = rdma_enable_set(priv, 1);
2037 	if (ret)
2038 		goto out_free_rx_ring;
2039 
2040 	/* Turn on TDMA */
2041 	ret = tdma_enable_set(priv, 1);
2042 	if (ret)
2043 		goto out_clear_rx_int;
2044 
2045 	/* Turn on UniMAC TX/RX */
2046 	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);
2047 
2048 	bcm_sysport_netif_start(dev);
2049 
2050 	netif_tx_start_all_queues(dev);
2051 
2052 	return 0;
2053 
2054 out_clear_rx_int:
2055 	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
2056 out_free_rx_ring:
2057 	bcm_sysport_fini_rx_ring(priv);
2058 out_free_tx_ring:
2059 	for (i = 0; i < dev->num_tx_queues; i++)
2060 		bcm_sysport_fini_tx_ring(priv, i);
2061 	if (!priv->is_lite)
2062 		free_irq(priv->irq1, dev);
2063 out_free_irq0:
2064 	free_irq(priv->irq0, dev);
2065 out_phy_disconnect:
2066 	phy_disconnect(phydev);
2067 out_clk_disable:
2068 	clk_disable_unprepare(priv->clk);
2069 	return ret;
2070 }
2071 
2072 static void bcm_sysport_netif_stop(struct net_device *dev)
2073 {
2074 	struct bcm_sysport_priv *priv = netdev_priv(dev);
2075 
2076 	/* stop all software from updating hardware */
2077 	netif_tx_disable(dev);
2078 	napi_disable(&priv->napi);
2079 	cancel_work_sync(&priv->dim.dim.work);
2080 	phy_stop(dev->phydev);
2081 
2082 	/* mask all interrupts */
2083 	bcm_sysport_mask_all_intrs(priv);
2084 }
2085 
2086 static int bcm_sysport_stop(struct net_device *dev)
2087 {
2088 	struct bcm_sysport_priv *priv = netdev_priv(dev);
2089 	unsigned int i;
2090 	int ret;
2091 
2092 	bcm_sysport_netif_stop(dev);
2093 
2094 	/* Disable UniMAC RX */
2095 	umac_enable_set(priv, CMD_RX_EN, 0);
2096 
2097 	ret = tdma_enable_set(priv, 0);
2098 	if (ret) {
2099 		netdev_err(dev, "timeout disabling RDMA\n");
2100 		return ret;
2101 	}
2102 
2103 	/* Wait for a maximum packet size to be drained */
2104 	usleep_range(2000, 3000);
2105 
2106 	ret = rdma_enable_set(priv, 0);
2107 	if (ret) {
2108 		netdev_err(dev, "timeout disabling TDMA\n");
2109 		return ret;
2110 	}
2111 
2112 	/* Disable UniMAC TX */
2113 	umac_enable_set(priv, CMD_TX_EN, 0);
2114 
2115 	/* Free RX/TX rings SW structures */
2116 	for (i = 0; i < dev->num_tx_queues; i++)
2117 		bcm_sysport_fini_tx_ring(priv, i);
2118 	bcm_sysport_fini_rx_ring(priv);
2119 
2120 	free_irq(priv->irq0, dev);
2121 	if (!priv->is_lite)
2122 		free_irq(priv->irq1, dev);
2123 
2124 	/* Disconnect from PHY */
2125 	phy_disconnect(dev->phydev);
2126 
2127 	clk_disable_unprepare(priv->clk);
2128 
2129 	return 0;
2130 }
2131 
2132 static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
2133 				 u64 location)
2134 {
2135 	unsigned int index;
2136 	u32 reg;
2137 
2138 	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
2139 		reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
2140 		reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
2141 		reg &= RXCHK_BRCM_TAG_CID_MASK;
2142 		if (reg == location)
2143 			return index;
2144 	}
2145 
2146 	return -EINVAL;
2147 }
2148 
2149 static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
2150 				struct ethtool_rxnfc *nfc)
2151 {
2152 	int index;
2153 
2154 	/* This is not a rule that we know about */
2155 	index = bcm_sysport_rule_find(priv, nfc->fs.location);
2156 	if (index < 0)
2157 		return -EOPNOTSUPP;
2158 
2159 	nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;
2160 
2161 	return 0;
2162 }
2163 
2164 static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
2165 				struct ethtool_rxnfc *nfc)
2166 {
2167 	unsigned int index;
2168 	u32 reg;
2169 
2170 	/* We cannot match locations greater than what the classification ID
2171 	 * permits (256 entries)
2172 	 */
2173 	if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
2174 		return -E2BIG;
2175 
2176 	/* We cannot support flows that are not destined for a wake-up */
2177 	if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
2178 		return -EOPNOTSUPP;
2179 
2180 	/* All filters are already in use, we cannot match more rules */
2181 	if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
2182 	    RXCHK_BRCM_TAG_MAX)
2183 		return -ENOSPC;
2184 
2185 	index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
2186 	if (index >= RXCHK_BRCM_TAG_MAX)
2187 		return -ENOSPC;
2188 
2189 	/* Location is the classification ID, and index is the position
2190 	 * within one of our 8 possible filters to be programmed
2191 	 */
2192 	reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
2193 	reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
2194 	reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
2195 	rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
2196 	rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
2197 
2198 	priv->filters_loc[index] = nfc->fs.location;
2199 	set_bit(index, priv->filters);
2200 
2201 	return 0;
2202 }
2203 
2204 static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
2205 				u64 location)
2206 {
2207 	int index;
2208 
2209 	/* This is not a rule that we know about */
2210 	index = bcm_sysport_rule_find(priv, location);
2211 	if (index < 0)
2212 		return -EOPNOTSUPP;
2213 
2214 	/* No need to disable this filter if it was enabled, this will
2215 	 * be taken care of during suspend time by bcm_sysport_suspend_to_wol
2216 	 */
2217 	clear_bit(index, priv->filters);
2218 	priv->filters_loc[index] = 0;
2219 
2220 	return 0;
2221 }
2222 
2223 static int bcm_sysport_get_rxnfc(struct net_device *dev,
2224 				 struct ethtool_rxnfc *nfc, u32 *rule_locs)
2225 {
2226 	struct bcm_sysport_priv *priv = netdev_priv(dev);
2227 	int ret = -EOPNOTSUPP;
2228 
2229 	switch (nfc->cmd) {
2230 	case ETHTOOL_GRXCLSRULE:
2231 		ret = bcm_sysport_rule_get(priv, nfc);
2232 		break;
2233 	default:
2234 		break;
2235 	}
2236 
2237 	return ret;
2238 }
2239 
2240 static int bcm_sysport_set_rxnfc(struct net_device *dev,
2241 				 struct ethtool_rxnfc *nfc)
2242 {
2243 	struct bcm_sysport_priv *priv = netdev_priv(dev);
2244 	int ret = -EOPNOTSUPP;
2245 
2246 	switch (nfc->cmd) {
2247 	case ETHTOOL_SRXCLSRLINS:
2248 		ret = bcm_sysport_rule_set(priv, nfc);
2249 		break;
2250 	case ETHTOOL_SRXCLSRLDEL:
2251 		ret = bcm_sysport_rule_del(priv, nfc->fs.location);
2252 		break;
2253 	default:
2254 		break;
2255 	}
2256 
2257 	return ret;
2258 }
2259 
2260 static const struct ethtool_ops bcm_sysport_ethtool_ops = {
2261 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
2262 				     ETHTOOL_COALESCE_MAX_FRAMES |
2263 				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
2264 	.get_drvinfo		= bcm_sysport_get_drvinfo,
2265 	.get_msglevel		= bcm_sysport_get_msglvl,
2266 	.set_msglevel		= bcm_sysport_set_msglvl,
2267 	.get_link		= ethtool_op_get_link,
2268 	.get_strings		= bcm_sysport_get_strings,
2269 	.get_ethtool_stats	= bcm_sysport_get_stats,
2270 	.get_sset_count		= bcm_sysport_get_sset_count,
2271 	.get_wol		= bcm_sysport_get_wol,
2272 	.set_wol		= bcm_sysport_set_wol,
2273 	.get_coalesce		= bcm_sysport_get_coalesce,
2274 	.set_coalesce		= bcm_sysport_set_coalesce,
2275 	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
2276 	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
2277 	.get_rxnfc		= bcm_sysport_get_rxnfc,
2278 	.set_rxnfc		= bcm_sysport_set_rxnfc,
2279 };
2280 
2281 static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
2282 				    struct net_device *sb_dev)
2283 {
2284 	struct bcm_sysport_priv *priv = netdev_priv(dev);
2285 	u16 queue = skb_get_queue_mapping(skb);
2286 	struct bcm_sysport_tx_ring *tx_ring;
2287 	unsigned int q, port;
2288 
2289 	if (!netdev_uses_dsa(dev))
2290 		return netdev_pick_tx(dev, skb, NULL);
2291 
2292 	/* DSA tagging layer will have configured the correct queue */
2293 	q = BRCM_TAG_GET_QUEUE(queue);
2294 	port = BRCM_TAG_GET_PORT(queue);
2295 	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
2296 
2297 	if (unlikely(!tx_ring))
2298 		return netdev_pick_tx(dev, skb, NULL);
2299 
2300 	return tx_ring->index;
2301 }
2302 
2303 static const struct net_device_ops bcm_sysport_netdev_ops = {
2304 	.ndo_start_xmit		= bcm_sysport_xmit,
2305 	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
2306 	.ndo_open		= bcm_sysport_open,
2307 	.ndo_stop		= bcm_sysport_stop,
2308 	.ndo_set_features	= bcm_sysport_set_features,
2309 	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
2310 	.ndo_set_mac_address	= bcm_sysport_change_mac,
2311 #ifdef CONFIG_NET_POLL_CONTROLLER
2312 	.ndo_poll_controller	= bcm_sysport_poll_controller,
2313 #endif
2314 	.ndo_get_stats64	= bcm_sysport_get_stats64,
2315 	.ndo_select_queue	= bcm_sysport_select_queue,
2316 };
2317 
2318 static int bcm_sysport_map_queues(struct net_device *dev,
2319 				  struct net_device *slave_dev)
2320 {
2321 	struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
2322 	struct bcm_sysport_priv *priv = netdev_priv(dev);
2323 	struct bcm_sysport_tx_ring *ring;
2324 	unsigned int num_tx_queues;
2325 	unsigned int q, qp, port;
2326 
2327 	/* We can't be setting up queue inspection for non directly attached
2328 	 * switches
2329 	 */
2330 	if (dp->ds->index)
2331 		return 0;
2332 
2333 	port = dp->index;
2334 
2335 	/* On SYSTEMPORT Lite we have twice as less queues, so we cannot do a
2336 	 * 1:1 mapping, we can only do a 2:1 mapping. By reducing the number of
2337 	 * per-port (slave_dev) network devices queue, we achieve just that.
2338 	 * This need to happen now before any slave network device is used such
2339 	 * it accurately reflects the number of real TX queues.
2340 	 */
2341 	if (priv->is_lite)
2342 		netif_set_real_num_tx_queues(slave_dev,
2343 					     slave_dev->num_tx_queues / 2);
2344 
2345 	num_tx_queues = slave_dev->real_num_tx_queues;
2346 
2347 	if (priv->per_port_num_tx_queues &&
2348 	    priv->per_port_num_tx_queues != num_tx_queues)
2349 		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");
2350 
2351 	priv->per_port_num_tx_queues = num_tx_queues;
2352 
2353 	for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
2354 	     q++) {
2355 		ring = &priv->tx_rings[q];
2356 
2357 		if (ring->inspect)
2358 			continue;
2359 
2360 		/* Just remember the mapping actual programming done
2361 		 * during bcm_sysport_init_tx_ring
2362 		 */
2363 		ring->switch_queue = qp;
2364 		ring->switch_port = port;
2365 		ring->inspect = true;
2366 		priv->ring_map[qp + port * num_tx_queues] = ring;
2367 		qp++;
2368 	}
2369 
2370 	return 0;
2371 }
2372 
2373 static int bcm_sysport_unmap_queues(struct net_device *dev,
2374 				    struct net_device *slave_dev)
2375 {
2376 	struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
2377 	struct bcm_sysport_priv *priv = netdev_priv(dev);
2378 	struct bcm_sysport_tx_ring *ring;
2379 	unsigned int num_tx_queues;
2380 	unsigned int q, qp, port;
2381 
2382 	port = dp->index;
2383 
2384 	num_tx_queues = slave_dev->real_num_tx_queues;
2385 
2386 	for (q = 0; q < dev->num_tx_queues; q++) {
2387 		ring = &priv->tx_rings[q];
2388 
2389 		if (ring->switch_port != port)
2390 			continue;
2391 
2392 		if (!ring->inspect)
2393 			continue;
2394 
2395 		ring->inspect = false;
2396 		qp = ring->switch_queue;
2397 		priv->ring_map[qp + port * num_tx_queues] = NULL;
2398 	}
2399 
2400 	return 0;
2401 }
2402 
2403 static int bcm_sysport_netdevice_event(struct notifier_block *nb,
2404 				       unsigned long event, void *ptr)
2405 {
2406 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2407 	struct netdev_notifier_changeupper_info *info = ptr;
2408 	struct bcm_sysport_priv *priv;
2409 	int ret = 0;
2410 
2411 	priv = container_of(nb, struct bcm_sysport_priv, netdev_notifier);
2412 	if (priv->netdev != dev)
2413 		return NOTIFY_DONE;
2414 
2415 	switch (event) {
2416 	case NETDEV_CHANGEUPPER:
2417 		if (dev->netdev_ops != &bcm_sysport_netdev_ops)
2418 			return NOTIFY_DONE;
2419 
2420 		if (!dsa_slave_dev_check(info->upper_dev))
2421 			return NOTIFY_DONE;
2422 
2423 		if (info->linking)
2424 			ret = bcm_sysport_map_queues(dev, info->upper_dev);
2425 		else
2426 			ret = bcm_sysport_unmap_queues(dev, info->upper_dev);
2427 		break;
2428 	}
2429 
2430 	return notifier_from_errno(ret);
2431 }
2432 
2433 #define REV_FMT	"v%2x.%02x"
2434 
2435 static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
2436 	[SYSTEMPORT] = {
2437 		.is_lite = false,
2438 		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
2439 	},
2440 	[SYSTEMPORT_LITE] = {
2441 		.is_lite = true,
2442 		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
2443 	},
2444 };
2445 
2446 static const struct of_device_id bcm_sysport_of_match[] = {
2447 	{ .compatible = "brcm,systemportlite-v1.00",
2448 	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
2449 	{ .compatible = "brcm,systemport-v1.00",
2450 	  .data = &bcm_sysport_params[SYSTEMPORT] },
2451 	{ .compatible = "brcm,systemport",
2452 	  .data = &bcm_sysport_params[SYSTEMPORT] },
2453 	{ /* sentinel */ }
2454 };
2455 MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
2456 
2457 static int bcm_sysport_probe(struct platform_device *pdev)
2458 {
2459 	const struct bcm_sysport_hw_params *params;
2460 	const struct of_device_id *of_id = NULL;
2461 	struct bcm_sysport_priv *priv;
2462 	struct device_node *dn;
2463 	struct net_device *dev;
2464 	u32 txq, rxq;
2465 	int ret;
2466 
2467 	dn = pdev->dev.of_node;
2468 	of_id = of_match_node(bcm_sysport_of_match, dn);
2469 	if (!of_id || !of_id->data)
2470 		return -EINVAL;
2471 
2472 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
2473 	if (ret)
2474 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2475 	if (ret) {
2476 		dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret);
2477 		return ret;
2478 	}
2479 
2480 	/* Fairly quickly we need to know the type of adapter we have */
2481 	params = of_id->data;
2482 
2483 	/* Read the Transmit/Receive Queue properties */
2484 	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
2485 		txq = TDMA_NUM_RINGS;
2486 	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
2487 		rxq = 1;
2488 
2489 	/* Sanity check the number of transmit queues */
2490 	if (!txq || txq > TDMA_NUM_RINGS)
2491 		return -EINVAL;
2492 
2493 	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
2494 	if (!dev)
2495 		return -ENOMEM;
2496 
2497 	/* Initialize private members */
2498 	priv = netdev_priv(dev);
2499 
2500 	priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
2501 	if (IS_ERR(priv->clk)) {
2502 		ret = PTR_ERR(priv->clk);
2503 		goto err_free_netdev;
2504 	}
2505 
2506 	/* Allocate number of TX rings */
2507 	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
2508 				      sizeof(struct bcm_sysport_tx_ring),
2509 				      GFP_KERNEL);
2510 	if (!priv->tx_rings) {
2511 		ret = -ENOMEM;
2512 		goto err_free_netdev;
2513 	}
2514 
2515 	priv->is_lite = params->is_lite;
2516 	priv->num_rx_desc_words = params->num_rx_desc_words;
2517 
2518 	priv->irq0 = platform_get_irq(pdev, 0);
2519 	if (!priv->is_lite) {
2520 		priv->irq1 = platform_get_irq(pdev, 1);
2521 		priv->wol_irq = platform_get_irq(pdev, 2);
2522 	} else {
2523 		priv->wol_irq = platform_get_irq(pdev, 1);
2524 	}
2525 	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
2526 		ret = -EINVAL;
2527 		goto err_free_netdev;
2528 	}
2529 
2530 	priv->base = devm_platform_ioremap_resource(pdev, 0);
2531 	if (IS_ERR(priv->base)) {
2532 		ret = PTR_ERR(priv->base);
2533 		goto err_free_netdev;
2534 	}
2535 
2536 	priv->netdev = dev;
2537 	priv->pdev = pdev;
2538 
2539 	ret = of_get_phy_mode(dn, &priv->phy_interface);
2540 	/* Default to GMII interface mode */
2541 	if (ret)
2542 		priv->phy_interface = PHY_INTERFACE_MODE_GMII;
2543 
2544 	/* In the case of a fixed PHY, the DT node associated
2545 	 * to the PHY is the Ethernet MAC DT node.
2546 	 */
2547 	if (of_phy_is_fixed_link(dn)) {
2548 		ret = of_phy_register_fixed_link(dn);
2549 		if (ret) {
2550 			dev_err(&pdev->dev, "failed to register fixed PHY\n");
2551 			goto err_free_netdev;
2552 		}
2553 
2554 		priv->phy_dn = dn;
2555 	}
2556 
2557 	/* Initialize netdevice members */
2558 	ret = of_get_ethdev_address(dn, dev);
2559 	if (ret) {
2560 		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
2561 		eth_hw_addr_random(dev);
2562 	}
2563 
2564 	SET_NETDEV_DEV(dev, &pdev->dev);
2565 	dev_set_drvdata(&pdev->dev, dev);
2566 	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
2567 	dev->netdev_ops = &bcm_sysport_netdev_ops;
2568 	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
2569 
2570 	dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
2571 			 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2572 			 NETIF_F_HW_VLAN_CTAG_TX;
2573 	dev->hw_features |= dev->features;
2574 	dev->vlan_features |= dev->features;
2575 	dev->max_mtu = UMAC_MAX_MTU_SIZE;
2576 
2577 	/* Request the WOL interrupt and advertise suspend if available */
2578 	priv->wol_irq_disabled = 1;
2579 	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
2580 			       bcm_sysport_wol_isr, 0, dev->name, priv);
2581 	if (!ret)
2582 		device_set_wakeup_capable(&pdev->dev, 1);
2583 
2584 	priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
2585 	if (IS_ERR(priv->wol_clk))
2586 		return PTR_ERR(priv->wol_clk);
2587 
2588 	/* Set the needed headroom once and for all */
2589 	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
2590 	dev->needed_headroom += sizeof(struct bcm_tsb);
2591 
2592 	/* libphy will adjust the link state accordingly */
2593 	netif_carrier_off(dev);
2594 
2595 	priv->rx_max_coalesced_frames = 1;
2596 	u64_stats_init(&priv->syncp);
2597 
2598 	priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event;
2599 
2600 	ret = register_netdevice_notifier(&priv->netdev_notifier);
2601 	if (ret) {
2602 		dev_err(&pdev->dev, "failed to register DSA notifier\n");
2603 		goto err_deregister_fixed_link;
2604 	}
2605 
2606 	ret = register_netdev(dev);
2607 	if (ret) {
2608 		dev_err(&pdev->dev, "failed to register net_device\n");
2609 		goto err_deregister_notifier;
2610 	}
2611 
2612 	clk_prepare_enable(priv->clk);
2613 
2614 	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
2615 	dev_info(&pdev->dev,
2616 		 "Broadcom SYSTEMPORT%s " REV_FMT
2617 		 " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
2618 		 priv->is_lite ? " Lite" : "",
2619 		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
2620 		 priv->irq0, priv->irq1, txq, rxq);
2621 
2622 	clk_disable_unprepare(priv->clk);
2623 
2624 	return 0;
2625 
2626 err_deregister_notifier:
2627 	unregister_netdevice_notifier(&priv->netdev_notifier);
2628 err_deregister_fixed_link:
2629 	if (of_phy_is_fixed_link(dn))
2630 		of_phy_deregister_fixed_link(dn);
2631 err_free_netdev:
2632 	free_netdev(dev);
2633 	return ret;
2634 }
2635 
2636 static int bcm_sysport_remove(struct platform_device *pdev)
2637 {
2638 	struct net_device *dev = dev_get_drvdata(&pdev->dev);
2639 	struct bcm_sysport_priv *priv = netdev_priv(dev);
2640 	struct device_node *dn = pdev->dev.of_node;
2641 
2642 	/* Not much to do, ndo_close has been called
2643 	 * and we use managed allocations
2644 	 */
2645 	unregister_netdevice_notifier(&priv->netdev_notifier);
2646 	unregister_netdev(dev);
2647 	if (of_phy_is_fixed_link(dn))
2648 		of_phy_deregister_fixed_link(dn);
2649 	free_netdev(dev);
2650 	dev_set_drvdata(&pdev->dev, NULL);
2651 
2652 	return 0;
2653 }
2654 
2655 static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
2656 {
2657 	struct net_device *ndev = priv->netdev;
2658 	unsigned int timeout = 1000;
2659 	unsigned int index, i = 0;
2660 	u32 reg;
2661 
2662 	reg = umac_readl(priv, UMAC_MPD_CTRL);
2663 	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
2664 		reg |= MPD_EN;
2665 	reg &= ~PSW_EN;
2666 	if (priv->wolopts & WAKE_MAGICSECURE) {
2667 		/* Program the SecureOn password */
2668 		umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
2669 			    UMAC_PSW_MS);
2670 		umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
2671 			    UMAC_PSW_LS);
2672 		reg |= PSW_EN;
2673 	}
2674 	umac_writel(priv, reg, UMAC_MPD_CTRL);
2675 
2676 	if (priv->wolopts & WAKE_FILTER) {
2677 		/* Turn on ACPI matching to steal packets from RBUF */
2678 		reg = rbuf_readl(priv, RBUF_CONTROL);
2679 		if (priv->is_lite)
2680 			reg |= RBUF_ACPI_EN_LITE;
2681 		else
2682 			reg |= RBUF_ACPI_EN;
2683 		rbuf_writel(priv, reg, RBUF_CONTROL);
2684 
2685 		/* Enable RXCHK, active filters and Broadcom tag matching */
2686 		reg = rxchk_readl(priv, RXCHK_CONTROL);
2687 		reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
2688 			 RXCHK_BRCM_TAG_MATCH_SHIFT);
2689 		for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
2690 			reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
2691 			i++;
2692 		}
2693 		reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
2694 		rxchk_writel(priv, reg, RXCHK_CONTROL);
2695 	}
2696 
2697 	/* Make sure RBUF entered WoL mode as result */
2698 	do {
2699 		reg = rbuf_readl(priv, RBUF_STATUS);
2700 		if (reg & RBUF_WOL_MODE)
2701 			break;
2702 
2703 		udelay(10);
2704 	} while (timeout-- > 0);
2705 
2706 	/* Do not leave the UniMAC RBUF matching only MPD packets */
2707 	if (!timeout) {
2708 		mpd_enable_set(priv, false);
2709 		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
2710 		return -ETIMEDOUT;
2711 	}
2712 
2713 	/* UniMAC receive needs to be turned on */
2714 	umac_enable_set(priv, CMD_RX_EN, 1);
2715 
2716 	netif_dbg(priv, wol, ndev, "entered WOL mode\n");
2717 
2718 	return 0;
2719 }
2720 
2721 static int __maybe_unused bcm_sysport_suspend(struct device *d)
2722 {
2723 	struct net_device *dev = dev_get_drvdata(d);
2724 	struct bcm_sysport_priv *priv = netdev_priv(dev);
2725 	unsigned int i;
2726 	int ret = 0;
2727 	u32 reg;
2728 
2729 	if (!netif_running(dev))
2730 		return 0;
2731 
2732 	netif_device_detach(dev);
2733 
2734 	bcm_sysport_netif_stop(dev);
2735 
2736 	phy_suspend(dev->phydev);
2737 
2738 	/* Disable UniMAC RX */
2739 	umac_enable_set(priv, CMD_RX_EN, 0);
2740 
2741 	ret = rdma_enable_set(priv, 0);
2742 	if (ret) {
2743 		netdev_err(dev, "RDMA timeout!\n");
2744 		return ret;
2745 	}
2746 
2747 	/* Disable RXCHK if enabled */
2748 	if (priv->rx_chk_en) {
2749 		reg = rxchk_readl(priv, RXCHK_CONTROL);
2750 		reg &= ~RXCHK_EN;
2751 		rxchk_writel(priv, reg, RXCHK_CONTROL);
2752 	}
2753 
2754 	/* Flush RX pipe */
2755 	if (!priv->wolopts)
2756 		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
2757 
2758 	ret = tdma_enable_set(priv, 0);
2759 	if (ret) {
2760 		netdev_err(dev, "TDMA timeout!\n");
2761 		return ret;
2762 	}
2763 
2764 	/* Wait for a packet boundary */
2765 	usleep_range(2000, 3000);
2766 
2767 	umac_enable_set(priv, CMD_TX_EN, 0);
2768 
2769 	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
2770 
2771 	/* Free RX/TX rings SW structures */
2772 	for (i = 0; i < dev->num_tx_queues; i++)
2773 		bcm_sysport_fini_tx_ring(priv, i);
2774 	bcm_sysport_fini_rx_ring(priv);
2775 
2776 	/* Get prepared for Wake-on-LAN */
2777 	if (device_may_wakeup(d) && priv->wolopts) {
2778 		clk_prepare_enable(priv->wol_clk);
2779 		ret = bcm_sysport_suspend_to_wol(priv);
2780 	}
2781 
2782 	clk_disable_unprepare(priv->clk);
2783 
2784 	return ret;
2785 }
2786 
2787 static int __maybe_unused bcm_sysport_resume(struct device *d)
2788 {
2789 	struct net_device *dev = dev_get_drvdata(d);
2790 	struct bcm_sysport_priv *priv = netdev_priv(dev);
2791 	unsigned int i;
2792 	int ret;
2793 
2794 	if (!netif_running(dev))
2795 		return 0;
2796 
2797 	clk_prepare_enable(priv->clk);
2798 	if (priv->wolopts)
2799 		clk_disable_unprepare(priv->wol_clk);
2800 
2801 	umac_reset(priv);
2802 
2803 	/* Disable the UniMAC RX/TX */
2804 	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
2805 
2806 	/* We may have been suspended and never received a WOL event that
2807 	 * would turn off MPD detection, take care of that now
2808 	 */
2809 	bcm_sysport_resume_from_wol(priv);
2810 
2811 	/* Initialize both hardware and software ring */
2812 	for (i = 0; i < dev->num_tx_queues; i++) {
2813 		ret = bcm_sysport_init_tx_ring(priv, i);
2814 		if (ret) {
2815 			netdev_err(dev, "failed to initialize TX ring %d\n",
2816 				   i);
2817 			goto out_free_tx_rings;
2818 		}
2819 	}
2820 
2821 	/* Initialize linked-list */
2822 	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
2823 
2824 	/* Initialize RX ring */
2825 	ret = bcm_sysport_init_rx_ring(priv);
2826 	if (ret) {
2827 		netdev_err(dev, "failed to initialize RX ring\n");
2828 		goto out_free_rx_ring;
2829 	}
2830 
2831 	/* RX pipe enable */
2832 	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
2833 
2834 	ret = rdma_enable_set(priv, 1);
2835 	if (ret) {
2836 		netdev_err(dev, "failed to enable RDMA\n");
2837 		goto out_free_rx_ring;
2838 	}
2839 
2840 	/* Restore enabled features */
2841 	bcm_sysport_set_features(dev, dev->features);
2842 
2843 	rbuf_init(priv);
2844 
2845 	/* Set maximum frame length */
2846 	if (!priv->is_lite)
2847 		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
2848 	else
2849 		gib_set_pad_extension(priv);
2850 
2851 	/* Set MAC address */
2852 	umac_set_hw_addr(priv, dev->dev_addr);
2853 
2854 	umac_enable_set(priv, CMD_RX_EN, 1);
2855 
2856 	/* TX pipe enable */
2857 	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
2858 
2859 	umac_enable_set(priv, CMD_TX_EN, 1);
2860 
2861 	ret = tdma_enable_set(priv, 1);
2862 	if (ret) {
2863 		netdev_err(dev, "TDMA timeout!\n");
2864 		goto out_free_rx_ring;
2865 	}
2866 
2867 	phy_resume(dev->phydev);
2868 
2869 	bcm_sysport_netif_start(dev);
2870 
2871 	netif_device_attach(dev);
2872 
2873 	return 0;
2874 
2875 out_free_rx_ring:
2876 	bcm_sysport_fini_rx_ring(priv);
2877 out_free_tx_rings:
2878 	for (i = 0; i < dev->num_tx_queues; i++)
2879 		bcm_sysport_fini_tx_ring(priv, i);
2880 	clk_disable_unprepare(priv->clk);
2881 	return ret;
2882 }
2883 
2884 static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
2885 		bcm_sysport_suspend, bcm_sysport_resume);
2886 
2887 static struct platform_driver bcm_sysport_driver = {
2888 	.probe	= bcm_sysport_probe,
2889 	.remove	= bcm_sysport_remove,
2890 	.driver =  {
2891 		.name = "brcm-systemport",
2892 		.of_match_table = bcm_sysport_of_match,
2893 		.pm = &bcm_sysport_pm_ops,
2894 	},
2895 };
2896 module_platform_driver(bcm_sysport_driver);
2897 
2898 MODULE_AUTHOR("Broadcom Corporation");
2899 MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
2900 MODULE_ALIAS("platform:brcm-systemport");
2901 MODULE_LICENSE("GPL");
2902