/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O register accessor helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = __raw_readl(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,		\
				  u32 val, u32 off)			\
{									\
	__raw_writel(val, priv->base + offset + off);			\
}

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

/* L2 interrupt masking/unmasking helpers; they automatically save the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in
 * hot paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)
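/* The helpers generated above (intrl2_0_mask_clear(), intrl2_0_mask_set(),
 * intrl2_1_mask_clear(), intrl2_1_mask_set()) keep priv->irq0_mask and
 * priv->irq1_mask mirroring the hardware mask register, so hot paths never
 * need to read CPU_MASK_STATUS back.
 */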

/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 * nanoseconds), so keep the 64-bit check explicit here to save one
 * register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		     d + DESC_ADDR_HI_STATUS_LEN);
#endif
	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
}

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}

/* Ethtool operations */
static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g.: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}

static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= TSB_EN;
	else
		reg &= ~TSB_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_SYSPORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_sysport_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

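		/* j is the running offset used for the UMAC MIB reads above;
		 * every entry that was not skipped advances it by its own
		 * size, which is why the order of bcm_sysport_gstrings_stats
		 * has to match the hardware counter layout.
		 */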
		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcm_sysport_update_mib_counters(priv);

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(unsigned long *)p;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	/* Return the programmed SecureOn password */
	reg = umac_readl(priv, UMAC_PSW_MS);
	put_unaligned_be16(reg, &wol->sopass[0]);
	reg = umac_readl(priv, UMAC_PSW_LS);
	put_unaligned_be32(reg, &wol->sopass[2]);
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	/* Program the SecureOn password */
	if (wol->wolopts & WAKE_MAGICSECURE) {
		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
			    UMAC_PSW_LS);
	}

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* The base system clock is 125 MHz; the DMA timeout tick is this
	 * reference clock divided by 1024, which yields roughly 8.192 us. Our
	 * maximum value has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
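	/* Worked example: one timeout tick is 1024 / 125 MHz = 8.192 us, so
	 * a requested tx_coalesce_usecs of 100 programs
	 * DIV_ROUND_UP(100 * 1000, 8192) = 13 ticks, i.e. ~106.5 us.
	 */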
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
		reg &= ~(RING_INTR_THRESH_MASK |
			 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
		reg |= ec->tx_max_coalesced_frames;
		reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
			 RING_TIMEOUT_SHIFT;
		tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
	}

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= ec->rx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
			    RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);

	return 0;
}
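/* Both callbacks above sit behind the standard ethtool coalescing
 * interface, e.g. (illustrative invocation):
 *
 *	ethtool -C eth0 rx-usecs 100 rx-frames 16 tx-usecs 100 tx-frames 32
 */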

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Determine how much we should process since last call */
	p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

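	/* The hardware producer index wraps at RDMA_PROD_INDEX_MASK + 1;
	 * assuming a 16-bit index, rx_c_index = 65530 and p_index = 4
	 * means 65536 - 65530 + 4 = 10 packets are pending.
	 */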
	if (p_index < priv->rx_c_index)
		to_process = (RDMA_CONS_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		to_process = p_index - priv->rx_c_index;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both an skb and a
		 * mapping or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the prepended Receive Status Block */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			  DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* The hardware prepends 2 bytes before the Ethernet header,
		 * plus we have the Receive Status Block; strip all of this
		 * from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;

	if (cb->skb) {
		ndev->stats.tx_bytes += cb->skb->len;
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		ndev->stats.tx_packets++;
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	struct net_device *ndev = priv->netdev;
	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	u32 hw_ind;

	txq = netdev_get_tx_queue(ndev, ring->index);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);

	last_c_index = ring->c_index;
	num_tx_cbs = ring->size;

	c_index &= (num_tx_cbs - 1);

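	/* Same wraparound handling as on the RX side, but modulo the ring
	 * size: with the 256-descriptor rings used here, last_c_index = 250
	 * and c_index = 5 reclaims 256 - 250 + 5 = 11 descriptors.
	 */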
	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_cbs - last_c_index + c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
		  ring->index, c_index, last_tx_cn, last_c_index);

	while (last_tx_cn-- > 0) {
		cb = ring->cbs + last_c_index;
		bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		last_c_index++;
		last_c_index &= (num_tx_cbs - 1);
	}

	ring->c_index = c_index;

	if (netif_tx_queue_stopped(txq) && pkts_compl)
		netif_tx_wake_queue(txq);

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		intrl2_1_mask_clear(ring->priv, BIT(ring->index));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
	rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	return work_done;
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	u32 reg;

	/* Stop monitoring MPD interrupt */
	intrl2_0_mask_set(priv, INTRL2_0_MPD);

	/* Clear the MagicPacket detection logic */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (priv->irq0_stat & INTRL2_0_MPD) {
		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
		bcm_sysport_resume_from_wol(priv);
	}

	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, priv);
	enable_irq(priv->irq0);

	disable_irq(priv->irq1);
	bcm_sysport_tx_isr(priv->irq1, priv);
	enable_irq(priv->irq1);
}
#endif

static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	u16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		dev_kfree_skb(skb);
		if (!nskb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = nskb;
	}

	tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = ntohs(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

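		/* Illustrative layout: for TCP over IPv4 with a plain
		 * Ethernet header, csum_start is 14 + 20 = 34 and the
		 * checksum field sits 16 bytes into the TCP header, so the
		 * low pointer bits hold 34 + 16 = 50.
		 */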
		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* The Ethernet switch we are interfaced with needs packets to be at
	 * least 64 bytes (including FCS), otherwise they will be discarded
	 * when they enter the switch port logic. When Broadcom tags are
	 * enabled, we need to make sure that packets are at least 68 bytes
	 * (including FCS and tag) because the length verification is done
	 * after the Broadcom tag is stripped off the ingress packet.
	 */
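	/* With ETH_ZLEN (60) plus the 4-byte Broadcom tag, this pads the
	 * frame to 64 bytes before the MAC appends the 4-byte FCS, i.e.
	 * 68 bytes on the wire.
	 */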
	if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Insert TSB and checksum info */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
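	/* ...and order the descriptor update itself before the WRITE_PORT
	 * MMIO access below latches it into the hardware.
	 */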
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}

	phy_print_status(phydev);
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA-able descriptor, since writing to the port
	 * will allocate a new descriptor in the port's internal linked list.
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p\n",
		  ring->size, ring->desc_cpu);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part of bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_reclaim(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= TDMA_EN;
	else
		reg &= ~TDMA_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	u32 reg;
	int ret;
	int i;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = NUM_RX_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
				GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
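	/* i.e. start address 0 through NUM_HW_RX_DESC_WORDS - 1: the
	 * descriptor RAM is treated as one circular buffer.
	 */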

	rdma_writel(priv, 1, RDMA_MBDONE_INTR);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary; wait for a full-sized packet
	 * to be processed (1 msec).
	 */
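	/* For reference: a 1518-byte frame takes ~1.2 ms on the wire at
	 * 10 Mbps (assumed to be the slowest supported link speed), hence
	 * the sleep below.
	 */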
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3], UMAC_MAC0);
	umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}

static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the 32 TXQs */
	intrl2_1_mask_clear(priv, 0xffffffff);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2-byte alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);

	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	if (!phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset housekeeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request TX interrupt\n");
		goto out_free_irq0;
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
	return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	return 0;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
	.get_coalesce		= bcm_sysport_get_coalesce,
	.set_coalesce		= bcm_sysport_set_coalesce,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
};

#define REV_FMT	"v%2x.%02x"

static int bcm_sysport_probe(struct platform_device *pdev)
{
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;
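	/* These correspond to optional properties of the MAC node, e.g.
	 * (hypothetical values):
	 *
	 *	systemport,num-txq = <16>;
	 *	systemport,num-rxq = <1>;
	 */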
1743 
1744 	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
1745 	if (!dev)
1746 		return -ENOMEM;
1747 
1748 	/* Initialize private members */
1749 	priv = netdev_priv(dev);
1750 
1751 	priv->irq0 = platform_get_irq(pdev, 0);
1752 	priv->irq1 = platform_get_irq(pdev, 1);
1753 	priv->wol_irq = platform_get_irq(pdev, 2);
1754 	if (priv->irq0 <= 0 || priv->irq1 <= 0) {
1755 		dev_err(&pdev->dev, "invalid interrupts\n");
1756 		ret = -EINVAL;
1757 		goto err_free_netdev;
1758 	}
1759 
1760 	priv->base = devm_ioremap_resource(&pdev->dev, r);
1761 	if (IS_ERR(priv->base)) {
1762 		ret = PTR_ERR(priv->base);
1763 		goto err_free_netdev;
1764 	}
1765 
1766 	priv->netdev = dev;
1767 	priv->pdev = pdev;
1768 
1769 	priv->phy_interface = of_get_phy_mode(dn);
1770 	/* Default to GMII interface mode */
1771 	if (priv->phy_interface < 0)
1772 		priv->phy_interface = PHY_INTERFACE_MODE_GMII;
1773 
1774 	/* In the case of a fixed PHY, the DT node associated
1775 	 * to the PHY is the Ethernet MAC DT node.
1776 	 */
1777 	if (of_phy_is_fixed_link(dn)) {
1778 		ret = of_phy_register_fixed_link(dn);
1779 		if (ret) {
1780 			dev_err(&pdev->dev, "failed to register fixed PHY\n");
1781 			goto err_free_netdev;
1782 		}
1783 
1784 		priv->phy_dn = dn;
1785 	}
1786 
1787 	/* Initialize netdevice members */
1788 	macaddr = of_get_mac_address(dn);
1789 	if (!macaddr || !is_valid_ether_addr(macaddr)) {
1790 		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
1791 		eth_hw_addr_random(dev);
1792 	} else {
1793 		ether_addr_copy(dev->dev_addr, macaddr);
1794 	}
1795 
1796 	SET_NETDEV_DEV(dev, &pdev->dev);
1797 	dev_set_drvdata(&pdev->dev, dev);
1798 	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
1799 	dev->netdev_ops = &bcm_sysport_netdev_ops;
1800 	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
1801 
1802 	/* HW supported features, none enabled by default */
1803 	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
1804 				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all; a transmit status block
	 * (TSB) may be prepended to frames on transmit, so reserve room for
	 * it up front.
	 */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_fixed_link;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;

err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do; .ndo_stop has been called
	 * and we use managed allocations
	 */
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
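/* Prepare the hardware for Wake-on-LAN. The wake-up options were armed
 * beforehand through ethtool's set_wol, e.g. "ethtool -s eth0 wol g" for
 * magic packets, with "wol gs" also matching the SecureOn password (eth0
 * being a hypothetical interface name).
 */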
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	u32 reg;

	/* Password has already been programmed */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE)
		reg |= PSW_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	/* Make sure RBUF entered WoL mode as a result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (--timeout > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		reg = umac_readl(priv, UMAC_MPD_CTRL);
		reg &= ~MPD_EN;
		umac_writel(priv, reg, UMAC_MPD_CTRL);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	/* Enable the interrupt wake-up source */
	intrl2_0_mask_clear(priv, INTRL2_0_MPD);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

static int bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	bcm_sysport_netif_stop(dev);

	phy_suspend(dev->phydev);

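	/* Mark the device as absent so the stack stops using it while the
	 * hardware is being quiesced.
	 */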
	netif_device_detach(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}

static int bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection; take care of that now.
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software rings */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	netif_device_attach(dev);

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Re-enable RXCHK if it was enabled before suspend */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg |= RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	rbuf_init(priv);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
#endif

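/* SIMPLE_DEV_PM_OPS wires up the system sleep hooks; without
 * CONFIG_PM_SLEEP it expands to an empty set of operations, matching the
 * #ifdef above.
 */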
static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
		bcm_sysport_suspend, bcm_sysport_resume);

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemport-v1.00" },
	{ .compatible = "brcm,systemport" },
	{ /* sentinel */ }
};
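/* Emit module aliases from the compatible strings so the module can be
 * auto-loaded when a matching DT node is probed.
 */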
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");