/*
 * Broadcom Starfighter 2 DSA switch driver
 *
 * Copyright (C) 2014, Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/mii.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

/* String, offset, and register size in bytes if different from 4 bytes */
static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = {
	{ "TxOctets",		0x000, 8	},
	{ "TxDropPkts",		0x020		},
	{ "TxQPKTQ0",		0x030		},
	{ "TxBroadcastPkts",	0x040		},
	{ "TxMulticastPkts",	0x050		},
	{ "TxUnicastPKts",	0x060		},
	{ "TxCollisions",	0x070		},
	{ "TxSingleCollision",	0x080		},
	{ "TxMultipleCollision", 0x090		},
	{ "TxDeferredCollision", 0x0a0		},
	{ "TxLateCollision",	0x0b0		},
	{ "TxExcessiveCollision", 0x0c0		},
	{ "TxFrameInDisc",	0x0d0		},
	{ "TxPausePkts",	0x0e0		},
	{ "TxQPKTQ1",		0x0f0		},
	{ "TxQPKTQ2",		0x100		},
	{ "TxQPKTQ3",		0x110		},
	{ "TxQPKTQ4",		0x120		},
	{ "TxQPKTQ5",		0x130		},
	{ "RxOctets",		0x140, 8	},
	{ "RxUndersizePkts",	0x160		},
	{ "RxPausePkts",	0x170		},
	{ "RxPkts64Octets",	0x180		},
	{ "RxPkts65to127Octets", 0x190		},
	{ "RxPkts128to255Octets", 0x1a0		},
	{ "RxPkts256to511Octets", 0x1b0		},
	{ "RxPkts512to1023Octets", 0x1c0	},
	{ "RxPkts1024toMaxPktsOctets", 0x1d0	},
	{ "RxOversizePkts",	0x1e0		},
	{ "RxJabbers",		0x1f0		},
	{ "RxAlignmentErrors",	0x200		},
	{ "RxFCSErrors",	0x210		},
	{ "RxGoodOctets",	0x220, 8	},
	{ "RxDropPkts",		0x240		},
	{ "RxUnicastPkts",	0x250		},
	{ "RxMulticastPkts",	0x260		},
	{ "RxBroadcastPkts",	0x270		},
	{ "RxSAChanges",	0x280		},
	{ "RxFragments",	0x290		},
	{ "RxJumboPkt",		0x2a0		},
	{ "RxSymblErr",		0x2b0		},
	{ "InRangeErrCount",	0x2c0		},
	{ "OutRangeErrCount",	0x2d0		},
	{ "EEELpiEvent",	0x2e0		},
	{ "EEELpiDuration",	0x2f0		},
	{ "RxDiscard",		0x300, 8	},
	{ "TxQPKTQ6",		0x320		},
	{ "TxQPKTQ7",		0x330		},
	{ "TxPkts64Octets",	0x340		},
	{ "TxPkts65to127Octets", 0x350		},
	{ "TxPkts128to255Octets", 0x360		},
	{ "TxPkts256to511Ocets", 0x370		},
	{ "TxPkts512to1023Ocets", 0x380		},
	{ "TxPkts1024toMaxPktOcets", 0x390	},
};

#define BCM_SF2_STATS_SIZE	ARRAY_SIZE(bcm_sf2_mib)

static void bcm_sf2_sw_get_strings(struct dsa_switch *ds,
				   int port, uint8_t *data)
{
	unsigned int i;

	for (i = 0; i < BCM_SF2_STATS_SIZE; i++)
		memcpy(data + i * ETH_GSTRING_LEN,
		       bcm_sf2_mib[i].string, ETH_GSTRING_LEN);
}

static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds,
					 int port, uint64_t *data)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	const struct bcm_sf2_hw_stats *s;
	unsigned int i;
	u64 val = 0;
	u32 offset;

	mutex_lock(&priv->stats_mutex);

	/* Now fetch the per-port counters */
	for (i = 0; i < BCM_SF2_STATS_SIZE; i++) {
		s = &bcm_sf2_mib[i];

		/* Do a latched 64-bit read if needed */
		offset = s->reg + CORE_P_MIB_OFFSET(port);
		if (s->sizeof_stat == 8)
			val = core_readq(priv, offset);
		else
			val = core_readl(priv, offset);

		data[i] = (u64)val;
	}

	mutex_unlock(&priv->stats_mutex);
}

static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
{
	return BCM_SF2_STATS_SIZE;
}

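/* The SF2 switch core is memory-mapped rather than accessed over MDIO, so
 * there is nothing to detect at probe time; just report the switch name.
 */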
static char *bcm_sf2_sw_probe(struct device *host_dev, int sw_addr)
{
	return "Broadcom Starfighter 2";
}

static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Enable the IMP Port to be in the same VLAN as the other ports
	 * on a per-port basis such that we only have Port i and IMP in
	 * the same VLAN.
	 */
	for (i = 0; i < priv->hw_params.num_ports; i++) {
		if (!((1 << i) & ds->phys_port_mask))
			continue;

		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= (1 << cpu_port);
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
	}
}

static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg, val;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Resolve which bit controls the Broadcom tag */
	switch (port) {
	case 8:
		val = BRCM_HDR_EN_P8;
		break;
	case 7:
		val = BRCM_HDR_EN_P7;
		break;
	case 5:
		val = BRCM_HDR_EN_P5;
		break;
	default:
		val = 0;
		break;
	}

	/* Enable Broadcom tags for IMP port */
	reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
	reg |= val;
	core_writel(priv, reg, CORE_BRCM_HDR_CTRL);

	/* Enable reception of Broadcom tags for CPU TX (switch RX) to
	 * allow us to tag outgoing frames
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);

	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
	 * allow delivering frames to the per-port net_devices
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);

	/* Force link status for IMP port */
	reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
}

static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg;

	reg = core_readl(priv, CORE_EEE_EN_CTRL);
	if (enable)
		reg |= 1 << port;
	else
		reg &= ~(1 << port);
	core_writel(priv, reg, CORE_EEE_EN_CTRL);
}

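/* Power sequencing for the internal GPHY: on enable, the PHY is taken out of
 * power down/IDDQ with its CK25 clock ungated and then released from reset
 * after a short delay; on disable, it is put back into reset and power down
 * and the clock is gated again.
 */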
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}

static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (port == 0 && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* If phy_stop() has been called before, the PHY
			 * will be in the halted state, and phy_start()
			 * will call resume.
			 *
			 * The resume path does not restore the autoneg
			 * settings, and since we hard reset the PHY
			 * manually here, we need to reset its state
			 * machine as well.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable port 7 interrupts to get notified */
	if (port == 7)
		intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));

	/* Set this port, and only this one, to be in its default VLAN;
	 * if it was a member of a bridge, restore the membership it had
	 * before the port was brought down.
	 */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	reg |= priv->port_sts[port].vlan_ctl_mask;
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}

static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 off, reg;

	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == 7) {
		intrl2_1_mask_set(priv, P_IRQ_MASK(P7_IRQ_OFF));
		intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR);
	}

	if (port == 0 && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	reg = core_readl(priv, off);
	reg |= RX_DIS | TX_DIS;
	core_writel(priv, reg, off);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}

/* Returns 0 if EEE was not enabled, or 1 otherwise
 */
static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
			    struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;
	int ret;

	p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);

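	/* Do not request PHY clock stop during LPI (second argument); if
	 * phy_init_eee() fails, EEE cannot be used on this link.
	 */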
	ret = phy_init_eee(phy, 0);
	if (ret)
		return 0;

	bcm_sf2_eee_enable_set(ds, port, true);

	return 1;
}

static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
			      struct ethtool_eee *e)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;
	u32 reg;

	reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
	e->eee_enabled = p->eee_enabled;
	e->eee_active = !!(reg & (1 << port));

	return 0;
}

static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
			      struct phy_device *phydev,
			      struct ethtool_eee *e)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcm_sf2_eee_enable_set(ds, port, false);
	} else {
		p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
		if (!p->eee_enabled)
			return -EOPNOTSUPP;
	}

	return 0;
}

/* Fast-ageing of ARL entries for a given port, equivalent to an ARL
 * flush for that port.
 */
static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int timeout = 1000;
	u32 reg;

	core_writel(priv, port, CORE_FAST_AGE_PORT);

	reg = core_readl(priv, CORE_FAST_AGE_CTRL);
	reg |= EN_AGE_PORT | FAST_AGE_STR_DONE;
	core_writel(priv, reg, CORE_FAST_AGE_CTRL);

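	/* The switch clears FAST_AGE_STR_DONE once the ageing operation has
	 * completed; poll for that with a bounded number of iterations.
	 */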
	do {
		reg = core_readl(priv, CORE_FAST_AGE_CTRL);
		if (!(reg & FAST_AGE_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
			      u32 br_port_mask)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int i;
	u32 reg, p_ctl;

	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		if (!((1 << i) & br_port_mask))
			continue;

		/* Add this local port to the remote port VLAN control
		 * membership and update the remote port bitmask
		 */
		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= 1 << port;
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		priv->port_sts[i].vlan_ctl_mask = reg;

		p_ctl |= 1 << i;
	}

	/* Configure the local port VLAN control membership to include
	 * remote ports and update the local port bitmask
	 */
	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;

	return 0;
}

static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port,
			       u32 br_port_mask)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int i;
	u32 reg, p_ctl;

	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		/* Don't touch the remaining ports */
		if (!((1 << i) & br_port_mask))
			continue;

		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg &= ~(1 << port);
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		priv->port_sts[i].vlan_ctl_mask = reg;

		/* Prevent self removal to preserve isolation */
		if (port != i)
			p_ctl &= ~(1 << i);
	}

	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;

	return 0;
}

static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u8 hw_state, cur_hw_state;
	int ret = 0;
	u32 reg;

	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	cur_hw_state = reg >> G_MISTP_STATE_SHIFT;

	switch (state) {
	case BR_STATE_DISABLED:
		hw_state = G_MISTP_DIS_STATE;
		break;
	case BR_STATE_LISTENING:
		hw_state = G_MISTP_LISTEN_STATE;
		break;
	case BR_STATE_LEARNING:
		hw_state = G_MISTP_LEARN_STATE;
		break;
	case BR_STATE_FORWARDING:
		hw_state = G_MISTP_FWD_STATE;
		break;
	case BR_STATE_BLOCKING:
		hw_state = G_MISTP_BLOCK_STATE;
		break;
	default:
		pr_err("%s: invalid STP state: %d\n", __func__, state);
		return -EINVAL;
	}

	/* Fast-age ARL entries if we are moving a port from Learning or
	 * Forwarding state to Disabled, Blocking or Listening state
	 */
	if (cur_hw_state != hw_state) {
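		/* The check below relies on bit 2 of the hardware state
		 * encoding being set only for the Learning and Forwarding
		 * states.
		 */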
		if (cur_hw_state & 4 && !(hw_state & 4)) {
			ret = bcm_sf2_sw_fast_age_port(ds, port);
			if (ret) {
				pr_err("%s: fast-ageing failed\n", __func__);
				return ret;
			}
		}
	}

	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
	reg |= hw_state;
	core_writel(priv, reg, CORE_G_PCTL_PORT(port));

	return 0;
}

static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

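	/* Track the port 7 link state from the link up/down interrupts; the
	 * fixed link update callback consumes it later.
	 */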
	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 1;
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 0;

	return IRQ_HANDLED;
}

static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
	core_writel(priv, reg, CORE_WATCHDOG_CTRL);

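	/* SOFTWARE_RESET is expected to self-clear once the reset sequence
	 * has completed; poll for that with a generous timeout.
	 */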
	do {
		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
		if (!(reg & SOFTWARE_RESET))
			break;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	if (timeout == 0)
		return -ETIMEDOUT;

	return 0;
}

static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
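	/* Mask and acknowledge all interrupts on both interrupt controllers */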
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}

static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct device_node *dn;
	void __iomem **base;
	unsigned int port;
	unsigned int i;
	u32 reg, rev;
	int ret;

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);

	/* All the interesting properties are at the parent device_node
	 * level
	 */
	dn = ds->pd->of_node->parent;

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

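	/* Map each register resource; this walks the consecutive __iomem
	 * pointers in bcm_sf2_priv starting at "core", in the same order as
	 * the BCM_SF2_REGS_NAME list.
	 */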
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		*base = of_iomap(dn, i);
		if (*base == NULL) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			ret = -ENOMEM;
			goto out_unmap;
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		goto out_unmap;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
			  "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_unmap;
	}

	ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
			  "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_free_irq0;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if ((1 << port) & ds->phys_port_mask)
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port, NULL);
	}

	/* Include the pseudo-PHY address and the broadcast PHY address to
	 * divert reads towards our workaround
	 */
	ds->phys_mii_mask |= ((1 << 30) | (1 << 0));

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_free_irq0:
	free_irq(priv->irq0, priv);
out_unmap:
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		if (*base)
			iounmap(*base);
		base++;
	}
	return ret;
}

static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
{
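	/* Nothing to do here; this switch does not need a MAC address
	 * programmed for the features this driver supports.
	 */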
	return 0;
}

static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
	 * the REG_PHY_REVISION register layout is.
	 */

	return priv->hw_params.gphy_rev;
}

static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr,
			       int regnum, u16 val)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	int ret = 0;
	u32 reg;

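	/* Indirect access through the switch MDIO master: select the MDIO
	 * master, point it at the requested PHY address, then access the
	 * register through a paged window (page << 8 | offset, shifted left
	 * by 2 to form a byte offset into the core register space).
	 */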
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}

static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
	/* Intercept reads from the MDIO broadcast address or Broadcom
	 * pseudo-PHY address
	 */
	switch (addr) {
	case 0:
	case 30:
		return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0);
	default:
		return 0xffff;
	}
}

static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum,
				u16 val)
{
	/* Intercept writes to the MDIO broadcast address or Broadcom
	 * pseudo-PHY address
	 */
	switch (addr) {
	case 0:
	case 30:
		bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val);
		break;
	}

	return 0;
}

static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 id_mode_dis = 0, port_mode;
	const char *str = NULL;
	u32 reg;

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		str = "RGMII (no delay)";
		id_mode_dis = 1;
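		/* fall through: RGMII and RGMII_TXID share the same port mode */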
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!str)
			str = "RGMII (TX delay)";
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		str = "MII";
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		str = "Reverse MII";
		port_mode = EXT_REVMII;
		break;
	default:
		/* All other PHYs: internal and MoCA */
		goto force_link;
	}

	/* If the link is down, just disable the interface to conserve power */
	if (!phydev->link) {
		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
		reg &= ~RGMII_MODE_EN;
		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, but
	 * make sure we enable the RGMII block for data to pass
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

	reg |= port_mode | RGMII_MODE_EN;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	if (phydev->pause) {
		if (phydev->asym_pause)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

	pr_info("Port %d configured for %s\n", port, str);

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (phydev->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (phydev->link)
		reg |= LINK_STS;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
}

static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 duplex, pause, speed;
	u32 reg;

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);
	speed = core_readl(priv, CORE_SPDSTS);

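	/* Extract this port's speed field from the aggregate speed status */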
	speed >>= (port * SPDSTS_SHIFT);
	speed &= SPDSTS_MASK;

	status->link = 0;

	/* Port 7 is special: we do not get its link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data flowing. We use the link state that the
	 * interrupt handler determined earlier.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == 7) {
		status->link = priv->port_sts[port].link;
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));

	switch (speed) {
	case SPDSTS_10:
		status->speed = SPEED_10;
		break;
	case SPDSTS_100:
		status->speed = SPEED_100;
		break;
	case SPDSTS_1000:
		status->speed = SPEED_1000;
		break;
	}

	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}

static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int port;

	bcm_sf2_intr_disable(priv);

	/* Disable all physically present ports, including the IMP port;
	 * the other ones have already been disabled during
	 * bcm_sf2_sw_setup().
	 */
	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if ((1 << port) & ds->phys_port_mask ||
		    dsa_is_cpu_port(ds, port))
			bcm_sf2_port_disable(ds, port, NULL);
	}

	return 0;
}

static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int port;
	int ret;

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if ((1 << port) & ds->phys_port_mask)
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
	}

	return 0;
}

static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
			       struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->dst[ds->index].master_netdev;
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct ethtool_wolinfo pwol;

	/* Get the parent device WoL settings */
	p->ethtool_ops->get_wol(p, &pwol);

	/* Advertise the parent device supported settings */
	wol->supported = pwol.supported;
	memset(&wol->sopass, 0, sizeof(wol->sopass));

	if (pwol.wolopts & WAKE_MAGICSECURE)
		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));

	if (priv->wol_ports_mask & (1 << port))
		wol->wolopts = pwol.wolopts;
	else
		wol->wolopts = 0;
}

static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->dst[ds->index].master_netdev;
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	struct ethtool_wolinfo pwol;

	p->ethtool_ops->get_wol(p, &pwol);
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	/* If we have at least one port enabled, make sure the CPU port
	 * is also enabled. If the CPU port is the last one enabled, we disable
	 * it since this configuration does not make sense.
	 */
	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
		priv->wol_ports_mask |= (1 << cpu_port);
	else
		priv->wol_ports_mask &= ~(1 << cpu_port);

	return p->ethtool_ops->set_wol(p, wol);
}

static struct dsa_switch_driver bcm_sf2_switch_driver = {
	.tag_protocol		= DSA_TAG_PROTO_BRCM,
	.priv_size		= sizeof(struct bcm_sf2_priv),
	.probe			= bcm_sf2_sw_probe,
	.setup			= bcm_sf2_sw_setup,
	.set_addr		= bcm_sf2_sw_set_addr,
	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
	.phy_read		= bcm_sf2_sw_phy_read,
	.phy_write		= bcm_sf2_sw_phy_write,
	.get_strings		= bcm_sf2_sw_get_strings,
	.get_ethtool_stats	= bcm_sf2_sw_get_ethtool_stats,
	.get_sset_count		= bcm_sf2_sw_get_sset_count,
	.adjust_link		= bcm_sf2_sw_adjust_link,
	.fixed_link_update	= bcm_sf2_sw_fixed_link_update,
	.suspend		= bcm_sf2_sw_suspend,
	.resume			= bcm_sf2_sw_resume,
	.get_wol		= bcm_sf2_sw_get_wol,
	.set_wol		= bcm_sf2_sw_set_wol,
	.port_enable		= bcm_sf2_port_setup,
	.port_disable		= bcm_sf2_port_disable,
	.get_eee		= bcm_sf2_sw_get_eee,
	.set_eee		= bcm_sf2_sw_set_eee,
	.port_join_bridge	= bcm_sf2_sw_br_join,
	.port_leave_bridge	= bcm_sf2_sw_br_leave,
	.port_stp_update	= bcm_sf2_sw_br_set_stp_state,
};

static int __init bcm_sf2_init(void)
{
	register_switch_driver(&bcm_sf2_switch_driver);

	return 0;
}
module_init(bcm_sf2_init);

static void __exit bcm_sf2_exit(void)
{
	unregister_switch_driver(&bcm_sf2_switch_driver);
}
module_exit(bcm_sf2_exit);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");