xref: /openbmc/linux/drivers/net/dsa/xrs700x/xrs700x.c (revision 941518d6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020 NovaTech LLC
4  * George McCollister <george.mccollister@gmail.com>
5  */
6 
7 #include <net/dsa.h>
8 #include <linux/etherdevice.h>
9 #include <linux/if_bridge.h>
10 #include <linux/of_device.h>
11 #include <linux/netdev_features.h>
12 #include <linux/if_hsr.h>
13 #include "xrs700x.h"
14 #include "xrs700x_reg.h"
15 
16 #define XRS700X_MIB_INTERVAL msecs_to_jiffies(3000)
17 
18 #define XRS7000X_SUPPORTED_HSR_FEATURES \
19 	(NETIF_F_HW_HSR_TAG_INS | NETIF_F_HW_HSR_TAG_RM | \
20 	 NETIF_F_HW_HSR_FWD | NETIF_F_HW_HSR_DUP)
21 
22 #define XRS7003E_ID	0x100
23 #define XRS7003F_ID	0x101
24 #define XRS7004E_ID	0x200
25 #define XRS7004F_ID	0x201
26 
/* Per-variant identification data: device id, marketing name and port
 * count. Exported for use outside this module.
 */
const struct xrs700x_info xrs7003e_info = {XRS7003E_ID, "XRS7003E", 3};
EXPORT_SYMBOL(xrs7003e_info);

const struct xrs700x_info xrs7003f_info = {XRS7003F_ID, "XRS7003F", 3};
EXPORT_SYMBOL(xrs7003f_info);

const struct xrs700x_info xrs7004e_info = {XRS7004E_ID, "XRS7004E", 4};
EXPORT_SYMBOL(xrs7004e_info);

const struct xrs700x_info xrs7004f_info = {XRS7004F_ID, "XRS7004F", 4};
EXPORT_SYMBOL(xrs7004f_info);
38 
/* Binds a reg_field description to the regmap_field pointer it is
 * allocated into (see xrs700x_setup_regmap_range()).
 */
struct xrs700x_regfield {
	struct reg_field rf;
	struct regmap_field **rmf;
};

/* Describes one hardware MIB counter. */
struct xrs700x_mib {
	unsigned int offset;	/* register offset of the counter's low word */
	const char *name;	/* ethtool statistics name */
	int stats64_offset;	/* offset into rtnl_link_stats64, or -1 */
};
49 
/* XRS700X_MIB_ETHTOOL_ONLY: counter reported via ethtool -S only.
 * XRS700X_MIB: additionally folded into the named rtnl_link_stats64 member.
 */
#define XRS700X_MIB_ETHTOOL_ONLY(o, n) {o, n, -1}
#define XRS700X_MIB(o, n, m) {o, n, offsetof(struct rtnl_link_stats64, m)}

/* All hardware MIB counters read by xrs700x_read_port_counters(). */
static const struct xrs700x_mib xrs700x_mibs[] = {
	XRS700X_MIB(XRS_RX_GOOD_OCTETS_L, "rx_good_octets", rx_bytes),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_BAD_OCTETS_L, "rx_bad_octets"),
	XRS700X_MIB(XRS_RX_UNICAST_L, "rx_unicast", rx_packets),
	XRS700X_MIB(XRS_RX_BROADCAST_L, "rx_broadcast", rx_packets),
	XRS700X_MIB(XRS_RX_MULTICAST_L, "rx_multicast", multicast),
	XRS700X_MIB(XRS_RX_UNDERSIZE_L, "rx_undersize", rx_length_errors),
	XRS700X_MIB(XRS_RX_FRAGMENTS_L, "rx_fragments", rx_length_errors),
	XRS700X_MIB(XRS_RX_OVERSIZE_L, "rx_oversize", rx_length_errors),
	XRS700X_MIB(XRS_RX_JABBER_L, "rx_jabber", rx_length_errors),
	XRS700X_MIB(XRS_RX_ERR_L, "rx_err", rx_errors),
	XRS700X_MIB(XRS_RX_CRC_L, "rx_crc", rx_crc_errors),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_64_L, "rx_64"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_65_127_L, "rx_65_127"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_128_255_L, "rx_128_255"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_256_511_L, "rx_256_511"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_512_1023_L, "rx_512_1023"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_1024_1536_L, "rx_1024_1536"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_HSR_PRP_L, "rx_hsr_prp"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_WRONGLAN_L, "rx_wronglan"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_DUPLICATE_L, "rx_duplicate"),
	XRS700X_MIB(XRS_TX_OCTETS_L, "tx_octets", tx_bytes),
	XRS700X_MIB(XRS_TX_UNICAST_L, "tx_unicast", tx_packets),
	XRS700X_MIB(XRS_TX_BROADCAST_L, "tx_broadcast", tx_packets),
	XRS700X_MIB(XRS_TX_MULTICAST_L, "tx_multicast", tx_packets),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_TX_HSR_PRP_L, "tx_hsr_prp"),
	XRS700X_MIB(XRS_PRIQ_DROP_L, "priq_drop", tx_dropped),
	XRS700X_MIB(XRS_EARLY_DROP_L, "early_drop", tx_dropped),
};
82 
/* HSR/PRP supervision frame destination base address 01:15:4e:00:01:XX;
 * only the first five octets are compared (see
 * xrs700x_port_add_hsrsup_ipf()).
 */
static const u8 eth_hsrsup_addr[ETH_ALEN] = {
	0x01, 0x15, 0x4e, 0x00, 0x01, 0x00};
85 
86 static void xrs700x_get_strings(struct dsa_switch *ds, int port,
87 				u32 stringset, u8 *data)
88 {
89 	int i;
90 
91 	if (stringset != ETH_SS_STATS)
92 		return;
93 
94 	for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
95 		strscpy(data, xrs700x_mibs[i].name, ETH_GSTRING_LEN);
96 		data += ETH_GSTRING_LEN;
97 	}
98 }
99 
100 static int xrs700x_get_sset_count(struct dsa_switch *ds, int port, int sset)
101 {
102 	if (sset != ETH_SS_STATS)
103 		return -EOPNOTSUPP;
104 
105 	return ARRAY_SIZE(xrs700x_mibs);
106 }
107 
/* Latch and accumulate the hardware MIB counters for @port, then publish
 * a refreshed rtnl_link_stats64 snapshot for xrs700x_get_stats64().
 */
static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
{
	struct xrs700x_port *p = &priv->ports[port];
	struct rtnl_link_stats64 stats;
	int i;

	memset(&stats, 0, sizeof(stats));

	/* Serialize against the delayed work / ethtool readers. */
	mutex_lock(&p->mib_mutex);

	/* Capture counter values */
	regmap_write(priv->regmap, XRS_CNT_CTRL(port), 1);

	for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
		unsigned int high = 0, low = 0, reg;

		/* Each counter is two 16-bit registers, low word first. */
		reg = xrs700x_mibs[i].offset + XRS_PORT_OFFSET * port;
		regmap_read(priv->regmap, reg, &low);
		regmap_read(priv->regmap, reg + 2, &high);

		/* Accumulate; the latched value appears to be the delta
		 * since the previous capture — confirm against datasheet.
		 */
		p->mib_data[i] += (high << 16) | low;

		/* Fold the counter into its stats64 member, if it has one. */
		if (xrs700x_mibs[i].stats64_offset >= 0) {
			u8 *s = (u8 *)&stats + xrs700x_mibs[i].stats64_offset;
			*(u64 *)s += p->mib_data[i];
		}
	}

	/* multicast must be added to rx_packets (which already includes
	 * unicast and broadcast)
	 */
	stats.rx_packets += stats.multicast;

	/* Publish the snapshot under the u64_stats seqcount. */
	u64_stats_update_begin(&p->syncp);
	p->stats64 = stats;
	u64_stats_update_end(&p->syncp);

	mutex_unlock(&p->mib_mutex);
}
147 
148 static void xrs700x_mib_work(struct work_struct *work)
149 {
150 	struct xrs700x *priv = container_of(work, struct xrs700x,
151 					    mib_work.work);
152 	int i;
153 
154 	for (i = 0; i < priv->ds->num_ports; i++)
155 		xrs700x_read_port_counters(priv, i);
156 
157 	schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);
158 }
159 
/* ethtool -S: refresh the hardware counters for @port, then copy the
 * accumulated 64-bit values out under the MIB mutex.
 */
static void xrs700x_get_ethtool_stats(struct dsa_switch *ds, int port,
				      u64 *data)
{
	struct xrs700x *priv = ds->priv;
	struct xrs700x_port *p = &priv->ports[port];

	/* Pull fresh values from hardware before reporting. */
	xrs700x_read_port_counters(priv, port);

	/* mib_data is also updated from delayed work; guard the copy. */
	mutex_lock(&p->mib_mutex);
	memcpy(data, p->mib_data, sizeof(*data) * ARRAY_SIZE(xrs700x_mibs));
	mutex_unlock(&p->mib_mutex);
}
172 
/* Copy the most recent stats snapshot published by
 * xrs700x_read_port_counters(), retrying if a concurrent update races.
 */
static void xrs700x_get_stats64(struct dsa_switch *ds, int port,
				struct rtnl_link_stats64 *s)
{
	struct xrs700x *priv = ds->priv;
	struct xrs700x_port *p = &priv->ports[port];
	unsigned int start;

	/* Standard u64_stats seqcount read loop. */
	do {
		start = u64_stats_fetch_begin(&p->syncp);
		*s = p->stats64;
	} while (u64_stats_fetch_retry(&p->syncp, start));
}
185 
186 static int xrs700x_setup_regmap_range(struct xrs700x *priv)
187 {
188 	struct xrs700x_regfield regfields[] = {
189 		{
190 			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 0, 1,
191 					   priv->ds->num_ports,
192 					   XRS_PORT_OFFSET),
193 			.rmf = &priv->ps_forward
194 		},
195 		{
196 			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 2, 3,
197 					   priv->ds->num_ports,
198 					   XRS_PORT_OFFSET),
199 			.rmf = &priv->ps_management
200 		},
201 		{
202 			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 4, 9,
203 					   priv->ds->num_ports,
204 					   XRS_PORT_OFFSET),
205 			.rmf = &priv->ps_sel_speed
206 		},
207 		{
208 			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 10, 11,
209 					   priv->ds->num_ports,
210 					   XRS_PORT_OFFSET),
211 			.rmf = &priv->ps_cur_speed
212 		}
213 	};
214 	int i = 0;
215 
216 	for (; i < ARRAY_SIZE(regfields); i++) {
217 		*regfields[i].rmf = devm_regmap_field_alloc(priv->dev,
218 							    priv->regmap,
219 							    regfields[i].rf);
220 		if (IS_ERR(*regfields[i].rmf))
221 			return PTR_ERR(*regfields[i].rmf);
222 	}
223 
224 	return 0;
225 }
226 
/* All ports use the XRS700x trailing tag protocol. */
static enum dsa_tag_protocol xrs700x_get_tag_protocol(struct dsa_switch *ds,
						      int port,
						      enum dsa_tag_protocol m)
{
	return DSA_TAG_PROTO_XRS700X;
}
233 
234 static int xrs700x_reset(struct dsa_switch *ds)
235 {
236 	struct xrs700x *priv = ds->priv;
237 	unsigned int val;
238 	int ret;
239 
240 	ret = regmap_write(priv->regmap, XRS_GENERAL, XRS_GENERAL_RESET);
241 	if (ret)
242 		goto error;
243 
244 	ret = regmap_read_poll_timeout(priv->regmap, XRS_GENERAL,
245 				       val, !(val & XRS_GENERAL_RESET),
246 				       10, 1000);
247 error:
248 	if (ret) {
249 		dev_err_ratelimited(priv->dev, "error resetting switch: %d\n",
250 				    ret);
251 	}
252 
253 	return ret;
254 }
255 
/* Program the hardware forwarding state for @port and gate the BPDU
 * inbound policy (the trap stays enabled in every state except
 * BR_STATE_DISABLED).
 */
static void xrs700x_port_stp_state_set(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct xrs700x *priv = ds->priv;
	unsigned int bpdus = 1;
	unsigned int val;

	switch (state) {
	case BR_STATE_DISABLED:
		/* Fully disabled ports do not trap BPDUs either. */
		bpdus = 0;
		fallthrough;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		val = XRS_PORT_DISABLED;
		break;
	case BR_STATE_LEARNING:
		val = XRS_PORT_LEARNING;
		break;
	case BR_STATE_FORWARDING:
		val = XRS_PORT_FORWARDING;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	regmap_fields_write(priv->ps_forward, port, val);

	/* Enable/disable inbound policy added by xrs700x_port_add_bpdu_ipf()
	 * which allows BPDU forwarding to the CPU port when the front facing
	 * port is in disabled/learning state.
	 */
	regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 1, bpdus);

	dev_dbg_ratelimited(priv->dev, "%s - port: %d, state: %u, val: 0x%x\n",
			    __func__, port, state, val);
}
293 
/* Add an inbound policy filter which matches the BPDU destination MAC
 * and forwards to the CPU port. Leave the policy disabled, it will be
 * enabled as needed.
 */
static int xrs700x_port_add_bpdu_ipf(struct dsa_switch *ds, int port)
{
	struct xrs700x *priv = ds->priv;
	unsigned int val = 0;
	int i = 0;
	int ret;

	/* Compare all 48 bits of the destination MAC address. */
	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 48 << 2);
	if (ret)
		return ret;

	/* match BPDU destination 01:80:c2:00:00:00 */
	for (i = 0; i < sizeof(eth_stp_addr); i += 2) {
		/* Each 16-bit register takes two octets, low byte first. */
		ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 0) + i,
				   eth_stp_addr[i] |
				   (eth_stp_addr[i + 1] << 8));
		if (ret)
			return ret;
	}

	/* Mirror BPDU to CPU port */
	for (i = 0; i < ds->num_ports; i++) {
		if (dsa_is_cpu_port(ds, i))
			val |= BIT(i);
	}

	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 0), val);
	if (ret)
		return ret;

	/* No allowed ports: matched BPDUs reach the CPU port only. */
	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 0), 0);
	if (ret)
		return ret;

	return 0;
}
335 
/* Add an inbound policy filter which matches the HSR/PRP supervision MAC
 * range and forwards to the CPU port without discarding duplicates.
 * This is required to correctly populate the HSR/PRP node_table.
 * Leave the policy disabled, it will be enabled as needed.
 *
 * @fwdport: additional port to allow forwarding to, or -1 for none.
 */
static int xrs700x_port_add_hsrsup_ipf(struct dsa_switch *ds, int port,
				       int fwdport)
{
	struct xrs700x *priv = ds->priv;
	unsigned int val = 0;
	int i = 0;
	int ret;

	/* Compare 40 bits of the destination MAC address. */
	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 40 << 2);
	if (ret)
		return ret;

	/* match HSR/PRP supervision destination 01:15:4e:00:01:XX */
	for (i = 0; i < sizeof(eth_hsrsup_addr); i += 2) {
		/* Each 16-bit register takes two octets, low byte first. */
		ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 1) + i,
				   eth_hsrsup_addr[i] |
				   (eth_hsrsup_addr[i + 1] << 8));
		if (ret)
			return ret;
	}

	/* Mirror HSR/PRP supervision to CPU port */
	for (i = 0; i < ds->num_ports; i++) {
		if (dsa_is_cpu_port(ds, i))
			val |= BIT(i);
	}

	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 1), val);
	if (ret)
		return ret;

	if (fwdport >= 0)
		val |= BIT(fwdport);

	/* Allow must be set to prevent duplicate discard */
	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 1), val);
	if (ret)
		return ret;

	return 0;
}
383 
/* Initial (pre-bridge) configuration of one port: isolate user ports so
 * they only reach the CPU port, set the port mode, and install the
 * (initially disabled) BPDU trap on user ports.
 */
static int xrs700x_port_setup(struct dsa_switch *ds, int port)
{
	bool cpu_port = dsa_is_cpu_port(ds, port);
	struct xrs700x *priv = ds->priv;
	unsigned int val = 0;
	int ret, i;

	/* Start with the port disabled; DSA enables it later via STP. */
	xrs700x_port_stp_state_set(ds, port, BR_STATE_DISABLED);

	/* Disable forwarding to non-CPU ports */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_cpu_port(ds, i))
			val |= BIT(i);
	}

	/* 1 = Disable forwarding to the port */
	ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
	if (ret)
		return ret;

	/* CPU port runs in management mode, user ports in normal mode. */
	val = cpu_port ? XRS_PORT_MODE_MANAGEMENT : XRS_PORT_MODE_NORMAL;
	ret = regmap_fields_write(priv->ps_management, port, val);
	if (ret)
		return ret;

	if (!cpu_port) {
		ret = xrs700x_port_add_bpdu_ipf(ds, port);
		if (ret)
			return ret;
	}

	return 0;
}
417 
418 static int xrs700x_setup(struct dsa_switch *ds)
419 {
420 	struct xrs700x *priv = ds->priv;
421 	int ret, i;
422 
423 	ret = xrs700x_reset(ds);
424 	if (ret)
425 		return ret;
426 
427 	for (i = 0; i < ds->num_ports; i++) {
428 		ret = xrs700x_port_setup(ds, i);
429 		if (ret)
430 			return ret;
431 	}
432 
433 	schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);
434 
435 	return 0;
436 }
437 
/* Stop the periodic MIB polling started by xrs700x_setup(). */
static void xrs700x_teardown(struct dsa_switch *ds)
{
	struct xrs700x *priv = ds->priv;

	cancel_delayed_work_sync(&priv->mib_work);
}
444 
/* Restrict advertised link modes per port: port 0 is 10/100 only,
 * ports 1-3 additionally support 1000BASE-T; full duplex everywhere.
 */
static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
				     unsigned long *supported,
				     struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	switch (port) {
	case 0:
		break;
	case 1:
	case 2:
	case 3:
		phylink_set(mask, 1000baseT_Full);
		break;
	default:
		linkmode_zero(supported);
		dev_err(ds->dev, "Unsupported port: %i\n", port);
		return;
	}

	phylink_set_port_modes(mask);

	/* The switch only supports full duplex. */
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);
}
474 
475 static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
476 				unsigned int mode, phy_interface_t interface,
477 				struct phy_device *phydev,
478 				int speed, int duplex,
479 				bool tx_pause, bool rx_pause)
480 {
481 	struct xrs700x *priv = ds->priv;
482 	unsigned int val;
483 
484 	switch (speed) {
485 	case SPEED_1000:
486 		val = XRS_PORT_SPEED_1000;
487 		break;
488 	case SPEED_100:
489 		val = XRS_PORT_SPEED_100;
490 		break;
491 	case SPEED_10:
492 		val = XRS_PORT_SPEED_10;
493 		break;
494 	default:
495 		return;
496 	}
497 
498 	regmap_fields_write(priv->ps_sel_speed, port, val);
499 
500 	dev_dbg_ratelimited(priv->dev, "%s: port: %d mode: %u speed: %u\n",
501 			    __func__, port, mode, speed);
502 }
503 
/* Recompute forwarding masks when @port joins or leaves @bridge.
 * In XRS_PORT_FWD_MASK a set bit *disables* forwarding to that port.
 */
static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
				 struct dsa_bridge bridge, bool join)
{
	unsigned int i, cpu_mask = 0, mask = 0;
	struct xrs700x *priv = ds->priv;
	int ret;

	/* Build two masks:
	 * cpu_mask - blocks every user port (only the CPU port reachable);
	 * mask     - blocks only user ports outside this bridge.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		if (dsa_is_cpu_port(ds, i))
			continue;

		cpu_mask |= BIT(i);

		if (dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;

		mask |= BIT(i);
	}

	/* Let all remaining bridge members forward to each other. */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;

		/* 1 = Disable forwarding to the port */
		ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(i), mask);
		if (ret)
			return ret;
	}

	/* A leaving port may only talk to the CPU port again. */
	if (!join) {
		ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port),
				   cpu_mask);
		if (ret)
			return ret;
	}

	return 0;
}
542 
/* DSA .port_bridge_join: open forwarding between bridge members. */
static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
			       struct dsa_bridge bridge, bool *tx_fwd_offload)
{
	return xrs700x_bridge_common(ds, port, bridge, true);
}
548 
/* DSA .port_bridge_leave: re-isolate @port to CPU-only forwarding. */
static void xrs700x_bridge_leave(struct dsa_switch *ds, int port,
				 struct dsa_bridge bridge)
{
	xrs700x_bridge_common(ds, port, bridge, false);
}
554 
/* Enable HSR/PRP redundancy offload once both redundant ports (1 and 2)
 * have been enslaved to the same HSR device.
 */
static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
			    struct net_device *hsr)
{
	unsigned int val = XRS_HSR_CFG_HSR_PRP;
	struct dsa_port *partner = NULL, *dp;
	struct xrs700x *priv = ds->priv;
	struct net_device *slave;
	int ret, i, hsr_pair[2];
	enum hsr_version ver;
	bool fwd = false;

	ret = hsr_get_version(hsr, &ver);
	if (ret)
		return ret;

	/* Only ports 1 and 2 can be HSR/PRP redundant ports. */
	if (port != 1 && port != 2)
		return -EOPNOTSUPP;

	if (ver == HSR_V1)
		val |= XRS_HSR_CFG_HSR;
	else if (ver == PRP_V1)
		val |= XRS_HSR_CFG_PRP;
	else
		return -EOPNOTSUPP;

	/* Find the other switch port enslaved to the same HSR device. */
	dsa_hsr_foreach_port(dp, ds, hsr) {
		if (dp->index != port) {
			partner = dp;
			break;
		}
	}

	/* We can't enable redundancy on the switch until both
	 * redundant ports have signed up.
	 */
	if (!partner)
		return 0;

	/* Stop traffic on both ports while reconfiguring them. */
	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_DISABLED);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);

	/* The first port to join becomes LAN A, this one LAN B. */
	regmap_write(priv->regmap, XRS_HSR_CFG(partner->index),
		     val | XRS_HSR_CFG_LANID_A);
	regmap_write(priv->regmap, XRS_HSR_CFG(port),
		     val | XRS_HSR_CFG_LANID_B);

	/* Clear bits for both redundant ports (HSR only) and the CPU port to
	 * enable forwarding.
	 */
	val = GENMASK(ds->num_ports - 1, 0);
	if (ver == HSR_V1) {
		val &= ~BIT(partner->index);
		val &= ~BIT(port);
		fwd = true;
	}
	val &= ~BIT(dsa_upstream_port(ds, port));
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);

	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_FORWARDING);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);

	/* Enable inbound policy which allows HSR/PRP supervision forwarding
	 * to the CPU port without discarding duplicates. Continue to
	 * forward to redundant ports when in HSR mode while discarding
	 * duplicates.
	 */
	ret = xrs700x_port_add_hsrsup_ipf(ds, partner->index, fwd ? port : -1);
	if (ret)
		return ret;

	ret = xrs700x_port_add_hsrsup_ipf(ds, port, fwd ? partner->index : -1);
	if (ret)
		return ret;

	/* Bit 0 of XRS_ETH_ADDR_CFG enables the inbound policy. */
	regmap_update_bits(priv->regmap,
			   XRS_ETH_ADDR_CFG(partner->index, 1), 1, 1);
	regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 1);

	/* Advertise HSR tag offload features on both slave netdevs. */
	hsr_pair[0] = port;
	hsr_pair[1] = partner->index;
	for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
		slave = dsa_to_port(ds, hsr_pair[i])->slave;
		slave->features |= XRS7000X_SUPPORTED_HSR_FEATURES;
	}

	return 0;
}
646 
/* Undo xrs700x_hsr_join(): disable HSR/PRP offload on both redundant
 * ports and restore plain (CPU-only) forwarding.
 */
static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
			     struct net_device *hsr)
{
	struct dsa_port *partner = NULL, *dp;
	struct xrs700x *priv = ds->priv;
	struct net_device *slave;
	int i, hsr_pair[2];
	unsigned int val;

	/* Find the other switch port enslaved to the same HSR device. */
	dsa_hsr_foreach_port(dp, ds, hsr) {
		if (dp->index != port) {
			partner = dp;
			break;
		}
	}

	/* Redundancy was never enabled without a partner; nothing to undo. */
	if (!partner)
		return 0;

	/* Stop traffic on both ports while reconfiguring them. */
	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_DISABLED);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);

	/* Turn HSR/PRP mode off on both ports. */
	regmap_write(priv->regmap, XRS_HSR_CFG(partner->index), 0);
	regmap_write(priv->regmap, XRS_HSR_CFG(port), 0);

	/* Clear bit for the CPU port to enable forwarding. */
	val = GENMASK(ds->num_ports - 1, 0);
	val &= ~BIT(dsa_upstream_port(ds, port));
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);

	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_FORWARDING);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);

	/* Disable inbound policy added by xrs700x_port_add_hsrsup_ipf()
	 * which allows HSR/PRP supervision forwarding to the CPU port without
	 * discarding duplicates.
	 */
	regmap_update_bits(priv->regmap,
			   XRS_ETH_ADDR_CFG(partner->index, 1), 1, 0);
	regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 0);

	/* Drop the HSR offload features from both slave netdevs. */
	hsr_pair[0] = port;
	hsr_pair[1] = partner->index;
	for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
		slave = dsa_to_port(ds, hsr_pair[i])->slave;
		slave->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES;
	}

	return 0;
}
700 
/* DSA switch operations implemented by this driver. */
static const struct dsa_switch_ops xrs700x_ops = {
	.get_tag_protocol	= xrs700x_get_tag_protocol,
	.setup			= xrs700x_setup,
	.teardown		= xrs700x_teardown,
	.port_stp_state_set	= xrs700x_port_stp_state_set,
	.phylink_validate	= xrs700x_phylink_validate,
	.phylink_mac_link_up	= xrs700x_mac_link_up,
	.get_strings		= xrs700x_get_strings,
	.get_sset_count		= xrs700x_get_sset_count,
	.get_ethtool_stats	= xrs700x_get_ethtool_stats,
	.get_stats64		= xrs700x_get_stats64,
	.port_bridge_join	= xrs700x_bridge_join,
	.port_bridge_leave	= xrs700x_bridge_leave,
	.port_hsr_join		= xrs700x_hsr_join,
	.port_hsr_leave		= xrs700x_hsr_leave,
};
717 
718 static int xrs700x_detect(struct xrs700x *priv)
719 {
720 	const struct xrs700x_info *info;
721 	unsigned int id;
722 	int ret;
723 
724 	ret = regmap_read(priv->regmap, XRS_DEV_ID0, &id);
725 	if (ret) {
726 		dev_err(priv->dev, "error %d while reading switch id.\n",
727 			ret);
728 		return ret;
729 	}
730 
731 	info = of_device_get_match_data(priv->dev);
732 	if (!info)
733 		return -EINVAL;
734 
735 	if (info->id == id) {
736 		priv->ds->num_ports = info->num_ports;
737 		dev_info(priv->dev, "%s detected.\n", info->name);
738 		return 0;
739 	}
740 
741 	dev_err(priv->dev, "expected switch id 0x%x but found 0x%x.\n",
742 		info->id, id);
743 
744 	return -ENODEV;
745 }
746 
747 struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv)
748 {
749 	struct dsa_switch *ds;
750 	struct xrs700x *priv;
751 
752 	ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
753 	if (!ds)
754 		return NULL;
755 
756 	ds->dev = base;
757 
758 	priv = devm_kzalloc(base, sizeof(*priv), GFP_KERNEL);
759 	if (!priv)
760 		return NULL;
761 
762 	INIT_DELAYED_WORK(&priv->mib_work, xrs700x_mib_work);
763 
764 	ds->ops = &xrs700x_ops;
765 	ds->priv = priv;
766 	priv->dev = base;
767 
768 	priv->ds = ds;
769 	priv->priv = devpriv;
770 
771 	return priv;
772 }
773 EXPORT_SYMBOL(xrs700x_switch_alloc);
774 
/* Allocate the per-port MIB accumulation array and init its locking. */
static int xrs700x_alloc_port_mib(struct xrs700x *priv, int port)
{
	struct xrs700x_port *p = &priv->ports[port];

	/* One u64 accumulator per entry in xrs700x_mibs. */
	p->mib_data = devm_kcalloc(priv->dev, ARRAY_SIZE(xrs700x_mibs),
				   sizeof(*p->mib_data), GFP_KERNEL);
	if (!p->mib_data)
		return -ENOMEM;

	mutex_init(&p->mib_mutex);
	u64_stats_init(&p->syncp);

	return 0;
}
789 
/* Probe-time registration: identify the chip, set up regmap fields and
 * per-port stats storage, then hand the switch to the DSA core.
 */
int xrs700x_switch_register(struct xrs700x *priv)
{
	int ret;
	int i;

	ret = xrs700x_detect(priv);
	if (ret)
		return ret;

	ret = xrs700x_setup_regmap_range(priv);
	if (ret)
		return ret;

	/* num_ports is known only after xrs700x_detect(). */
	priv->ports = devm_kcalloc(priv->dev, priv->ds->num_ports,
				   sizeof(*priv->ports), GFP_KERNEL);
	if (!priv->ports)
		return -ENOMEM;

	for (i = 0; i < priv->ds->num_ports; i++) {
		ret = xrs700x_alloc_port_mib(priv, i);
		if (ret)
			return ret;
	}

	return dsa_register_switch(priv->ds);
}
EXPORT_SYMBOL(xrs700x_switch_register);
817 
/* Counterpart of xrs700x_switch_register(). */
void xrs700x_switch_remove(struct xrs700x *priv)
{
	dsa_unregister_switch(priv->ds);
}
EXPORT_SYMBOL(xrs700x_switch_remove);
823 
/* Bus driver shutdown hook: delegate to the DSA core. */
void xrs700x_switch_shutdown(struct xrs700x *priv)
{
	dsa_switch_shutdown(priv->ds);
}
EXPORT_SYMBOL(xrs700x_switch_shutdown);
829 
830 MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
831 MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver");
832 MODULE_LICENSE("GPL v2");
833