1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
4  */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <linux/delay.h>
9 #include <linux/module.h>
10 #include <linux/printk.h>
11 #include <linux/spi/spi.h>
12 #include <linux/errno.h>
13 #include <linux/gpio/consumer.h>
14 #include <linux/phylink.h>
15 #include <linux/of.h>
16 #include <linux/of_net.h>
17 #include <linux/of_mdio.h>
18 #include <linux/of_device.h>
19 #include <linux/netdev_features.h>
20 #include <linux/netdevice.h>
21 #include <linux/if_bridge.h>
22 #include <linux/if_ether.h>
23 #include <linux/dsa/8021q.h>
24 #include "sja1105.h"
25 
/* Pulse the chip's active-high hardware reset line, then wait for the
 * switch to come back up. Both durations are in milliseconds.
 */
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
			     unsigned int startup_delay)
{
	/* Assert reset for at least the minimum pulse width */
	gpiod_set_value_cansleep(gpio, 1);
	msleep(pulse_len);

	/* Release reset, then give the chip time to become ready */
	gpiod_set_value_cansleep(gpio, 0);
	msleep(startup_delay);
}
36 
37 static void
38 sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
39 			   int from, int to, bool allow)
40 {
41 	if (allow) {
42 		l2_fwd[from].bc_domain  |= BIT(to);
43 		l2_fwd[from].reach_port |= BIT(to);
44 		l2_fwd[from].fl_domain  |= BIT(to);
45 	} else {
46 		l2_fwd[from].bc_domain  &= ~BIT(to);
47 		l2_fwd[from].reach_port &= ~BIT(to);
48 		l2_fwd[from].fl_domain  &= ~BIT(to);
49 	}
50 }
51 
/* Structure used to temporarily transport device tree
 * settings into sja1105_setup
 */
struct sja1105_dt_port {
	phy_interface_t phy_mode;	/* parsed from the "phy-mode" DT property */
	sja1105_mii_role_t role;	/* XMII_MAC or XMII_PHY, derived from DT */
};
59 
/* (Re)build the static MAC Configuration Table with one entry per port.
 * User ports start with I/O and address learning disabled (the STP state
 * machine enables them later); the upstream/CPU port is enabled statically
 * here because STP never runs on it.
 *
 * Returns 0, or -ENOMEM on allocation failure.
 */
static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry default_mac = {
		/* Enable all 8 priority queues on egress.
		 * Every queue i holds top[i] - base[i] frames.
		 * Sum of top[i] - base[i] is 511 (max hardware limit).
		 */
		.top  = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
		.base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
		.enabled = {true, true, true, true, true, true, true, true},
		/* Keep standard IFG of 12 bytes on egress. */
		.ifg = 0,
		/* Always put the MAC speed in automatic mode, where it can be
		 * retrieved from the PHY object through phylib and
		 * sja1105_adjust_port_config.
		 */
		.speed = SJA1105_SPEED_AUTO,
		/* No static correction for 1-step 1588 events */
		.tp_delin = 0,
		.tp_delout = 0,
		/* Disable aging for critical TTEthernet traffic */
		.maxage = 0xFF,
		/* Internal VLAN (pvid) to apply to untagged ingress */
		.vlanprio = 0,
		.vlanid = 0,
		.ing_mirr = false,
		.egr_mirr = false,
		/* Don't drop traffic with other EtherType than ETH_P_IP */
		.drpnona664 = false,
		/* Don't drop double-tagged traffic */
		.drpdtag = false,
		/* Don't drop untagged traffic */
		.drpuntag = false,
		/* Don't retag 802.1p (VID 0) traffic with the pvid */
		.retag = false,
		/* Disable learning and I/O on user ports by default -
		 * STP will enable it.
		 */
		.dyn_learn = false,
		.egress = false,
		.ingress = false,
	};
	struct sja1105_mac_config_entry *mac;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];

	/* Discard previous MAC Configuration Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_NUM_PORTS,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	/* Override table based on phylib DT bindings */
	table->entry_count = SJA1105_NUM_PORTS;

	mac = table->entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		mac[i] = default_mac;
		if (i == dsa_upstream_port(priv->ds, i)) {
			/* STP doesn't get called for CPU port, so we need to
			 * set the I/O parameters statically.
			 */
			mac[i].dyn_learn = true;
			mac[i].ingress = true;
			mac[i].egress = true;
		}
	}

	return 0;
}
138 
139 static int sja1105_init_mii_settings(struct sja1105_private *priv,
140 				     struct sja1105_dt_port *ports)
141 {
142 	struct device *dev = &priv->spidev->dev;
143 	struct sja1105_xmii_params_entry *mii;
144 	struct sja1105_table *table;
145 	int i;
146 
147 	table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
148 
149 	/* Discard previous xMII Mode Parameters Table */
150 	if (table->entry_count) {
151 		kfree(table->entries);
152 		table->entry_count = 0;
153 	}
154 
155 	table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
156 				 table->ops->unpacked_entry_size, GFP_KERNEL);
157 	if (!table->entries)
158 		return -ENOMEM;
159 
160 	/* Override table based on phylib DT bindings */
161 	table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
162 
163 	mii = table->entries;
164 
165 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
166 		switch (ports[i].phy_mode) {
167 		case PHY_INTERFACE_MODE_MII:
168 			mii->xmii_mode[i] = XMII_MODE_MII;
169 			break;
170 		case PHY_INTERFACE_MODE_RMII:
171 			mii->xmii_mode[i] = XMII_MODE_RMII;
172 			break;
173 		case PHY_INTERFACE_MODE_RGMII:
174 		case PHY_INTERFACE_MODE_RGMII_ID:
175 		case PHY_INTERFACE_MODE_RGMII_RXID:
176 		case PHY_INTERFACE_MODE_RGMII_TXID:
177 			mii->xmii_mode[i] = XMII_MODE_RGMII;
178 			break;
179 		default:
180 			dev_err(dev, "Unsupported PHY mode %s!\n",
181 				phy_modes(ports[i].phy_mode));
182 		}
183 
184 		mii->phy_mac[i] = ports[i].role;
185 	}
186 	return 0;
187 }
188 
189 static int sja1105_init_static_fdb(struct sja1105_private *priv)
190 {
191 	struct sja1105_table *table;
192 
193 	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
194 
195 	/* We only populate the FDB table through dynamic
196 	 * L2 Address Lookup entries
197 	 */
198 	if (table->entry_count) {
199 		kfree(table->entries);
200 		table->entry_count = 0;
201 	}
202 	return 0;
203 }
204 
/* Install the single-entry L2 Lookup Parameters table, which governs
 * address learning: ageing time, FDB bin size, hash polynomial and
 * shared-vs-independent VLAN learning.
 *
 * Returns 0, or -ENOMEM on allocation failure.
 */
static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
		/* Learned FDB entries are forgotten after 300 seconds */
		.maxage = SJA1105_AGEING_TIME_MS(300000),
		/* All entries within a FDB bin are available for learning */
		.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
		/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
		.poly = 0x97,
		/* This selects between Independent VLAN Learning (IVL) and
		 * Shared VLAN Learning (SVL)
		 */
		.shared_learn = false,
		/* Don't discard management traffic based on ENFPORT -
		 * we don't perform SMAC port enforcement anyway, so
		 * what we are setting here doesn't matter.
		 */
		.no_enf_hostprt = false,
		/* Don't learn SMAC for mac_fltres1 and mac_fltres0.
		 * Maybe correlate with no_linklocal_learn from bridge driver?
		 */
		.no_mgmt_learn = true,
	};

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];

	/* Discard any previous L2 Lookup Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
				default_l2_lookup_params;

	return 0;
}
250 
/* Seed the static VLAN Lookup table with a single entry for VLAN 0 (the
 * initial pvid), which all ports are members of, so traffic can flow
 * while the ports are standalone. All other VLANs use dynamic entries.
 *
 * Returns 0, or -ENOMEM on allocation failure.
 */
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	struct sja1105_vlan_lookup_entry pvid = {
		.ving_mirr = 0,
		.vegr_mirr = 0,
		.vmemb_port = 0,
		.vlan_bc = 0,
		.tag_port = 0,
		.vlanid = 0,
	};
	int i;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	/* The static VLAN table will only contain the initial pvid of 0.
	 * All other VLANs are to be configured through dynamic entries,
	 * and kept in the static configuration table as backing memory.
	 * The pvid of 0 is sufficient to pass traffic while the ports are
	 * standalone and when vlan_filtering is disabled. When filtering
	 * gets enabled, the switchdev core sets up the VLAN ID 1 and sets
	 * it as the new pvid. Actually 'pvid 1' still comes up in 'bridge
	 * vlan' even when vlan_filtering is off, but it has no effect.
	 */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(1, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = 1;

	/* VLAN ID 0: all DT-defined ports are members; no restrictions on
	 * forwarding; always transmit priority-tagged frames as untagged.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		pvid.vmemb_port |= BIT(i);
		pvid.vlan_bc |= BIT(i);
		pvid.tag_port &= ~BIT(i);
	}

	((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
	return 0;
}
299 
/* Build the L2 Forwarding table. The first SJA1105_NUM_PORTS entries hold
 * the per-port forwarding rules (initially each user port may only talk to
 * its upstream/CPU port), and the last SJA1105_NUM_TC entries hold the
 * per-port VLAN PCP mapping (set up here as the identity mapping).
 *
 * Returns 0, or -ENOMEM on allocation failure.
 */
static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_entry *l2fwd;
	struct sja1105_table *table;
	int i, j;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];

	/* Discard any previous L2 Forwarding Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;

	l2fwd = table->entries;

	/* First 5 entries define the forwarding rules */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		unsigned int upstream = dsa_upstream_port(priv->ds, i);

		for (j = 0; j < SJA1105_NUM_TC; j++)
			l2fwd[i].vlan_pmap[j] = j;

		/* The upstream port's own entry is filled in by the
		 * iterations over the other (downstream) ports.
		 */
		if (i == upstream)
			continue;

		sja1105_port_allow_traffic(l2fwd, i, upstream, true);
		sja1105_port_allow_traffic(l2fwd, upstream, i, true);
	}
	/* Next 8 entries define VLAN PCP mapping from ingress to egress.
	 * Create a one-to-one mapping.
	 */
	for (i = 0; i < SJA1105_NUM_TC; i++)
		for (j = 0; j < SJA1105_NUM_PORTS; j++)
			l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;

	return 0;
}
344 
345 static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
346 {
347 	struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
348 		/* Disallow dynamic reconfiguration of vlan_pmap */
349 		.max_dynp = 0,
350 		/* Use a single memory partition for all ingress queues */
351 		.part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
352 	};
353 	struct sja1105_table *table;
354 
355 	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
356 
357 	if (table->entry_count) {
358 		kfree(table->entries);
359 		table->entry_count = 0;
360 	}
361 
362 	table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
363 				 table->ops->unpacked_entry_size, GFP_KERNEL);
364 	if (!table->entries)
365 		return -ENOMEM;
366 
367 	table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
368 
369 	/* This table only has a single entry */
370 	((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
371 				default_l2fwd_params;
372 
373 	return 0;
374 }
375 
/* Install the single-entry General Parameters table: link-local trapping
 * filters, host/mirror/cascade port selection, and EtherTypes that disable
 * VLAN filtering. Also assigns a management route slot to each user port.
 *
 * Returns 0, or -ENOMEM on allocation failure.
 */
static int sja1105_init_general_params(struct sja1105_private *priv)
{
	struct sja1105_general_params_entry default_general_params = {
		/* Disallow dynamic changing of the mirror port */
		.mirr_ptacu = 0,
		.switchid = priv->ds->index,
		/* Priority queue for link-local frames trapped to CPU */
		.hostprio = 0,
		.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
		.mac_flt1    = SJA1105_LINKLOCAL_FILTER_A_MASK,
		.incl_srcpt1 = true,
		.send_meta1  = false,
		.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
		.mac_flt0    = SJA1105_LINKLOCAL_FILTER_B_MASK,
		.incl_srcpt0 = true,
		.send_meta0  = false,
		/* The destination for traffic matching mac_fltres1 and
		 * mac_fltres0 on all ports except host_port. Such traffic
		 * received on host_port itself would be dropped, except
		 * by installing a temporary 'management route'
		 */
		.host_port = dsa_upstream_port(priv->ds, 0),
		/* Same as host port */
		.mirr_port = dsa_upstream_port(priv->ds, 0),
		/* Link-local traffic received on casc_port will be forwarded
		 * to host_port without embedding the source port and device ID
		 * info in the destination MAC address (presumably because it
		 * is a cascaded port and a downstream SJA switch already did
		 * that). Default to an invalid port (to disable the feature)
		 * and overwrite this if we find any DSA (cascaded) ports.
		 */
		.casc_port = SJA1105_NUM_PORTS,
		/* No TTEthernet */
		.vllupformat = 0,
		.vlmarker = 0,
		.vlmask = 0,
		/* Only update correctionField for 1-step PTP (L2 transport) */
		.ignore2stf = 0,
		/* Forcefully disable VLAN filtering by telling
		 * the switch that VLAN has a different EtherType.
		 */
		.tpid = ETH_P_SJA1105,
		.tpid2 = ETH_P_SJA1105,
	};
	struct sja1105_table *table;
	int i, k = 0;

	/* Point casc_port at the last DSA link found (if any), and hand out
	 * consecutive management route slots to the user ports.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		if (dsa_is_dsa_port(priv->ds, i))
			default_general_params.casc_port = i;
		else if (dsa_is_user_port(priv->ds, i))
			priv->ports[i].mgmt_slot = k++;
	}

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];

	/* Discard any previous General Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_general_params_entry *)table->entries)[0] =
				default_general_params;

	return 0;
}
450 
/* Convert a rate in Mbps into the units expected by the policer RATE field */
#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)

/* Program policer @index with permissive defaults: 1 Gbps rate, maximum
 * burst, standard Ethernet MTU, memory partition 0, and a sharindx that
 * points back at the policer itself (i.e. no cross-policer sharing).
 */
static inline void
sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
		      int index)
{
	policing[index].sharindx = index;
	policing[index].smax = 65535; /* Burst size in bytes */
	policing[index].rate = SJA1105_RATE_MBPS(1000);
	policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	policing[index].partition = 0;
}
463 
464 static int sja1105_init_l2_policing(struct sja1105_private *priv)
465 {
466 	struct sja1105_l2_policing_entry *policing;
467 	struct sja1105_table *table;
468 	int i, j, k;
469 
470 	table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
471 
472 	/* Discard previous L2 Policing Table */
473 	if (table->entry_count) {
474 		kfree(table->entries);
475 		table->entry_count = 0;
476 	}
477 
478 	table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
479 				 table->ops->unpacked_entry_size, GFP_KERNEL);
480 	if (!table->entries)
481 		return -ENOMEM;
482 
483 	table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;
484 
485 	policing = table->entries;
486 
487 	/* k sweeps through all unicast policers (0-39).
488 	 * bcast sweeps through policers 40-44.
489 	 */
490 	for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) {
491 		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i;
492 
493 		for (j = 0; j < SJA1105_NUM_TC; j++, k++)
494 			sja1105_setup_policer(policing, k);
495 
496 		/* Set up this port's policer for broadcast traffic */
497 		sja1105_setup_policer(policing, bcast);
498 	}
499 	return 0;
500 }
501 
/* Throw away any previous static config, rebuild every table from its
 * defaults plus the DT-derived port settings, and upload the result to
 * the switch over SPI.
 *
 * Returns 0, or the first negative error code from any build/upload step.
 */
static int sja1105_static_config_load(struct sja1105_private *priv,
				      struct sja1105_dt_port *ports)
{
	int rc;

	sja1105_static_config_free(&priv->static_config);
	rc = sja1105_static_config_init(&priv->static_config,
					priv->info->static_ops,
					priv->info->device_id);
	if (rc)
		return rc;

	/* Build static configuration */
	rc = sja1105_init_mac_settings(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_mii_settings(priv, ports);
	if (rc < 0)
		return rc;
	rc = sja1105_init_static_fdb(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_static_vlan(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_lookup_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_forwarding(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_forwarding_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_policing(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_general_params(priv);
	if (rc < 0)
		return rc;

	/* Send initial configuration to hardware via SPI */
	return sja1105_static_config_upload(priv);
}
546 
547 static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
548 				      const struct sja1105_dt_port *ports)
549 {
550 	int i;
551 
552 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
553 		if (ports->role == XMII_MAC)
554 			continue;
555 
556 		if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
557 		    ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
558 			priv->rgmii_rx_delay[i] = true;
559 
560 		if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
561 		    ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
562 			priv->rgmii_tx_delay[i] = true;
563 
564 		if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
565 		     !priv->info->setup_rgmii_delay)
566 			return -EINVAL;
567 	}
568 	return 0;
569 }
570 
/* Parse every child of the "ports" DT node, filling in @ports[index] with
 * the PHY interface mode and the MII role (MAC or PHY) of each switch port.
 *
 * Returns 0, or -ENODEV on malformed bindings.
 */
static int sja1105_parse_ports_node(struct sja1105_private *priv,
				    struct sja1105_dt_port *ports,
				    struct device_node *ports_node)
{
	struct device *dev = &priv->spidev->dev;
	struct device_node *child;

	for_each_child_of_node(ports_node, child) {
		struct device_node *phy_node;
		int phy_mode;
		u32 index;

		/* Get switch port number from DT */
		if (of_property_read_u32(child, "reg", &index) < 0) {
			dev_err(dev, "Port number not defined in device tree "
				"(property \"reg\")\n");
			return -ENODEV;
		}

		/* Get PHY mode from DT */
		phy_mode = of_get_phy_mode(child);
		if (phy_mode < 0) {
			dev_err(dev, "Failed to read phy-mode or "
				"phy-interface-type property for port %d\n",
				index);
			return -ENODEV;
		}
		ports[index].phy_mode = phy_mode;

		phy_node = of_parse_phandle(child, "phy-handle", 0);
		if (!phy_node) {
			if (!of_phy_is_fixed_link(child)) {
				dev_err(dev, "phy-handle or fixed-link "
					"properties missing!\n");
				return -ENODEV;
			}
			/* phy-handle is missing, but fixed-link isn't.
			 * So it's a fixed link. Default to PHY role.
			 */
			ports[index].role = XMII_PHY;
		} else {
			/* phy-handle present => put port in MAC role */
			ports[index].role = XMII_MAC;
			of_node_put(phy_node);
		}

		/* The MAC/PHY role can be overridden with explicit bindings */
		if (of_property_read_bool(child, "sja1105,role-mac"))
			ports[index].role = XMII_MAC;
		else if (of_property_read_bool(child, "sja1105,role-phy"))
			ports[index].role = XMII_PHY;
	}

	return 0;
}
626 
627 static int sja1105_parse_dt(struct sja1105_private *priv,
628 			    struct sja1105_dt_port *ports)
629 {
630 	struct device *dev = &priv->spidev->dev;
631 	struct device_node *switch_node = dev->of_node;
632 	struct device_node *ports_node;
633 	int rc;
634 
635 	ports_node = of_get_child_by_name(switch_node, "ports");
636 	if (!ports_node) {
637 		dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
638 		return -ENODEV;
639 	}
640 
641 	rc = sja1105_parse_ports_node(priv, ports, ports_node);
642 	of_node_put(ports_node);
643 
644 	return rc;
645 }
646 
/* Lookup table from the SJA1105 MAC speed encoding (sja1105_speed_t) to
 * the link speed in Mbps; 0 stands for "automatic".
 */
static int sja1105_speed[] = {
	[SJA1105_SPEED_AUTO]     = 0,
	[SJA1105_SPEED_10MBPS]   = 10,
	[SJA1105_SPEED_100MBPS]  = 100,
	[SJA1105_SPEED_1000MBPS] = 1000,
};
654 
/* Set link speed and enable/disable traffic I/O in the MAC configuration
 * for a specific port.
 *
 * @speed_mbps: If 0, leave the speed unchanged, else adapt MAC to PHY speed.
 * @enabled: Manage Rx and Tx settings for this port. If false, overrides the
 *	     settings from the STP state, but not persistently (does not
 *	     overwrite the static MAC info for this port).
 *
 * Returns 0 on success, -EINVAL for an unknown speed, or a negative error
 * code from the SPI write / clocking setup.
 */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
				      int speed_mbps, bool enabled)
{
	struct sja1105_mac_config_entry dyn_mac;
	struct sja1105_xmii_params_entry *mii;
	struct sja1105_mac_config_entry *mac;
	struct device *dev = priv->ds->dev;
	sja1105_phy_interface_t phy_mode;
	sja1105_speed_t speed;
	int rc;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* Translate Mbps into the hardware speed encoding */
	switch (speed_mbps) {
	case 0:
		/* No speed update requested */
		speed = SJA1105_SPEED_AUTO;
		break;
	case 10:
		speed = SJA1105_SPEED_10MBPS;
		break;
	case 100:
		speed = SJA1105_SPEED_100MBPS;
		break;
	case 1000:
		speed = SJA1105_SPEED_1000MBPS;
		break;
	default:
		dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
		return -EINVAL;
	}

	/* If requested, overwrite SJA1105_SPEED_AUTO from the static MAC
	 * configuration table, since this will be used for the clocking setup,
	 * and we no longer need to store it in the static config (already told
	 * hardware we want auto during upload phase).
	 */
	mac[port].speed = speed;

	/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
	 * tables. On E/T, MAC reconfig tables are not readable, only writable.
	 * We have to *know* what the MAC looks like.  For the sake of keeping
	 * the code common, we'll use the static configuration tables as a
	 * reasonable approximation for both E/T and P/Q/R/S.
	 */
	dyn_mac = mac[port];
	dyn_mac.ingress = enabled && mac[port].ingress;
	dyn_mac.egress  = enabled && mac[port].egress;

	/* Write to the dynamic reconfiguration tables */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG,
					  port, &dyn_mac, true);
	if (rc < 0) {
		dev_err(dev, "Failed to write MAC config: %d\n", rc);
		return rc;
	}

	/* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
	 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
	 * RMII no change of the clock setup is required. Actually, changing
	 * the clock setup does interrupt the clock signal for a certain time
	 * which causes trouble for all PHYs relying on this signal.
	 */
	if (!enabled)
		return 0;

	phy_mode = mii->xmii_mode[port];
	if (phy_mode != XMII_MODE_RGMII)
		return 0;

	return sja1105_clocking_setup_port(priv, port);
}
736 
737 static void sja1105_adjust_link(struct dsa_switch *ds, int port,
738 				struct phy_device *phydev)
739 {
740 	struct sja1105_private *priv = ds->priv;
741 
742 	if (!phydev->link)
743 		sja1105_adjust_port_config(priv, port, 0, false);
744 	else
745 		sja1105_adjust_port_config(priv, port, phydev->speed, true);
746 }
747 
/* phylink callback: restrict @supported and the advertisement to the link
 * modes the switch MAC can actually do (full duplex only, no pause frames;
 * gigabit only on RGMII ports).
 */
static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
				     unsigned long *supported,
				     struct phylink_link_state *state)
{
	/* Construct a new mask which exhaustively contains all link features
	 * supported by the MAC, and then apply that (logical AND) to what will
	 * be sent to the PHY for "marketing".
	 */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct sja1105_private *priv = ds->priv;
	struct sja1105_xmii_params_entry *mii;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* The MAC does not support pause frames, and also doesn't
	 * support half-duplex traffic modes.
	 */
	phylink_set(mask, Autoneg);
	phylink_set(mask, MII);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);
	if (mii->xmii_mode[port] == XMII_MODE_RGMII)
		phylink_set(mask, 1000baseT_Full);

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
776 
/* First-generation switches have a 4-way set associative TCAM that
 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
 * For the placement of a newly learnt FDB entry, the switch selects the bin
 * based on a hash function, and the way within that bin incrementally.
 */
static inline int sja1105et_fdb_index(int bin, int way)
{
	/* Linearize the (bin, way) pair into a flat TCAM index */
	return bin * SJA1105ET_FDB_BIN_SIZE + way;
}
787 
/* Scan one FDB bin for an entry matching (@addr, @vid).
 *
 * Returns the matching way within the bin (copying the hardware entry into
 * @match if non-NULL), or -1 if no match exists. If @last_unused is
 * non-NULL, it receives the way of the last free slot seen while scanning
 * (useful for choosing an insertion point); it is left untouched when the
 * bin is full.
 */
static int sja1105_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
				       const u8 *addr, u16 vid,
				       struct sja1105_l2_lookup_entry *match,
				       int *last_unused)
{
	int way;

	for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
		struct sja1105_l2_lookup_entry l2_lookup = {0};
		int index = sja1105et_fdb_index(bin, way);

		/* Skip unused entries, optionally marking them
		 * into the return value
		 */
		if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						index, &l2_lookup)) {
			if (last_unused)
				*last_unused = way;
			continue;
		}

		if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
		    l2_lookup.vlanid == vid) {
			if (match)
				*match = l2_lookup;
			return way;
		}
	}
	/* Return an invalid entry index if not found */
	return -1;
}
819 
820 static int sja1105_fdb_add(struct dsa_switch *ds, int port,
821 			   const unsigned char *addr, u16 vid)
822 {
823 	struct sja1105_l2_lookup_entry l2_lookup = {0};
824 	struct sja1105_private *priv = ds->priv;
825 	struct device *dev = ds->dev;
826 	int last_unused = -1;
827 	int bin, way;
828 
829 	bin = sja1105_fdb_hash(priv, addr, vid);
830 
831 	way = sja1105_is_fdb_entry_in_bin(priv, bin, addr, vid,
832 					  &l2_lookup, &last_unused);
833 	if (way >= 0) {
834 		/* We have an FDB entry. Is our port in the destination
835 		 * mask? If yes, we need to do nothing. If not, we need
836 		 * to rewrite the entry by adding this port to it.
837 		 */
838 		if (l2_lookup.destports & BIT(port))
839 			return 0;
840 		l2_lookup.destports |= BIT(port);
841 	} else {
842 		int index = sja1105et_fdb_index(bin, way);
843 
844 		/* We don't have an FDB entry. We construct a new one and
845 		 * try to find a place for it within the FDB table.
846 		 */
847 		l2_lookup.macaddr = ether_addr_to_u64(addr);
848 		l2_lookup.destports = BIT(port);
849 		l2_lookup.vlanid = vid;
850 
851 		if (last_unused >= 0) {
852 			way = last_unused;
853 		} else {
854 			/* Bin is full, need to evict somebody.
855 			 * Choose victim at random. If you get these messages
856 			 * often, you may need to consider changing the
857 			 * distribution function:
858 			 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
859 			 */
860 			get_random_bytes(&way, sizeof(u8));
861 			way %= SJA1105ET_FDB_BIN_SIZE;
862 			dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
863 				 bin, addr, way);
864 			/* Evict entry */
865 			sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
866 						     index, NULL, false);
867 		}
868 	}
869 	l2_lookup.index = sja1105et_fdb_index(bin, way);
870 
871 	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
872 					    l2_lookup.index, &l2_lookup,
873 					    true);
874 }
875 
876 static int sja1105_fdb_del(struct dsa_switch *ds, int port,
877 			   const unsigned char *addr, u16 vid)
878 {
879 	struct sja1105_l2_lookup_entry l2_lookup = {0};
880 	struct sja1105_private *priv = ds->priv;
881 	int index, bin, way;
882 	bool keep;
883 
884 	bin = sja1105_fdb_hash(priv, addr, vid);
885 	way = sja1105_is_fdb_entry_in_bin(priv, bin, addr, vid,
886 					  &l2_lookup, NULL);
887 	if (way < 0)
888 		return 0;
889 	index = sja1105et_fdb_index(bin, way);
890 
891 	/* We have an FDB entry. Is our port in the destination mask? If yes,
892 	 * we need to remove it. If the resulting port mask becomes empty, we
893 	 * need to completely evict the FDB entry.
894 	 * Otherwise we just write it back.
895 	 */
896 	if (l2_lookup.destports & BIT(port))
897 		l2_lookup.destports &= ~BIT(port);
898 	if (l2_lookup.destports)
899 		keep = true;
900 	else
901 		keep = false;
902 
903 	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
904 					    index, &l2_lookup, keep);
905 }
906 
/* Walk the entire hardware FDB over SPI and report, through @cb, every
 * entry whose destination port mask includes @port.
 *
 * Returns 0, or the first non-EINVAL error from the dynamic config read.
 */
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
			    dsa_fdb_dump_cb_t *cb, void *data)
{
	struct sja1105_private *priv = ds->priv;
	struct device *dev = ds->dev;
	int i;

	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
		struct sja1105_l2_lookup_entry l2_lookup = {0};
		u8 macaddr[ETH_ALEN];
		int rc;

		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						 i, &l2_lookup);
		/* No fdb entry at i, not an issue */
		if (rc == -EINVAL)
			continue;
		if (rc) {
			dev_err(dev, "Failed to dump FDB: %d\n", rc);
			return rc;
		}

		/* FDB dump callback is per port. This means we have to
		 * disregard a valid entry if it's not for this port, even if
		 * only to revisit it later. This is inefficient because the
		 * 1024-sized FDB table needs to be traversed 4 times through
		 * SPI during a 'bridge fdb show' command.
		 */
		if (!(l2_lookup.destports & BIT(port)))
			continue;
		u64_to_ether_addr(l2_lookup.macaddr, macaddr);
		cb(macaddr, l2_lookup.vlanid, false, data);
	}
	return 0;
}
942 
/* The DSA core requires a prepare callback to exist even though this
 * driver has no precondition to check before an MDB entry is offloaded;
 * always report success.
 */
static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_mdb *mdb)
{
	return 0;
}
949 
/* Multicast groups are installed through the same FDB path as unicast
 * addresses. The .port_mdb_add hook returns void, so any error from
 * sja1105_fdb_add is dropped here.
 */
static void sja1105_mdb_add(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_mdb *mdb)
{
	sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
}
955 
/* Remove a multicast group by deleting this port from its FDB entry */
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_mdb *mdb)
{
	return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
}
961 
/* Join (@member == true) or leave the forwarding domain shared with the
 * other user ports under bridge @br, committing every touched L2
 * Forwarding entry to hardware over SPI.
 *
 * Returns 0 or a negative error code from the dynamic config write.
 */
static int sja1105_bridge_member(struct dsa_switch *ds, int port,
				 struct net_device *br, bool member)
{
	struct sja1105_l2_forwarding_entry *l2_fwd;
	struct sja1105_private *priv = ds->priv;
	int i, rc;

	l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		/* Add this port to the forwarding matrix of the
		 * other ports in the same bridge, and viceversa.
		 */
		if (!dsa_is_user_port(ds, i))
			continue;
		/* For the ports already under the bridge, only one thing needs
		 * to be done, and that is to add this port to their
		 * reachability domain. So we can perform the SPI write for
		 * them immediately. However, for this port itself (the one
		 * that is new to the bridge), we need to add all other ports
		 * to its reachability domain. So we do that incrementally in
		 * this loop, and perform the SPI write only at the end, once
		 * the domain contains all other bridge ports.
		 */
		if (i == port)
			continue;
		if (dsa_to_port(ds, i)->bridge_dev != br)
			continue;
		sja1105_port_allow_traffic(l2_fwd, i, port, member);
		sja1105_port_allow_traffic(l2_fwd, port, i, member);

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
						  i, &l2_fwd[i], true);
		if (rc < 0)
			return rc;
	}

	/* Finally commit this port's own entry, now that its domain
	 * contains all other bridge ports.
	 */
	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
					    port, &l2_fwd[port], true);
}
1002 
1003 static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
1004 					 u8 state)
1005 {
1006 	struct sja1105_private *priv = ds->priv;
1007 	struct sja1105_mac_config_entry *mac;
1008 
1009 	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1010 
1011 	switch (state) {
1012 	case BR_STATE_DISABLED:
1013 	case BR_STATE_BLOCKING:
1014 		/* From UM10944 description of DRPDTAG (why put this there?):
1015 		 * "Management traffic flows to the port regardless of the state
1016 		 * of the INGRESS flag". So BPDUs are still be allowed to pass.
1017 		 * At the moment no difference between DISABLED and BLOCKING.
1018 		 */
1019 		mac[port].ingress   = false;
1020 		mac[port].egress    = false;
1021 		mac[port].dyn_learn = false;
1022 		break;
1023 	case BR_STATE_LISTENING:
1024 		mac[port].ingress   = true;
1025 		mac[port].egress    = false;
1026 		mac[port].dyn_learn = false;
1027 		break;
1028 	case BR_STATE_LEARNING:
1029 		mac[port].ingress   = true;
1030 		mac[port].egress    = false;
1031 		mac[port].dyn_learn = true;
1032 		break;
1033 	case BR_STATE_FORWARDING:
1034 		mac[port].ingress   = true;
1035 		mac[port].egress    = true;
1036 		mac[port].dyn_learn = true;
1037 		break;
1038 	default:
1039 		dev_err(ds->dev, "invalid STP state: %d\n", state);
1040 		return;
1041 	}
1042 
1043 	sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1044 				     &mac[port], true);
1045 }
1046 
/* DSA .port_bridge_join: add @port to the forwarding domain of @br. */
static int sja1105_bridge_join(struct dsa_switch *ds, int port,
			       struct net_device *br)
{
	return sja1105_bridge_member(ds, port, br, true);
}
1052 
/* DSA .port_bridge_leave: remove @port from the forwarding domain of @br.
 * The callback is void, so a failed SPI write cannot be reported here.
 */
static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
				 struct net_device *br)
{
	sja1105_bridge_member(ds, port, br, false);
}
1058 
1059 static u8 sja1105_stp_state_get(struct sja1105_private *priv, int port)
1060 {
1061 	struct sja1105_mac_config_entry *mac;
1062 
1063 	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1064 
1065 	if (!mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
1066 		return BR_STATE_BLOCKING;
1067 	if (mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
1068 		return BR_STATE_LISTENING;
1069 	if (mac[port].ingress && !mac[port].egress && mac[port].dyn_learn)
1070 		return BR_STATE_LEARNING;
1071 	if (mac[port].ingress && mac[port].egress && mac[port].dyn_learn)
1072 		return BR_STATE_FORWARDING;
1073 	/* This is really an error condition if the MAC was in none of the STP
1074 	 * states above. But treating the port as disabled does nothing, which
1075 	 * is adequate, and it also resets the MAC to a known state later on.
1076 	 */
1077 	return BR_STATE_DISABLED;
1078 }
1079 
/* For situations where we need to change a setting at runtime that is only
 * available through the static configuration, resetting the switch in order
 * to upload the new static config is unavoidable. Back up the settings we
 * modify at runtime (currently only MAC) and restore them after uploading,
 * such that this operation is relatively seamless.
 */
static int sja1105_static_config_reload(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry *mac;
	/* Per-port link speed, saved so the ports can be reconfigured after
	 * the reset (a speed of 0 means the port was disabled).
	 */
	int speed_mbps[SJA1105_NUM_PORTS];
	/* Per-port STP state; only meaningful for non-upstream (user) ports */
	u8 stp_state[SJA1105_NUM_PORTS];
	int rc, i;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* Back up settings changed by sja1105_adjust_port_config and
	 * sja1105_bridge_stp_state_set and restore their defaults.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		speed_mbps[i] = sja1105_speed[mac[i].speed];
		mac[i].speed = SJA1105_SPEED_AUTO;
		if (i == dsa_upstream_port(priv->ds, i)) {
			/* The CPU-facing port always forwards */
			mac[i].ingress = true;
			mac[i].egress = true;
			mac[i].dyn_learn = true;
		} else {
			/* User ports come back up blocked; their STP state is
			 * restored below after the config upload.
			 */
			stp_state[i] = sja1105_stp_state_get(priv, i);
			mac[i].ingress = false;
			mac[i].egress = false;
			mac[i].dyn_learn = false;
		}
	}

	/* Reset switch and send updated static configuration */
	rc = sja1105_static_config_upload(priv);
	if (rc < 0)
		goto out;

	/* Configure the CGU (PLLs) for MII and RMII PHYs.
	 * For these interfaces there is no dynamic configuration
	 * needed, since PLLs have same settings at all speeds.
	 */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0)
		goto out;

	/* Restore the per-port runtime state saved above */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		bool enabled = (speed_mbps[i] != 0);

		if (i != dsa_upstream_port(priv->ds, i))
			sja1105_bridge_stp_state_set(priv->ds, i, stp_state[i]);

		rc = sja1105_adjust_port_config(priv, i, speed_mbps[i],
						enabled);
		if (rc < 0)
			goto out;
	}
out:
	return rc;
}
1140 
1141 /* The TPID setting belongs to the General Parameters table,
1142  * which can only be partially reconfigured at runtime (and not the TPID).
1143  * So a switch reset is required.
1144  */
1145 static int sja1105_change_tpid(struct sja1105_private *priv,
1146 			       u16 tpid, u16 tpid2)
1147 {
1148 	struct sja1105_general_params_entry *general_params;
1149 	struct sja1105_table *table;
1150 
1151 	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
1152 	general_params = table->entries;
1153 	general_params->tpid = tpid;
1154 	general_params->tpid2 = tpid2;
1155 	return sja1105_static_config_reload(priv);
1156 }
1157 
1158 static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
1159 {
1160 	struct sja1105_mac_config_entry *mac;
1161 
1162 	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1163 
1164 	mac[port].vlanid = pvid;
1165 
1166 	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1167 					   &mac[port], true);
1168 }
1169 
1170 static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
1171 {
1172 	struct sja1105_vlan_lookup_entry *vlan;
1173 	int count, i;
1174 
1175 	vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
1176 	count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
1177 
1178 	for (i = 0; i < count; i++)
1179 		if (vlan[i].vlanid == vid)
1180 			return i;
1181 
1182 	/* Return an invalid entry index if not found */
1183 	return -1;
1184 }
1185 
/* Add (@enabled == true) or remove (@enabled == false) @port from the
 * membership of VLAN @vid, creating the VLAN Lookup entry on first use and
 * deleting it once its last member port is gone. @untagged controls whether
 * the port egresses this VLAN untagged.
 * Returns 0 on success, or a negative error from table resize / SPI write.
 */
static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid,
			      bool enabled, bool untagged)
{
	struct sja1105_vlan_lookup_entry *vlan;
	struct sja1105_table *table;
	bool keep = true;
	int match, rc;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	match = sja1105_is_vlan_configured(priv, vid);
	if (match < 0) {
		/* Can't delete a missing entry. */
		if (!enabled)
			return 0;
		/* Grow the table by one and use the new slot for @vid */
		rc = sja1105_table_resize(table, table->entry_count + 1);
		if (rc)
			return rc;
		match = table->entry_count - 1;
	}
	/* Assign pointer after the resize (it's new memory) */
	vlan = table->entries;
	vlan[match].vlanid = vid;
	if (enabled) {
		/* Add port to broadcast domain and membership of this VLAN */
		vlan[match].vlan_bc |= BIT(port);
		vlan[match].vmemb_port |= BIT(port);
	} else {
		vlan[match].vlan_bc &= ~BIT(port);
		vlan[match].vmemb_port &= ~BIT(port);
	}
	/* Also unset tag_port if removing this VLAN was requested,
	 * just so we don't have a confusing bitmap (no practical purpose).
	 */
	if (untagged || !enabled)
		vlan[match].tag_port &= ~BIT(port);
	else
		vlan[match].tag_port |= BIT(port);
	/* If there's no port left as member of this VLAN,
	 * it's time for it to go.
	 */
	if (!vlan[match].vmemb_port)
		keep = false;

	dev_dbg(priv->ds->dev,
		"%s: port %d, vid %llu, broadcast domain 0x%llx, "
		"port members 0x%llx, tagged ports 0x%llx, keep %d\n",
		__func__, port, vlan[match].vlanid, vlan[match].vlan_bc,
		vlan[match].vmemb_port, vlan[match].tag_port, keep);

	/* Commit to hardware; keep == false invalidates the hardware entry */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
					  &vlan[match], keep);
	if (rc < 0)
		return rc;

	/* Mirror the hardware deletion in the static config table */
	if (!keep)
		return sja1105_table_delete_entry(table, match);

	return 0;
}
1245 
1246 static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
1247 {
1248 	int rc, i;
1249 
1250 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1251 		rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
1252 		if (rc < 0) {
1253 			dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
1254 				i, rc);
1255 			return rc;
1256 		}
1257 	}
1258 	dev_info(ds->dev, "%s switch tagging\n",
1259 		 enabled ? "Enabled" : "Disabled");
1260 	return 0;
1261 }
1262 
/* DSA .get_tag_protocol: this switch always uses the SJA1105 tagger. */
static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port)
{
	return DSA_TAG_PROTO_SJA1105;
}
1268 
/* The DSA core requires a .port_vlan_prepare callback to be present before
 * it will invoke .port_vlan_add. Nothing needs validating here, so accept
 * all requests unconditionally.
 */
static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan)
{
	return 0;
}
1275 
1276 static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
1277 {
1278 	struct sja1105_private *priv = ds->priv;
1279 	int rc;
1280 
1281 	if (enabled)
1282 		/* Enable VLAN filtering. */
1283 		rc = sja1105_change_tpid(priv, ETH_P_8021Q, ETH_P_8021AD);
1284 	else
1285 		/* Disable VLAN filtering. */
1286 		rc = sja1105_change_tpid(priv, ETH_P_SJA1105, ETH_P_SJA1105);
1287 	if (rc)
1288 		dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
1289 
1290 	/* Switch port identification based on 802.1Q is only passable
1291 	 * if we are not under a vlan_filtering bridge. So make sure
1292 	 * the two configurations are mutually exclusive.
1293 	 */
1294 	return sja1105_setup_8021q_tagging(ds, !enabled);
1295 }
1296 
1297 static void sja1105_vlan_add(struct dsa_switch *ds, int port,
1298 			     const struct switchdev_obj_port_vlan *vlan)
1299 {
1300 	struct sja1105_private *priv = ds->priv;
1301 	u16 vid;
1302 	int rc;
1303 
1304 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1305 		rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags &
1306 					BRIDGE_VLAN_INFO_UNTAGGED);
1307 		if (rc < 0) {
1308 			dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
1309 				vid, port, rc);
1310 			return;
1311 		}
1312 		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1313 			rc = sja1105_pvid_apply(ds->priv, port, vid);
1314 			if (rc < 0) {
1315 				dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n",
1316 					vid, port, rc);
1317 				return;
1318 			}
1319 		}
1320 	}
1321 }
1322 
1323 static int sja1105_vlan_del(struct dsa_switch *ds, int port,
1324 			    const struct switchdev_obj_port_vlan *vlan)
1325 {
1326 	struct sja1105_private *priv = ds->priv;
1327 	u16 vid;
1328 	int rc;
1329 
1330 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1331 		rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags &
1332 					BRIDGE_VLAN_INFO_UNTAGGED);
1333 		if (rc < 0) {
1334 			dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
1335 				vid, port, rc);
1336 			return rc;
1337 		}
1338 	}
1339 	return 0;
1340 }
1341 
/* The programming model for the SJA1105 switch is "all-at-once" via static
 * configuration tables. Some of these can be dynamically modified at runtime,
 * but not the xMII mode parameters table.
 * Furthermore, some PHYs may not have crystals for generating their clocks
 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
 * ref_clk pin. So port clocking needs to be initialized early, before
 * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
 * Setting correct PHY link speed does not matter now.
 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
 * bindings are not yet parsed by DSA core. We need to parse early so that we
 * can populate the xMII mode parameters table.
 */
static int sja1105_setup(struct dsa_switch *ds)
{
	/* Temporary per-port DT settings (phy-mode, MII role) consumed by
	 * sja1105_static_config_load() below.
	 */
	struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
	struct sja1105_private *priv = ds->priv;
	int rc;

	rc = sja1105_parse_dt(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
		return rc;
	}

	/* Error out early if internal delays are required through DT
	 * and we can't apply them.
	 */
	rc = sja1105_parse_rgmii_delays(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "RGMII delay not supported\n");
		return rc;
	}

	/* Create and send configuration down to device */
	rc = sja1105_static_config_load(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to load static config: %d\n", rc);
		return rc;
	}
	/* Configure the CGU (PHY link modes and speeds) */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
		return rc;
	}
	/* On SJA1105, VLAN filtering per se is always enabled in hardware.
	 * The only thing we can do to disable it is lie about what the 802.1Q
	 * EtherType is.
	 * So it will still try to apply VLAN filtering, but all ingress
	 * traffic (except frames received with EtherType of ETH_P_SJA1105)
	 * will be internally tagged with a distorted VLAN header where the
	 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
	 */
	ds->vlan_filtering_is_global = true;

	/* The DSA/switchdev model brings up switch ports in standalone mode by
	 * default, and that means vlan_filtering is 0 since they're not under
	 * a bridge, so it's safe to set up switch tagging at this time.
	 */
	return sja1105_setup_8021q_tagging(ds, true);
}
1403 
/* Transmit a management frame (e.g. a BPDU) towards @port via management
 * route @slot: install a one-shot route matching the frame's DMAC, enqueue
 * the frame on the CPU port, then poll until the switch consumes the route.
 * Always returns NETDEV_TX_OK; the skb is consumed on all paths.
 */
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
			     struct sk_buff *skb)
{
	struct sja1105_mgmt_entry mgmt_route = {0};
	struct sja1105_private *priv = ds->priv;
	struct ethhdr *hdr;
	int timeout = 10;
	int rc;

	hdr = eth_hdr(skb);

	/* One-shot route: match on the frame's DMAC, force egress on @port */
	mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
	mgmt_route.destports = BIT(port);
	mgmt_route.enfport = 1;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					  slot, &mgmt_route, true);
	if (rc < 0) {
		kfree_skb(skb);
		return rc;
	}

	/* Transfer skb to the host port. */
	dsa_enqueue_skb(skb, ds->ports[port].slave);

	/* Wait until the switch has processed the frame.
	 * NOTE(review): this is a busy-poll over SPI with no sleep between
	 * iterations; on a read failure the loop continues with the stale
	 * mgmt_route contents, but --timeout in the condition still bounds it.
	 */
	do {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
						 slot, &mgmt_route);
		if (rc < 0) {
			dev_err_ratelimited(priv->ds->dev,
					    "failed to poll for mgmt route\n");
			continue;
		}

		/* UM10944: The ENFPORT flag of the respective entry is
		 * cleared when a match is found. The host can use this
		 * flag as an acknowledgment.
		 */
		cpu_relax();
	} while (mgmt_route.enfport && --timeout);

	if (!timeout) {
		/* Clean up the management route so that a follow-up
		 * frame may not match on it by mistake.
		 */
		sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					     slot, &mgmt_route, false);
		dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
	}

	return NETDEV_TX_OK;
}
1457 
1458 /* Deferred work is unfortunately necessary because setting up the management
1459  * route cannot be done from atomit context (SPI transfer takes a sleepable
1460  * lock on the bus)
1461  */
1462 static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
1463 					      struct sk_buff *skb)
1464 {
1465 	struct sja1105_private *priv = ds->priv;
1466 	struct sja1105_port *sp = &priv->ports[port];
1467 	int slot = sp->mgmt_slot;
1468 
1469 	/* The tragic fact about the switch having 4x2 slots for installing
1470 	 * management routes is that all of them except one are actually
1471 	 * useless.
1472 	 * If 2 slots are simultaneously configured for two BPDUs sent to the
1473 	 * same (multicast) DMAC but on different egress ports, the switch
1474 	 * would confuse them and redirect first frame it receives on the CPU
1475 	 * port towards the port configured on the numerically first slot
1476 	 * (therefore wrong port), then second received frame on second slot
1477 	 * (also wrong port).
1478 	 * So for all practical purposes, there needs to be a lock that
1479 	 * prevents that from happening. The slot used here is utterly useless
1480 	 * (could have simply been 0 just as fine), but we are doing it
1481 	 * nonetheless, in case a smarter idea ever comes up in the future.
1482 	 */
1483 	mutex_lock(&priv->mgmt_lock);
1484 
1485 	sja1105_mgmt_xmit(ds, port, slot, skb);
1486 
1487 	mutex_unlock(&priv->mgmt_lock);
1488 	return NETDEV_TX_OK;
1489 }
1490 
1491 /* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
1492  * which cannot be reconfigured at runtime. So a switch reset is required.
1493  */
1494 static int sja1105_set_ageing_time(struct dsa_switch *ds,
1495 				   unsigned int ageing_time)
1496 {
1497 	struct sja1105_l2_lookup_params_entry *l2_lookup_params;
1498 	struct sja1105_private *priv = ds->priv;
1499 	struct sja1105_table *table;
1500 	unsigned int maxage;
1501 
1502 	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
1503 	l2_lookup_params = table->entries;
1504 
1505 	maxage = SJA1105_AGEING_TIME_MS(ageing_time);
1506 
1507 	if (l2_lookup_params->maxage == maxage)
1508 		return 0;
1509 
1510 	l2_lookup_params->maxage = maxage;
1511 
1512 	return sja1105_static_config_reload(priv);
1513 }
1514 
/* DSA switch operations implemented by this driver */
static const struct dsa_switch_ops sja1105_switch_ops = {
	.get_tag_protocol	= sja1105_get_tag_protocol,
	.setup			= sja1105_setup,
	.adjust_link		= sja1105_adjust_link,
	.set_ageing_time	= sja1105_set_ageing_time,
	.phylink_validate	= sja1105_phylink_validate,
	.get_strings		= sja1105_get_strings,
	.get_ethtool_stats	= sja1105_get_ethtool_stats,
	.get_sset_count		= sja1105_get_sset_count,
	.port_fdb_dump		= sja1105_fdb_dump,
	.port_fdb_add		= sja1105_fdb_add,
	.port_fdb_del		= sja1105_fdb_del,
	.port_bridge_join	= sja1105_bridge_join,
	.port_bridge_leave	= sja1105_bridge_leave,
	.port_stp_state_set	= sja1105_bridge_stp_state_set,
	.port_vlan_prepare	= sja1105_vlan_prepare,
	.port_vlan_filtering	= sja1105_vlan_filtering,
	.port_vlan_add		= sja1105_vlan_add,
	.port_vlan_del		= sja1105_vlan_del,
	.port_mdb_prepare	= sja1105_mdb_prepare,
	.port_mdb_add		= sja1105_mdb_add,
	.port_mdb_del		= sja1105_mdb_del,
	.port_deferred_xmit	= sja1105_port_deferred_xmit,
};
1539 
1540 static int sja1105_check_device_id(struct sja1105_private *priv)
1541 {
1542 	const struct sja1105_regs *regs = priv->info->regs;
1543 	u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
1544 	struct device *dev = &priv->spidev->dev;
1545 	u64 device_id;
1546 	u64 part_no;
1547 	int rc;
1548 
1549 	rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id,
1550 				  &device_id, SJA1105_SIZE_DEVICE_ID);
1551 	if (rc < 0)
1552 		return rc;
1553 
1554 	if (device_id != priv->info->device_id) {
1555 		dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n",
1556 			priv->info->device_id, device_id);
1557 		return -ENODEV;
1558 	}
1559 
1560 	rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id,
1561 					 prod_id, SJA1105_SIZE_DEVICE_ID);
1562 	if (rc < 0)
1563 		return rc;
1564 
1565 	sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
1566 
1567 	if (part_no != priv->info->part_no) {
1568 		dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n",
1569 			priv->info->part_no, part_no);
1570 		return -ENODEV;
1571 	}
1572 
1573 	return 0;
1574 }
1575 
1576 static int sja1105_probe(struct spi_device *spi)
1577 {
1578 	struct device *dev = &spi->dev;
1579 	struct sja1105_private *priv;
1580 	struct dsa_switch *ds;
1581 	int rc, i;
1582 
1583 	if (!dev->of_node) {
1584 		dev_err(dev, "No DTS bindings for SJA1105 driver\n");
1585 		return -EINVAL;
1586 	}
1587 
1588 	priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
1589 	if (!priv)
1590 		return -ENOMEM;
1591 
1592 	/* Configure the optional reset pin and bring up switch */
1593 	priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
1594 	if (IS_ERR(priv->reset_gpio))
1595 		dev_dbg(dev, "reset-gpios not defined, ignoring\n");
1596 	else
1597 		sja1105_hw_reset(priv->reset_gpio, 1, 1);
1598 
1599 	/* Populate our driver private structure (priv) based on
1600 	 * the device tree node that was probed (spi)
1601 	 */
1602 	priv->spidev = spi;
1603 	spi_set_drvdata(spi, priv);
1604 
1605 	/* Configure the SPI bus */
1606 	spi->bits_per_word = 8;
1607 	rc = spi_setup(spi);
1608 	if (rc < 0) {
1609 		dev_err(dev, "Could not init SPI\n");
1610 		return rc;
1611 	}
1612 
1613 	priv->info = of_device_get_match_data(dev);
1614 
1615 	/* Detect hardware device */
1616 	rc = sja1105_check_device_id(priv);
1617 	if (rc < 0) {
1618 		dev_err(dev, "Device ID check failed: %d\n", rc);
1619 		return rc;
1620 	}
1621 
1622 	dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
1623 
1624 	ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
1625 	if (!ds)
1626 		return -ENOMEM;
1627 
1628 	ds->ops = &sja1105_switch_ops;
1629 	ds->priv = priv;
1630 	priv->ds = ds;
1631 
1632 	/* Connections between dsa_port and sja1105_port */
1633 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1634 		struct sja1105_port *sp = &priv->ports[i];
1635 
1636 		ds->ports[i].priv = sp;
1637 		sp->dp = &ds->ports[i];
1638 	}
1639 	mutex_init(&priv->mgmt_lock);
1640 
1641 	return dsa_register_switch(priv->ds);
1642 }
1643 
/* SPI remove: tear down the DSA switch and free the static config tables.
 * devm-managed resources (priv, reset GPIO) are released by the core.
 */
static int sja1105_remove(struct spi_device *spi)
{
	struct sja1105_private *priv = spi_get_drvdata(spi);

	dsa_unregister_switch(priv->ds);
	sja1105_static_config_free(&priv->static_config);
	return 0;
}
1652 
/* Supported chip variants; .data selects the per-variant register layout
 * and identification info consumed in sja1105_probe().
 */
static const struct of_device_id sja1105_dt_ids[] = {
	{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
	{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
	{ .compatible = "nxp,sja1105p", .data = &sja1105p_info },
	{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
	{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
	{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
	{ /* sentinel */ },
};
1662 MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
1663 
1664 static struct spi_driver sja1105_driver = {
1665 	.driver = {
1666 		.name  = "sja1105",
1667 		.owner = THIS_MODULE,
1668 		.of_match_table = of_match_ptr(sja1105_dt_ids),
1669 	},
1670 	.probe  = sja1105_probe,
1671 	.remove = sja1105_remove,
1672 };
1673 
1674 module_spi_driver(sja1105_driver);
1675 
1676 MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
1677 MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
1678 MODULE_DESCRIPTION("SJA1105 Driver");
1679 MODULE_LICENSE("GPL v2");
1680