xref: /openbmc/linux/drivers/net/ethernet/mscc/ocelot.c (revision b4e18b29)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Microsemi Ocelot Switch driver
4  *
5  * Copyright (c) 2017 Microsemi Corporation
6  */
7 #include <linux/if_bridge.h>
8 #include <soc/mscc/ocelot_vcap.h>
9 #include "ocelot.h"
10 #include "ocelot_vcap.h"
11 
12 #define TABLE_UPDATE_SLEEP_US 10
13 #define TABLE_UPDATE_TIMEOUT_US 100000
14 
15 struct ocelot_mact_entry {
16 	u8 mac[ETH_ALEN];
17 	u16 vid;
18 	enum macaccess_entry_type type;
19 };
20 
21 static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot)
22 {
23 	return ocelot_read(ocelot, ANA_TABLES_MACACCESS);
24 }
25 
26 static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
27 {
28 	u32 val;
29 
30 	return readx_poll_timeout(ocelot_mact_read_macaccess,
31 		ocelot, val,
32 		(val & ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M) ==
33 		MACACCESS_CMD_IDLE,
34 		TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
35 }
36 
37 static void ocelot_mact_select(struct ocelot *ocelot,
38 			       const unsigned char mac[ETH_ALEN],
39 			       unsigned int vid)
40 {
41 	u32 macl = 0, mach = 0;
42 
43 	/* Set the MAC address to handle and the associated VLAN, in a
44 	 * format understood by the hardware.
45 	 */
46 	mach |= vid    << 16;
47 	mach |= mac[0] << 8;
48 	mach |= mac[1] << 0;
49 	macl |= mac[2] << 24;
50 	macl |= mac[3] << 16;
51 	macl |= mac[4] << 8;
52 	macl |= mac[5] << 0;
53 
54 	ocelot_write(ocelot, macl, ANA_TABLES_MACLDATA);
55 	ocelot_write(ocelot, mach, ANA_TABLES_MACHDATA);
56 
57 }
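/* Illustrative example (hypothetical values): for MAC 00:11:22:33:44:55 in
 * VLAN 10, the packing above yields MACHDATA = (10 << 16) | (0x00 << 8) |
 * 0x11 = 0x000a0011 and MACLDATA = 0x22334455, i.e. the VID and the two
 * most significant MAC bytes go into the high register, and the remaining
 * four bytes into the low one.
 */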
58 
59 int ocelot_mact_learn(struct ocelot *ocelot, int port,
60 		      const unsigned char mac[ETH_ALEN],
61 		      unsigned int vid, enum macaccess_entry_type type)
62 {
63 	u32 cmd = ANA_TABLES_MACACCESS_VALID |
64 		ANA_TABLES_MACACCESS_DEST_IDX(port) |
65 		ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
66 		ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
67 	unsigned int mc_ports;
68 
69 	/* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
70 	if (type == ENTRYTYPE_MACv4)
71 		mc_ports = (mac[1] << 8) | mac[2];
72 	else if (type == ENTRYTYPE_MACv6)
73 		mc_ports = (mac[0] << 8) | mac[1];
74 	else
75 		mc_ports = 0;
76 
77 	if (mc_ports & BIT(ocelot->num_phys_ports))
78 		cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
79 
80 	ocelot_mact_select(ocelot, mac, vid);
81 
82 	/* Issue a write command */
83 	ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
84 
85 	return ocelot_mact_wait_for_completion(ocelot);
86 }
87 EXPORT_SYMBOL(ocelot_mact_learn);
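/* A sketch of the MAC_CPU_COPY logic above, assuming a hypothetical switch
 * with 4 front ports (num_phys_ports == 4, so the CPU port module is bit 4):
 * an IPv4 multicast entry whose encoded destination set is front ports 0 and
 * 1 plus the CPU port module carries mac[1] = 0x00 and mac[2] = 0x13, so
 * mc_ports = 0x13; BIT(4) is set and the frame is also copied to the CPU
 * extraction queues.
 */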
88 
89 int ocelot_mact_forget(struct ocelot *ocelot,
90 		       const unsigned char mac[ETH_ALEN], unsigned int vid)
91 {
92 	ocelot_mact_select(ocelot, mac, vid);
93 
94 	/* Issue a forget command */
95 	ocelot_write(ocelot,
96 		     ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET),
97 		     ANA_TABLES_MACACCESS);
98 
99 	return ocelot_mact_wait_for_completion(ocelot);
100 }
101 EXPORT_SYMBOL(ocelot_mact_forget);
102 
103 static void ocelot_mact_init(struct ocelot *ocelot)
104 {
105 	/* Configure the learning mode entry attributes:
106 	 * - Do not copy the frame to the CPU extraction queues.
107 	 * - Use the vlan and mac_copy for DMAC lookup.
108 	 */
109 	ocelot_rmw(ocelot, 0,
110 		   ANA_AGENCTRL_LEARN_CPU_COPY | ANA_AGENCTRL_IGNORE_DMAC_FLAGS
111 		   | ANA_AGENCTRL_LEARN_FWD_KILL
112 		   | ANA_AGENCTRL_LEARN_IGNORE_VLAN,
113 		   ANA_AGENCTRL);
114 
115 	/* Clear the MAC table */
116 	ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
117 }
118 
119 static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
120 {
121 	ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA |
122 			 ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa),
123 			 ANA_PORT_VCAP_S2_CFG, port);
124 
125 	ocelot_write_gix(ocelot, ANA_PORT_VCAP_CFG_S1_ENA,
126 			 ANA_PORT_VCAP_CFG, port);
127 
128 	ocelot_rmw_gix(ocelot, REW_PORT_CFG_ES0_EN,
129 		       REW_PORT_CFG_ES0_EN,
130 		       REW_PORT_CFG, port);
131 }
132 
133 static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
134 {
135 	return ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
136 }
137 
138 static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
139 {
140 	u32 val;
141 
142 	return readx_poll_timeout(ocelot_vlant_read_vlanaccess,
143 		ocelot,
144 		val,
145 		(val & ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M) ==
146 		ANA_TABLES_VLANACCESS_CMD_IDLE,
147 		TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
148 }
149 
150 static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
151 {
152 	/* Select the VID to configure */
153 	ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid),
154 		     ANA_TABLES_VLANTIDX);
155 	/* Set the vlan port members mask and issue a write command */
156 	ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) |
157 			     ANA_TABLES_VLANACCESS_CMD_WRITE,
158 		     ANA_TABLES_VLANACCESS);
159 
160 	return ocelot_vlant_wait_for_completion(ocelot);
161 }
162 
163 static void ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
164 					struct ocelot_vlan native_vlan)
165 {
166 	struct ocelot_port *ocelot_port = ocelot->ports[port];
167 	u32 val = 0;
168 
169 	ocelot_port->native_vlan = native_vlan;
170 
171 	ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(native_vlan.vid),
172 		       REW_PORT_VLAN_CFG_PORT_VID_M,
173 		       REW_PORT_VLAN_CFG, port);
174 
175 	if (ocelot_port->vlan_aware) {
176 		if (native_vlan.valid)
177 			/* Tag all frames except those on the native VLAN (PORT_VID) */
178 			val = REW_TAG_CFG_TAG_CFG(1);
179 		else
180 			/* Tag all frames */
181 			val = REW_TAG_CFG_TAG_CFG(3);
182 	} else {
183 		/* Port tagging disabled. */
184 		val = REW_TAG_CFG_TAG_CFG(0);
185 	}
186 	ocelot_rmw_gix(ocelot, val,
187 		       REW_TAG_CFG_TAG_CFG_M,
188 		       REW_TAG_CFG, port);
189 }
190 
191 /* Default VLAN to classify untagged frames to (may be zero) */
192 static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
193 				 struct ocelot_vlan pvid_vlan)
194 {
195 	struct ocelot_port *ocelot_port = ocelot->ports[port];
196 	u32 val = 0;
197 
198 	ocelot_port->pvid_vlan = pvid_vlan;
199 
200 	if (!ocelot_port->vlan_aware)
201 		pvid_vlan.vid = 0;
202 
203 	ocelot_rmw_gix(ocelot,
204 		       ANA_PORT_VLAN_CFG_VLAN_VID(pvid_vlan.vid),
205 		       ANA_PORT_VLAN_CFG_VLAN_VID_M,
206 		       ANA_PORT_VLAN_CFG, port);
207 
208 	/* If there's no pvid, we should drop not only untagged traffic (which
209 	 * happens automatically), but also 802.1p traffic which gets
210 	 * classified to VLAN 0, but that is always in our RX filter, so it
211 	 * would get accepted were it not for this setting.
212 	 */
213 	if (!pvid_vlan.valid && ocelot_port->vlan_aware)
214 		val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
215 		      ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
216 
217 	ocelot_rmw_gix(ocelot, val,
218 		       ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
219 		       ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
220 		       ANA_PORT_DROP_CFG, port);
221 }
222 
223 int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
224 			       bool vlan_aware)
225 {
226 	struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
227 	struct ocelot_port *ocelot_port = ocelot->ports[port];
228 	struct ocelot_vcap_filter *filter;
229 	u32 val;
230 
231 	list_for_each_entry(filter, &block->rules, list) {
232 		if (filter->ingress_port_mask & BIT(port) &&
233 		    filter->action.vid_replace_ena) {
234 			dev_err(ocelot->dev,
235 				"Cannot change VLAN state with vlan modify rules active\n");
236 			return -EBUSY;
237 		}
238 	}
239 
240 	ocelot_port->vlan_aware = vlan_aware;
241 
242 	if (vlan_aware)
243 		val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
244 		      ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
245 	else
246 		val = 0;
247 	ocelot_rmw_gix(ocelot, val,
248 		       ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
249 		       ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
250 		       ANA_PORT_VLAN_CFG, port);
251 
252 	ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
253 	ocelot_port_set_native_vlan(ocelot, port, ocelot_port->native_vlan);
254 
255 	return 0;
256 }
257 EXPORT_SYMBOL(ocelot_port_vlan_filtering);
258 
259 int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
260 			bool untagged)
261 {
262 	struct ocelot_port *ocelot_port = ocelot->ports[port];
263 
264 	/* Deny changing the native VLAN, but always permit deleting it */
265 	if (untagged && ocelot_port->native_vlan.vid != vid &&
266 	    ocelot_port->native_vlan.valid) {
267 		dev_err(ocelot->dev,
268 			"Port already has a native VLAN: %d\n",
269 			ocelot_port->native_vlan.vid);
270 		return -EBUSY;
271 	}
272 
273 	return 0;
274 }
275 EXPORT_SYMBOL(ocelot_vlan_prepare);
276 
277 int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
278 		    bool untagged)
279 {
280 	int ret;
281 
282 	/* Make the port a member of the VLAN */
283 	ocelot->vlan_mask[vid] |= BIT(port);
284 	ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
285 	if (ret)
286 		return ret;
287 
288 	/* Default ingress vlan classification */
289 	if (pvid) {
290 		struct ocelot_vlan pvid_vlan;
291 
292 		pvid_vlan.vid = vid;
293 		pvid_vlan.valid = true;
294 		ocelot_port_set_pvid(ocelot, port, pvid_vlan);
295 	}
296 
297 	/* Untagged egress VLAN classification */
298 	if (untagged) {
299 		struct ocelot_vlan native_vlan;
300 
301 		native_vlan.vid = vid;
302 		native_vlan.valid = true;
303 		ocelot_port_set_native_vlan(ocelot, port, native_vlan);
304 	}
305 
306 	return 0;
307 }
308 EXPORT_SYMBOL(ocelot_vlan_add);
309 
310 int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
311 {
312 	struct ocelot_port *ocelot_port = ocelot->ports[port];
313 	int ret;
314 
315 	/* Stop the port from being a member of the vlan */
316 	ocelot->vlan_mask[vid] &= ~BIT(port);
317 	ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
318 	if (ret)
319 		return ret;
320 
321 	/* Ingress */
322 	if (ocelot_port->pvid_vlan.vid == vid) {
323 		struct ocelot_vlan pvid_vlan = {0};
324 
325 		ocelot_port_set_pvid(ocelot, port, pvid_vlan);
326 	}
327 
328 	/* Egress */
329 	if (ocelot_port->native_vlan.vid == vid) {
330 		struct ocelot_vlan native_vlan = {0};
331 
332 		ocelot_port_set_native_vlan(ocelot, port, native_vlan);
333 	}
334 
335 	return 0;
336 }
337 EXPORT_SYMBOL(ocelot_vlan_del);
338 
339 static void ocelot_vlan_init(struct ocelot *ocelot)
340 {
341 	u16 port, vid;
342 
343 	/* Clear VLAN table, by default all ports are members of all VLANs */
344 	ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT,
345 		     ANA_TABLES_VLANACCESS);
346 	ocelot_vlant_wait_for_completion(ocelot);
347 
348 	/* Configure the port VLAN memberships */
349 	for (vid = 1; vid < VLAN_N_VID; vid++) {
350 		ocelot->vlan_mask[vid] = 0;
351 		ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
352 	}
353 
354 	/* Because VLAN filtering is enabled, we need VID 0 to get untagged
355 	 * traffic.  It is added automatically if the 8021q module is loaded,
356 	 * but we can't rely on that since the module may not be loaded.
357 	 */
358 	ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0);
359 	ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]);
360 
361 	/* Set vlan ingress filter mask to all ports but the CPU port by
362 	 * default.
363 	 */
364 	ocelot_write(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
365 		     ANA_VLANMASK);
366 
367 	for (port = 0; port < ocelot->num_phys_ports; port++) {
368 		ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port);
369 		ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port);
370 	}
371 }
372 
373 static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
374 {
375 	return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port);
376 }
377 
378 int ocelot_port_flush(struct ocelot *ocelot, int port)
379 {
380 	int err, val;
381 
382 	/* Disable dequeuing from the egress queues */
383 	ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS,
384 		       QSYS_PORT_MODE_DEQUEUE_DIS,
385 		       QSYS_PORT_MODE, port);
386 
387 	/* Disable flow control */
388 	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
389 
390 	/* Disable priority flow control */
391 	ocelot_fields_write(ocelot, port,
392 			    QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0);
393 
394 	/* Wait at least the time it takes to receive a frame of maximum length
395 	 * at the port.
396 	 * Worst-case delays for 10 kilobyte jumbo frames are:
397 	 * 8 ms on a 10M port
398 	 * 800 μs on a 100M port
399 	 * 80 μs on a 1G port
400 	 * 32 μs on a 2.5G port
401 	 */
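	/* Back-of-the-envelope check of the figures above: a 10 kB frame is
	 * roughly 80,000 bits, so it takes 80,000 / 10^7 s = 8 ms to receive
	 * at 10 Mbit/s, 800 us at 100 Mbit/s, 80 us at 1 Gbit/s and 32 us at
	 * 2.5 Gbit/s. Sleeping 8-10 ms therefore covers the slowest port.
	 */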
402 	usleep_range(8000, 10000);
403 
404 	/* Disable half duplex backpressure. */
405 	ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE,
406 		       SYS_FRONT_PORT_MODE, port);
407 
408 	/* Flush the queues associated with the port. */
409 	ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA,
410 		       REW_PORT_CFG, port);
411 
412 	/* Enable dequeuing from the egress queues. */
413 	ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE,
414 		       port);
415 
416 	/* Wait until flushing is complete. */
417 	err = read_poll_timeout(ocelot_read_eq_avail, val, !val,
418 				100, 2000000, false, ocelot, port);
419 
420 	/* Clear flushing again. */
421 	ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
422 
423 	return err;
424 }
425 EXPORT_SYMBOL(ocelot_port_flush);
426 
427 void ocelot_adjust_link(struct ocelot *ocelot, int port,
428 			struct phy_device *phydev)
429 {
430 	struct ocelot_port *ocelot_port = ocelot->ports[port];
431 	int speed, mode = 0;
432 
433 	switch (phydev->speed) {
434 	case SPEED_10:
435 		speed = OCELOT_SPEED_10;
436 		break;
437 	case SPEED_100:
438 		speed = OCELOT_SPEED_100;
439 		break;
440 	case SPEED_1000:
441 		speed = OCELOT_SPEED_1000;
442 		mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
443 		break;
444 	case SPEED_2500:
445 		speed = OCELOT_SPEED_2500;
446 		mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
447 		break;
448 	default:
449 		dev_err(ocelot->dev, "Unsupported PHY speed on port %d: %d\n",
450 			port, phydev->speed);
451 		return;
452 	}
453 
454 	phy_print_status(phydev);
455 
456 	if (!phydev->link)
457 		return;
458 
459 	/* Only full duplex supported for now */
460 	ocelot_port_writel(ocelot_port, DEV_MAC_MODE_CFG_FDX_ENA |
461 			   mode, DEV_MAC_MODE_CFG);
462 
463 	/* Disable HDX fast control */
464 	ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
465 			   DEV_PORT_MISC);
466 
467 	/* SGMII only for now */
468 	ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
469 			   PCS1G_MODE_CFG);
470 	ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
471 
472 	/* Enable PCS */
473 	ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
474 
475 	/* No aneg on SGMII */
476 	ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
477 
478 	/* No loopback */
479 	ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
480 
481 	/* Enable MAC module */
482 	ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
483 			   DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
484 
485 	/* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of
486 	 * reset */
487 	ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed),
488 			   DEV_CLOCK_CFG);
489 
490 	/* No PFC */
491 	ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed),
492 			 ANA_PFC_PFC_CFG, port);
493 
494 	/* Core: Enable port for frame transfer */
495 	ocelot_fields_write(ocelot, port,
496 			    QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
497 
498 	/* Flow control */
499 	ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
500 			 SYS_MAC_FC_CFG_RX_FC_ENA | SYS_MAC_FC_CFG_TX_FC_ENA |
501 			 SYS_MAC_FC_CFG_ZERO_PAUSE_ENA |
502 			 SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
503 			 SYS_MAC_FC_CFG_FC_LINK_SPEED(speed),
504 			 SYS_MAC_FC_CFG, port);
505 	ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
506 }
507 EXPORT_SYMBOL(ocelot_adjust_link);
508 
509 void ocelot_port_enable(struct ocelot *ocelot, int port,
510 			struct phy_device *phy)
511 {
512 	/* Enable receiving frames on the port, and activate auto-learning of
513 	 * MAC addresses.
514 	 */
515 	ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
516 			 ANA_PORT_PORT_CFG_RECV_ENA |
517 			 ANA_PORT_PORT_CFG_PORTID_VAL(port),
518 			 ANA_PORT_PORT_CFG, port);
519 }
520 EXPORT_SYMBOL(ocelot_port_enable);
521 
522 void ocelot_port_disable(struct ocelot *ocelot, int port)
523 {
524 	struct ocelot_port *ocelot_port = ocelot->ports[port];
525 
526 	ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
527 	ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
528 }
529 EXPORT_SYMBOL(ocelot_port_disable);
530 
531 void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
532 				  struct sk_buff *clone)
533 {
534 	struct ocelot_port *ocelot_port = ocelot->ports[port];
535 
536 	spin_lock(&ocelot_port->ts_id_lock);
537 
538 	skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
539 	/* Store timestamp ID in cb[0] of sk_buff */
540 	clone->cb[0] = ocelot_port->ts_id;
541 	ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4;
542 	skb_queue_tail(&ocelot_port->tx_skbs, clone);
543 
544 	spin_unlock(&ocelot_port->ts_id_lock);
545 }
546 EXPORT_SYMBOL(ocelot_port_add_txtstamp_skb);
547 
548 static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
549 				   struct timespec64 *ts)
550 {
551 	unsigned long flags;
552 	u32 val;
553 
554 	spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
555 
556 	/* Read current PTP time to get seconds */
557 	val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
558 
559 	val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
560 	val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
561 	ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
562 	ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
563 
564 	/* Read packet HW timestamp from FIFO */
565 	val = ocelot_read(ocelot, SYS_PTP_TXSTAMP);
566 	ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val);
567 
568 	/* Sec has incremented since the ts was registered */
569 	if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC))
570 		ts->tv_sec--;
571 
572 	spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
573 }
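/* Worked example of the rollover correction above (hypothetical values): if
 * the frame was stamped at 4.999999000 s (seconds LSB = 0) but the PTP clock
 * reads 5.000000100 s (LSB = 1) by the time the FIFO is drained, the LSB
 * mismatch makes us decrement tv_sec, reconstructing 4.999999000 s.
 */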
574 
575 void ocelot_get_txtstamp(struct ocelot *ocelot)
576 {
577 	int budget = OCELOT_PTP_QUEUE_SZ;
578 
579 	while (budget--) {
580 		struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
581 		struct skb_shared_hwtstamps shhwtstamps;
582 		struct ocelot_port *port;
583 		struct timespec64 ts;
584 		unsigned long flags;
585 		u32 val, id, txport;
586 
587 		val = ocelot_read(ocelot, SYS_PTP_STATUS);
588 
589 		/* Check if a timestamp can be retrieved */
590 		if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
591 			break;
592 
593 		WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
594 
595 		/* Retrieve the ts ID and Tx port */
596 		id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
597 		txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
598 
599 		/* Retrieve its associated skb */
600 		port = ocelot->ports[txport];
601 
602 		spin_lock_irqsave(&port->tx_skbs.lock, flags);
603 
604 		skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
605 			if (skb->cb[0] != id)
606 				continue;
607 			__skb_unlink(skb, &port->tx_skbs);
608 			skb_match = skb;
609 			break;
610 		}
611 
612 		spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
613 
614 		/* Get the h/w timestamp */
615 		ocelot_get_hwtimestamp(ocelot, &ts);
616 
617 		if (unlikely(!skb_match))
618 			continue;
619 
620 		/* Set the timestamp into the skb */
621 		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
622 		shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
623 		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
624 
625 		/* Next ts */
626 		ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
627 	}
628 }
629 EXPORT_SYMBOL(ocelot_get_txtstamp);
630 
631 int ocelot_fdb_add(struct ocelot *ocelot, int port,
632 		   const unsigned char *addr, u16 vid)
633 {
634 	int pgid = port;
635 
636 	if (port == ocelot->npi)
637 		pgid = PGID_CPU;
638 
639 	return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED);
640 }
641 EXPORT_SYMBOL(ocelot_fdb_add);
642 
643 int ocelot_fdb_del(struct ocelot *ocelot, int port,
644 		   const unsigned char *addr, u16 vid)
645 {
646 	return ocelot_mact_forget(ocelot, addr, vid);
647 }
648 EXPORT_SYMBOL(ocelot_fdb_del);
649 
650 int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
651 			    bool is_static, void *data)
652 {
653 	struct ocelot_dump_ctx *dump = data;
654 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
655 	u32 seq = dump->cb->nlh->nlmsg_seq;
656 	struct nlmsghdr *nlh;
657 	struct ndmsg *ndm;
658 
659 	if (dump->idx < dump->cb->args[2])
660 		goto skip;
661 
662 	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
663 			sizeof(*ndm), NLM_F_MULTI);
664 	if (!nlh)
665 		return -EMSGSIZE;
666 
667 	ndm = nlmsg_data(nlh);
668 	ndm->ndm_family  = AF_BRIDGE;
669 	ndm->ndm_pad1    = 0;
670 	ndm->ndm_pad2    = 0;
671 	ndm->ndm_flags   = NTF_SELF;
672 	ndm->ndm_type    = 0;
673 	ndm->ndm_ifindex = dump->dev->ifindex;
674 	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;
675 
676 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
677 		goto nla_put_failure;
678 
679 	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
680 		goto nla_put_failure;
681 
682 	nlmsg_end(dump->skb, nlh);
683 
684 skip:
685 	dump->idx++;
686 	return 0;
687 
688 nla_put_failure:
689 	nlmsg_cancel(dump->skb, nlh);
690 	return -EMSGSIZE;
691 }
692 EXPORT_SYMBOL(ocelot_port_fdb_do_dump);
693 
694 static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
695 			    struct ocelot_mact_entry *entry)
696 {
697 	u32 val, dst, macl, mach;
698 	char mac[ETH_ALEN];
699 
700 	/* Set row and column to read from */
701 	ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row);
702 	ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_BUCKET, col);
703 
704 	/* Issue a read command */
705 	ocelot_write(ocelot,
706 		     ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
707 		     ANA_TABLES_MACACCESS);
708 
709 	if (ocelot_mact_wait_for_completion(ocelot))
710 		return -ETIMEDOUT;
711 
712 	/* Read the entry flags */
713 	val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
714 	if (!(val & ANA_TABLES_MACACCESS_VALID))
715 		return -EINVAL;
716 
717 	/* If the entry read has another port configured as its destination,
718 	 * do not report it.
719 	 */
720 	dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3;
721 	if (dst != port)
722 		return -EINVAL;
723 
724 	/* Get the entry's MAC address and VLAN id */
725 	macl = ocelot_read(ocelot, ANA_TABLES_MACLDATA);
726 	mach = ocelot_read(ocelot, ANA_TABLES_MACHDATA);
727 
728 	mac[0] = (mach >> 8)  & 0xff;
729 	mac[1] = (mach >> 0)  & 0xff;
730 	mac[2] = (macl >> 24) & 0xff;
731 	mac[3] = (macl >> 16) & 0xff;
732 	mac[4] = (macl >> 8)  & 0xff;
733 	mac[5] = (macl >> 0)  & 0xff;
734 
735 	entry->vid = (mach >> 16) & 0xfff;
736 	ether_addr_copy(entry->mac, mac);
737 
738 	return 0;
739 }
740 
741 int ocelot_fdb_dump(struct ocelot *ocelot, int port,
742 		    dsa_fdb_dump_cb_t *cb, void *data)
743 {
744 	int i, j;
745 
746 	/* Loop through all the MAC table entries. */
747 	for (i = 0; i < ocelot->num_mact_rows; i++) {
748 		for (j = 0; j < 4; j++) {
749 			struct ocelot_mact_entry entry;
750 			bool is_static;
751 			int ret;
752 
753 			ret = ocelot_mact_read(ocelot, port, i, j, &entry);
754 			/* If the entry is not valid, or its destination is
755 			 * another port, skip it.
756 			 */
757 			if (ret == -EINVAL)
758 				continue;
759 			else if (ret)
760 				return ret;
761 
762 			is_static = (entry.type == ENTRYTYPE_LOCKED);
763 
764 			ret = cb(entry.mac, entry.vid, is_static, data);
765 			if (ret)
766 				return ret;
767 		}
768 	}
769 
770 	return 0;
771 }
772 EXPORT_SYMBOL(ocelot_fdb_dump);
773 
774 int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
775 {
776 	return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
777 			    sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
778 }
779 EXPORT_SYMBOL(ocelot_hwstamp_get);
780 
781 int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
782 {
783 	struct ocelot_port *ocelot_port = ocelot->ports[port];
784 	struct hwtstamp_config cfg;
785 
786 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
787 		return -EFAULT;
788 
789 	/* reserved for future extensions */
790 	if (cfg.flags)
791 		return -EINVAL;
792 
793 	/* Tx type sanity check */
794 	switch (cfg.tx_type) {
795 	case HWTSTAMP_TX_ON:
796 		ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
797 		break;
798 	case HWTSTAMP_TX_ONESTEP_SYNC:
799 		/* IFH_REW_OP_ONE_STEP_PTP updates the correction field; here
800 		 * we need the origin time to be updated instead.
801 		 */
802 		ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
803 		break;
804 	case HWTSTAMP_TX_OFF:
805 		ocelot_port->ptp_cmd = 0;
806 		break;
807 	default:
808 		return -ERANGE;
809 	}
810 
811 	mutex_lock(&ocelot->ptp_lock);
812 
813 	switch (cfg.rx_filter) {
814 	case HWTSTAMP_FILTER_NONE:
815 		break;
816 	case HWTSTAMP_FILTER_ALL:
817 	case HWTSTAMP_FILTER_SOME:
818 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
819 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
820 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
821 	case HWTSTAMP_FILTER_NTP_ALL:
822 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
823 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
824 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
825 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
826 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
827 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
828 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
829 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
830 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
831 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
832 		break;
833 	default:
834 		mutex_unlock(&ocelot->ptp_lock);
835 		return -ERANGE;
836 	}
837 
838 	/* Commit back the result & save it */
839 	memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
840 	mutex_unlock(&ocelot->ptp_lock);
841 
842 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
843 }
844 EXPORT_SYMBOL(ocelot_hwstamp_set);
845 
846 void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
847 {
848 	int i;
849 
850 	if (sset != ETH_SS_STATS)
851 		return;
852 
853 	for (i = 0; i < ocelot->num_stats; i++)
854 		memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
855 		       ETH_GSTRING_LEN);
856 }
857 EXPORT_SYMBOL(ocelot_get_strings);
858 
859 static void ocelot_update_stats(struct ocelot *ocelot)
860 {
861 	int i, j;
862 
863 	mutex_lock(&ocelot->stats_lock);
864 
865 	for (i = 0; i < ocelot->num_phys_ports; i++) {
866 		/* Configure the port to read the stats from */
867 		ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG);
868 
869 		for (j = 0; j < ocelot->num_stats; j++) {
870 			u32 val;
871 			unsigned int idx = i * ocelot->num_stats + j;
872 
873 			val = ocelot_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
874 					      ocelot->stats_layout[j].offset);
875 
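			/* The hardware counters are only 32 bits wide; if the
			 * new reading is smaller than the low word of the
			 * accumulated value, the counter wrapped since the
			 * last poll, so carry 2^32 into the 64-bit total.
			 */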
876 			if (val < (ocelot->stats[idx] & U32_MAX))
877 				ocelot->stats[idx] += (u64)1 << 32;
878 
879 			ocelot->stats[idx] = (ocelot->stats[idx] &
880 					      ~(u64)U32_MAX) + val;
881 		}
882 	}
883 
884 	mutex_unlock(&ocelot->stats_lock);
885 }
886 
887 static void ocelot_check_stats_work(struct work_struct *work)
888 {
889 	struct delayed_work *del_work = to_delayed_work(work);
890 	struct ocelot *ocelot = container_of(del_work, struct ocelot,
891 					     stats_work);
892 
893 	ocelot_update_stats(ocelot);
894 
895 	queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
896 			   OCELOT_STATS_CHECK_DELAY);
897 }
898 
899 void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
900 {
901 	int i;
902 
903 	/* check and update now */
904 	ocelot_update_stats(ocelot);
905 
906 	/* Copy all counters */
907 	for (i = 0; i < ocelot->num_stats; i++)
908 		*data++ = ocelot->stats[port * ocelot->num_stats + i];
909 }
910 EXPORT_SYMBOL(ocelot_get_ethtool_stats);
911 
912 int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
913 {
914 	if (sset != ETH_SS_STATS)
915 		return -EOPNOTSUPP;
916 
917 	return ocelot->num_stats;
918 }
919 EXPORT_SYMBOL(ocelot_get_sset_count);
920 
921 int ocelot_get_ts_info(struct ocelot *ocelot, int port,
922 		       struct ethtool_ts_info *info)
923 {
924 	info->phc_index = ocelot->ptp_clock ?
925 			  ptp_clock_index(ocelot->ptp_clock) : -1;
926 	if (info->phc_index == -1) {
927 		info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
928 					 SOF_TIMESTAMPING_RX_SOFTWARE |
929 					 SOF_TIMESTAMPING_SOFTWARE;
930 		return 0;
931 	}
932 	info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
933 				 SOF_TIMESTAMPING_RX_SOFTWARE |
934 				 SOF_TIMESTAMPING_SOFTWARE |
935 				 SOF_TIMESTAMPING_TX_HARDWARE |
936 				 SOF_TIMESTAMPING_RX_HARDWARE |
937 				 SOF_TIMESTAMPING_RAW_HARDWARE;
938 	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
939 			 BIT(HWTSTAMP_TX_ONESTEP_SYNC);
940 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
941 
942 	return 0;
943 }
944 EXPORT_SYMBOL(ocelot_get_ts_info);
945 
946 static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
947 				bool only_active_ports)
948 {
949 	u32 mask = 0;
950 	int port;
951 
952 	for (port = 0; port < ocelot->num_phys_ports; port++) {
953 		struct ocelot_port *ocelot_port = ocelot->ports[port];
954 
955 		if (!ocelot_port)
956 			continue;
957 
958 		if (ocelot_port->bond == bond) {
959 			if (only_active_ports && !ocelot_port->lag_tx_active)
960 				continue;
961 
962 			mask |= BIT(port);
963 		}
964 	}
965 
966 	return mask;
967 }
968 
969 static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
970 {
971 	u32 mask = 0;
972 	int port;
973 
974 	for (port = 0; port < ocelot->num_phys_ports; port++) {
975 		struct ocelot_port *ocelot_port = ocelot->ports[port];
976 
977 		if (!ocelot_port)
978 			continue;
979 
980 		if (ocelot_port->is_dsa_8021q_cpu)
981 			mask |= BIT(port);
982 	}
983 
984 	return mask;
985 }
986 
987 void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
988 {
989 	unsigned long cpu_fwd_mask;
990 	int port;
991 
992 	/* If a DSA tag_8021q CPU exists, it needs to be included in the
993 	 * regular forwarding path of the front ports regardless of whether
994 	 * those are bridged or standalone.
995 	 * If DSA tag_8021q is not used, this returns 0, which is fine because
996 	 * the hardware-based CPU port module can be a destination for packets
997 	 * even if it isn't part of PGID_SRC.
998 	 */
999 	cpu_fwd_mask = ocelot_get_dsa_8021q_cpu_mask(ocelot);
1000 
1001 	/* Apply FWD mask. The loop is needed to add/remove the current port as
1002 	 * a source for the other ports.
1003 	 */
1004 	for (port = 0; port < ocelot->num_phys_ports; port++) {
1005 		struct ocelot_port *ocelot_port = ocelot->ports[port];
1006 		unsigned long mask;
1007 
1008 		if (!ocelot_port) {
1009 			/* Unused ports can't send anywhere */
1010 			mask = 0;
1011 		} else if (ocelot_port->is_dsa_8021q_cpu) {
1012 			/* The DSA tag_8021q CPU ports need to be able to
1013 			 * forward packets to all other ports except for
1014 			 * themselves
1015 			 */
1016 			mask = GENMASK(ocelot->num_phys_ports - 1, 0);
1017 			mask &= ~cpu_fwd_mask;
1018 		} else if (ocelot->bridge_fwd_mask & BIT(port)) {
1019 			struct net_device *bond = ocelot_port->bond;
1020 
1021 			mask = ocelot->bridge_fwd_mask & ~BIT(port);
1022 			if (bond) {
1023 				mask &= ~ocelot_get_bond_mask(ocelot, bond,
1024 							      false);
1025 			}
1026 		} else {
1027 			/* Standalone ports forward only to DSA tag_8021q CPU
1028 			 * ports (if those exist), or to the hardware CPU port
1029 			 * module otherwise.
1030 			 */
1031 			mask = cpu_fwd_mask;
1032 		}
1033 
1034 		ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port);
1035 	}
1036 }
1037 EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);
1038 
1039 void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
1040 {
1041 	u32 port_cfg;
1042 
1043 	if (!(BIT(port) & ocelot->bridge_mask))
1044 		return;
1045 
1046 	port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port);
1047 
1048 	switch (state) {
1049 	case BR_STATE_FORWARDING:
1050 		ocelot->bridge_fwd_mask |= BIT(port);
1051 		fallthrough;
1052 	case BR_STATE_LEARNING:
1053 		port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
1054 		break;
1055 
1056 	default:
1057 		port_cfg &= ~ANA_PORT_PORT_CFG_LEARN_ENA;
1058 		ocelot->bridge_fwd_mask &= ~BIT(port);
1059 		break;
1060 	}
1061 
1062 	ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG, port);
1063 
1064 	ocelot_apply_bridge_fwd_mask(ocelot);
1065 }
1066 EXPORT_SYMBOL(ocelot_bridge_stp_state_set);
1067 
1068 void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
1069 {
1070 	unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000);
1071 
1072 	/* Setting AGE_PERIOD to zero effectively disables automatic aging,
1073 	 * which is clearly not what our intention is. So avoid that.
1074 	 */
1075 	if (!age_period)
1076 		age_period = 1;
1077 
1078 	ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE);
1079 }
1080 EXPORT_SYMBOL(ocelot_set_ageing_time);
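/* Worked example (using the bridge layer's default ageing time): a request
 * of 300000 ms gives AGE_PERIOD = 300000 / 2000 = 150; since an entry is
 * aged out only after two scan periods, it disappears after 2 * 150 s =
 * 300 s, as requested. Requests below 2000 ms would compute 0 and are
 * clamped to 1 by the check above.
 */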
1081 
1082 static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
1083 						     const unsigned char *addr,
1084 						     u16 vid)
1085 {
1086 	struct ocelot_multicast *mc;
1087 
1088 	list_for_each_entry(mc, &ocelot->multicast, list) {
1089 		if (ether_addr_equal(mc->addr, addr) && mc->vid == vid)
1090 			return mc;
1091 	}
1092 
1093 	return NULL;
1094 }
1095 
1096 static enum macaccess_entry_type ocelot_classify_mdb(const unsigned char *addr)
1097 {
1098 	if (addr[0] == 0x01 && addr[1] == 0x00 && addr[2] == 0x5e)
1099 		return ENTRYTYPE_MACv4;
1100 	if (addr[0] == 0x33 && addr[1] == 0x33)
1101 		return ENTRYTYPE_MACv6;
1102 	return ENTRYTYPE_LOCKED;
1103 }
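/* For example, 01:00:5e:00:00:fb (an IPv4 multicast MAC) classifies as
 * ENTRYTYPE_MACv4, 33:33:00:00:00:fb (IPv6 multicast) as ENTRYTYPE_MACv6,
 * and any other address falls back to a regular locked entry whose
 * destination set lives in a PGID.
 */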
1104 
1105 static struct ocelot_pgid *ocelot_pgid_alloc(struct ocelot *ocelot, int index,
1106 					     unsigned long ports)
1107 {
1108 	struct ocelot_pgid *pgid;
1109 
1110 	pgid = kzalloc(sizeof(*pgid), GFP_KERNEL);
1111 	if (!pgid)
1112 		return ERR_PTR(-ENOMEM);
1113 
1114 	pgid->ports = ports;
1115 	pgid->index = index;
1116 	refcount_set(&pgid->refcount, 1);
1117 	list_add_tail(&pgid->list, &ocelot->pgids);
1118 
1119 	return pgid;
1120 }
1121 
1122 static void ocelot_pgid_free(struct ocelot *ocelot, struct ocelot_pgid *pgid)
1123 {
1124 	if (!refcount_dec_and_test(&pgid->refcount))
1125 		return;
1126 
1127 	list_del(&pgid->list);
1128 	kfree(pgid);
1129 }
1130 
1131 static struct ocelot_pgid *ocelot_mdb_get_pgid(struct ocelot *ocelot,
1132 					       const struct ocelot_multicast *mc)
1133 {
1134 	struct ocelot_pgid *pgid;
1135 	int index;
1136 
1137 	/* According to VSC7514 datasheet 3.9.1.5 IPv4 Multicast Entries and
1138 	 * 3.9.1.6 IPv6 Multicast Entries, "Instead of a lookup in the
1139 	 * destination mask table (PGID), the destination set is programmed as
1140 	 * part of the entry MAC address.", and the DEST_IDX is set to 0.
1141 	 */
1142 	if (mc->entry_type == ENTRYTYPE_MACv4 ||
1143 	    mc->entry_type == ENTRYTYPE_MACv6)
1144 		return ocelot_pgid_alloc(ocelot, 0, mc->ports);
1145 
1146 	list_for_each_entry(pgid, &ocelot->pgids, list) {
1147 		/* When searching for a nonreserved multicast PGID, ignore the
1148 		 * dummy PGID of zero that we have for MACv4/MACv6 entries
1149 		 */
1150 		if (pgid->index && pgid->ports == mc->ports) {
1151 			refcount_inc(&pgid->refcount);
1152 			return pgid;
1153 		}
1154 	}
1155 
1156 	/* Search for a free index in the nonreserved multicast PGID area */
1157 	for_each_nonreserved_multicast_dest_pgid(ocelot, index) {
1158 		bool used = false;
1159 
1160 		list_for_each_entry(pgid, &ocelot->pgids, list) {
1161 			if (pgid->index == index) {
1162 				used = true;
1163 				break;
1164 			}
1165 		}
1166 
1167 		if (!used)
1168 			return ocelot_pgid_alloc(ocelot, index, mc->ports);
1169 	}
1170 
1171 	return ERR_PTR(-ENOSPC);
1172 }
1173 
1174 static void ocelot_encode_ports_to_mdb(unsigned char *addr,
1175 				       struct ocelot_multicast *mc)
1176 {
1177 	ether_addr_copy(addr, mc->addr);
1178 
1179 	if (mc->entry_type == ENTRYTYPE_MACv4) {
1180 		addr[0] = 0;
1181 		addr[1] = mc->ports >> 8;
1182 		addr[2] = mc->ports & 0xff;
1183 	} else if (mc->entry_type == ENTRYTYPE_MACv6) {
1184 		addr[0] = mc->ports >> 8;
1185 		addr[1] = mc->ports & 0xff;
1186 	}
1187 }
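/* Hypothetical example: an ENTRYTYPE_MACv4 group 01:00:5e:01:02:03 with
 * mc->ports == 0x0013 is written to hardware as 00:00:13:01:02:03 (the low
 * three bytes of the group address are kept, the top three carry the port
 * set), while an ENTRYTYPE_MACv6 group 33:33:00:00:00:fb with the same port
 * set becomes 00:13:00:00:00:fb.
 */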
1188 
1189 int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
1190 			const struct switchdev_obj_port_mdb *mdb)
1191 {
1192 	unsigned char addr[ETH_ALEN];
1193 	struct ocelot_multicast *mc;
1194 	struct ocelot_pgid *pgid;
1195 	u16 vid = mdb->vid;
1196 
1197 	if (port == ocelot->npi)
1198 		port = ocelot->num_phys_ports;
1199 
1200 	mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
1201 	if (!mc) {
1202 		/* New entry */
1203 		mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL);
1204 		if (!mc)
1205 			return -ENOMEM;
1206 
1207 		mc->entry_type = ocelot_classify_mdb(mdb->addr);
1208 		ether_addr_copy(mc->addr, mdb->addr);
1209 		mc->vid = vid;
1210 
1211 		list_add_tail(&mc->list, &ocelot->multicast);
1212 	} else {
1213 		/* Existing entry. Clean up the current port mask from
1214 		 * hardware now, because we'll be modifying it.
1215 		 */
1216 		ocelot_pgid_free(ocelot, mc->pgid);
1217 		ocelot_encode_ports_to_mdb(addr, mc);
1218 		ocelot_mact_forget(ocelot, addr, vid);
1219 	}
1220 
1221 	mc->ports |= BIT(port);
1222 
1223 	pgid = ocelot_mdb_get_pgid(ocelot, mc);
1224 	if (IS_ERR(pgid)) {
1225 		dev_err(ocelot->dev,
1226 			"Cannot allocate PGID for mdb %pM vid %d\n",
1227 			mc->addr, mc->vid);
1228 		devm_kfree(ocelot->dev, mc);
1229 		return PTR_ERR(pgid);
1230 	}
1231 	mc->pgid = pgid;
1232 
1233 	ocelot_encode_ports_to_mdb(addr, mc);
1234 
1235 	if (mc->entry_type != ENTRYTYPE_MACv4 &&
1236 	    mc->entry_type != ENTRYTYPE_MACv6)
1237 		ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
1238 				 pgid->index);
1239 
1240 	return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
1241 				 mc->entry_type);
1242 }
1243 EXPORT_SYMBOL(ocelot_port_mdb_add);
1244 
1245 int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
1246 			const struct switchdev_obj_port_mdb *mdb)
1247 {
1248 	unsigned char addr[ETH_ALEN];
1249 	struct ocelot_multicast *mc;
1250 	struct ocelot_pgid *pgid;
1251 	u16 vid = mdb->vid;
1252 
1253 	if (port == ocelot->npi)
1254 		port = ocelot->num_phys_ports;
1255 
1256 	mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
1257 	if (!mc)
1258 		return -ENOENT;
1259 
1260 	ocelot_encode_ports_to_mdb(addr, mc);
1261 	ocelot_mact_forget(ocelot, addr, vid);
1262 
1263 	ocelot_pgid_free(ocelot, mc->pgid);
1264 	mc->ports &= ~BIT(port);
1265 	if (!mc->ports) {
1266 		list_del(&mc->list);
1267 		devm_kfree(ocelot->dev, mc);
1268 		return 0;
1269 	}
1270 
1271 	/* We have a PGID with fewer ports now */
1272 	pgid = ocelot_mdb_get_pgid(ocelot, mc);
1273 	if (IS_ERR(pgid))
1274 		return PTR_ERR(pgid);
1275 	mc->pgid = pgid;
1276 
1277 	ocelot_encode_ports_to_mdb(addr, mc);
1278 
1279 	if (mc->entry_type != ENTRYTYPE_MACv4 &&
1280 	    mc->entry_type != ENTRYTYPE_MACv6)
1281 		ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
1282 				 pgid->index);
1283 
1284 	return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
1285 				 mc->entry_type);
1286 }
1287 EXPORT_SYMBOL(ocelot_port_mdb_del);
1288 
1289 int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
1290 			    struct net_device *bridge)
1291 {
1292 	if (!ocelot->bridge_mask) {
1293 		ocelot->hw_bridge_dev = bridge;
1294 	} else {
1295 		if (ocelot->hw_bridge_dev != bridge)
1296 			/* This is adding the port to a second bridge, which is
1297 			 * unsupported */
1298 			return -ENODEV;
1299 	}
1300 
1301 	ocelot->bridge_mask |= BIT(port);
1302 
1303 	return 0;
1304 }
1305 EXPORT_SYMBOL(ocelot_port_bridge_join);
1306 
1307 int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
1308 			     struct net_device *bridge)
1309 {
1310 	struct ocelot_vlan pvid = {0}, native_vlan = {0};
1311 	int ret;
1312 
1313 	ocelot->bridge_mask &= ~BIT(port);
1314 
1315 	if (!ocelot->bridge_mask)
1316 		ocelot->hw_bridge_dev = NULL;
1317 
1318 	ret = ocelot_port_vlan_filtering(ocelot, port, false);
1319 	if (ret)
1320 		return ret;
1321 
1322 	ocelot_port_set_pvid(ocelot, port, pvid);
1323 	ocelot_port_set_native_vlan(ocelot, port, native_vlan);
1324 
1325 	return 0;
1326 }
1327 EXPORT_SYMBOL(ocelot_port_bridge_leave);
1328 
1329 static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
1330 {
1331 	unsigned long visited = GENMASK(ocelot->num_phys_ports - 1, 0);
1332 	int i, port, lag;
1333 
1334 	/* Reset destination and aggregation PGIDS */
1335 	for_each_unicast_dest_pgid(ocelot, port)
1336 		ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
1337 
1338 	for_each_aggr_pgid(ocelot, i)
1339 		ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
1340 				 ANA_PGID_PGID, i);
1341 
1342 	/* The visited ports bitmask holds the list of ports offloading any
1343 	 * bonding interface. Initially we mark all these ports as unvisited,
1344 	 * then every time we visit a port in this bitmask, we know that it is
1345 	 * the lowest numbered port, i.e. the one whose logical ID == physical
1346 	 * port ID == LAG ID. So we mark as visited all further ports in the
1347 	 * bitmask that are offloading the same bonding interface. This way,
1348 	 * we set up the aggregation PGIDs only once per bonding interface.
1349 	 */
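	/* Hypothetical example: for a two-port LAG on ports 2 and 3,
	 * aggr_idx becomes {2, 3} and the aggregation PGIDs below alternate
	 * between BIT(2) and BIT(3), so the flow hash spreads traffic roughly
	 * evenly across the two links.
	 */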
1350 	for (port = 0; port < ocelot->num_phys_ports; port++) {
1351 		struct ocelot_port *ocelot_port = ocelot->ports[port];
1352 
1353 		if (!ocelot_port || !ocelot_port->bond)
1354 			continue;
1355 
1356 		visited &= ~BIT(port);
1357 	}
1358 
1359 	/* Now, set PGIDs for each active LAG */
1360 	for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
1361 		struct net_device *bond = ocelot->ports[lag]->bond;
1362 		int num_active_ports = 0;
1363 		unsigned long bond_mask;
1364 		u8 aggr_idx[16];
1365 
1366 		if (!bond || (visited & BIT(lag)))
1367 			continue;
1368 
1369 		bond_mask = ocelot_get_bond_mask(ocelot, bond, true);
1370 
1371 		for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
1372 			// Destination mask
1373 			ocelot_write_rix(ocelot, bond_mask,
1374 					 ANA_PGID_PGID, port);
1375 			aggr_idx[num_active_ports++] = port;
1376 		}
1377 
1378 		for_each_aggr_pgid(ocelot, i) {
1379 			u32 ac;
1380 
1381 			ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i);
1382 			ac &= ~bond_mask;
1383 			/* Don't do division by zero if there was no active
1384 			 * port. Just make all aggregation codes zero.
1385 			 */
1386 			if (num_active_ports)
1387 				ac |= BIT(aggr_idx[i % num_active_ports]);
1388 			ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i);
1389 		}
1390 
1391 		/* Mark all ports in the same LAG as visited to avoid applying
1392 		 * the same config again.
1393 		 */
1394 		for (port = lag; port < ocelot->num_phys_ports; port++) {
1395 			struct ocelot_port *ocelot_port = ocelot->ports[port];
1396 
1397 			if (!ocelot_port)
1398 				continue;
1399 
1400 			if (ocelot_port->bond == bond)
1401 				visited |= BIT(port);
1402 		}
1403 	}
1404 }
1405 
1406 /* When offloading a bonding interface, the switch ports configured under the
1407  * same bond must have the same logical port ID, equal to the physical port ID
1408  * of the lowest numbered physical port in that bond. Otherwise, in standalone/
1409  * bridged mode, each port has a logical port ID equal to its physical port ID.
1410  */
1411 static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
1412 {
1413 	int port;
1414 
1415 	for (port = 0; port < ocelot->num_phys_ports; port++) {
1416 		struct ocelot_port *ocelot_port = ocelot->ports[port];
1417 		struct net_device *bond;
1418 
1419 		if (!ocelot_port)
1420 			continue;
1421 
1422 		bond = ocelot_port->bond;
1423 		if (bond) {
1424 			int lag = __ffs(ocelot_get_bond_mask(ocelot, bond,
1425 							     false));
1426 
1427 			ocelot_rmw_gix(ocelot,
1428 				       ANA_PORT_PORT_CFG_PORTID_VAL(lag),
1429 				       ANA_PORT_PORT_CFG_PORTID_VAL_M,
1430 				       ANA_PORT_PORT_CFG, port);
1431 		} else {
1432 			ocelot_rmw_gix(ocelot,
1433 				       ANA_PORT_PORT_CFG_PORTID_VAL(port),
1434 				       ANA_PORT_PORT_CFG_PORTID_VAL_M,
1435 				       ANA_PORT_PORT_CFG, port);
1436 		}
1437 	}
1438 }
1439 
1440 int ocelot_port_lag_join(struct ocelot *ocelot, int port,
1441 			 struct net_device *bond,
1442 			 struct netdev_lag_upper_info *info)
1443 {
1444 	if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
1445 		return -EOPNOTSUPP;
1446 
1447 	ocelot->ports[port]->bond = bond;
1448 
1449 	ocelot_setup_logical_port_ids(ocelot);
1450 	ocelot_apply_bridge_fwd_mask(ocelot);
1451 	ocelot_set_aggr_pgids(ocelot);
1452 
1453 	return 0;
1454 }
1455 EXPORT_SYMBOL(ocelot_port_lag_join);
1456 
1457 void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
1458 			   struct net_device *bond)
1459 {
1460 	ocelot->ports[port]->bond = NULL;
1461 
1462 	ocelot_setup_logical_port_ids(ocelot);
1463 	ocelot_apply_bridge_fwd_mask(ocelot);
1464 	ocelot_set_aggr_pgids(ocelot);
1465 }
1466 EXPORT_SYMBOL(ocelot_port_lag_leave);
1467 
1468 void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active)
1469 {
1470 	struct ocelot_port *ocelot_port = ocelot->ports[port];
1471 
1472 	ocelot_port->lag_tx_active = lag_tx_active;
1473 
1474 	/* Rebalance the LAGs */
1475 	ocelot_set_aggr_pgids(ocelot);
1476 }
1477 EXPORT_SYMBOL(ocelot_port_lag_change);
1478 
1479 /* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
1480  * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
1481  * In the special case that it's the NPI port that we're configuring, the
1482  * length of the tag and optional prefix needs to be accounted for privately,
1483  * in order to be able to sustain communication at the requested @sdu.
1484  */
1485 void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
1486 {
1487 	struct ocelot_port *ocelot_port = ocelot->ports[port];
1488 	int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
1489 	int pause_start, pause_stop;
1490 	int atop, atop_tot;
1491 
1492 	if (port == ocelot->npi) {
1493 		maxlen += OCELOT_TAG_LEN;
1494 
1495 		if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
1496 			maxlen += OCELOT_SHORT_PREFIX_LEN;
1497 		else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
1498 			maxlen += OCELOT_LONG_PREFIX_LEN;
1499 	}
1500 
1501 	ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
1502 
1503 	/* Set Pause watermark hysteresis */
1504 	pause_start = 6 * maxlen / OCELOT_BUFFER_CELL_SZ;
1505 	pause_stop = 4 * maxlen / OCELOT_BUFFER_CELL_SZ;
1506 	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_START,
1507 			    pause_start);
1508 	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_STOP,
1509 			    pause_stop);
1510 
1511 	/* Tail dropping watermarks */
1512 	atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) /
1513 		   OCELOT_BUFFER_CELL_SZ;
1514 	atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ;
1515 	ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port);
1516 	ocelot_write(ocelot, ocelot->ops->wm_enc(atop_tot), SYS_ATOP_TOT_CFG);
1517 }
1518 EXPORT_SYMBOL(ocelot_port_set_maxlen);
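/* Rough numbers for the watermarks above, assuming the 60-byte buffer cells
 * (OCELOT_BUFFER_CELL_SZ) used by this switch family: for a standard 1500
 * byte SDU, maxlen = 1518, so pause frames start at 6 * 1518 / 60 = 151
 * occupied cells and stop at 101, and the per-port tail-drop watermark is
 * 9 * 1518 / 60 = 227 cells. These are illustrative values, not taken from
 * the datasheet.
 */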
1519 
1520 int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
1521 {
1522 	int max_mtu = 65535 - ETH_HLEN - ETH_FCS_LEN;
1523 
1524 	if (port == ocelot->npi) {
1525 		max_mtu -= OCELOT_TAG_LEN;
1526 
1527 		if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
1528 			max_mtu -= OCELOT_SHORT_PREFIX_LEN;
1529 		else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
1530 			max_mtu -= OCELOT_LONG_PREFIX_LEN;
1531 	}
1532 
1533 	return max_mtu;
1534 }
1535 EXPORT_SYMBOL(ocelot_get_max_mtu);
1536 
1537 void ocelot_init_port(struct ocelot *ocelot, int port)
1538 {
1539 	struct ocelot_port *ocelot_port = ocelot->ports[port];
1540 
1541 	skb_queue_head_init(&ocelot_port->tx_skbs);
1542 	spin_lock_init(&ocelot_port->ts_id_lock);
1543 
1544 	/* Basic L2 initialization */
1545 
1546 	/* Set MAC IFG Gaps
1547 	 * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0
1548 	 * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5
1549 	 */
1550 	ocelot_port_writel(ocelot_port, DEV_MAC_IFG_CFG_TX_IFG(5),
1551 			   DEV_MAC_IFG_CFG);
1552 
1553 	/* Load seed (0) and set MAC HDX late collision  */
1554 	ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) |
1555 			   DEV_MAC_HDX_CFG_SEED_LOAD,
1556 			   DEV_MAC_HDX_CFG);
1557 	mdelay(1);
1558 	ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67),
1559 			   DEV_MAC_HDX_CFG);
1560 
1561 	/* Set Max Length and maximum tags allowed */
1562 	ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN);
1563 	ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
1564 			   DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
1565 			   DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA |
1566 			   DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
1567 			   DEV_MAC_TAGS_CFG);
1568 
1569 	/* Set SMAC of Pause frame (00:00:00:00:00:00) */
1570 	ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
1571 	ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG);
1572 
1573 	/* Enable transmission of pause frames */
1574 	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
1575 
1576 	/* Drop frames with multicast source address */
1577 	ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
1578 		       ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
1579 		       ANA_PORT_DROP_CFG, port);
1580 
1581 	/* Set default VLAN and tag type to 8021Q. */
1582 	ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q),
1583 		       REW_PORT_VLAN_CFG_PORT_TPID_M,
1584 		       REW_PORT_VLAN_CFG, port);
1585 
1586 	/* Enable vcap lookups */
1587 	ocelot_vcap_enable(ocelot, port);
1588 }
1589 EXPORT_SYMBOL(ocelot_init_port);
1590 
1591 /* Configure and enable the CPU port module, which is a set of queues
1592  * accessible through register MMIO, frame DMA or Ethernet (in case
1593  * NPI mode is used).
1594  */
1595 static void ocelot_cpu_port_init(struct ocelot *ocelot)
1596 {
1597 	int cpu = ocelot->num_phys_ports;
1598 
1599 	/* The unicast destination PGID for the CPU port module is unused */
1600 	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
1601 	/* Instead set up a multicast destination PGID for traffic copied to
1602 	 * the CPU. Whitelisted MAC addresses like the port netdevice MAC
1603 	 * addresses will be copied to the CPU via this PGID.
1604 	 */
1605 	ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
1606 	ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
1607 			 ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
1608 			 ANA_PORT_PORT_CFG, cpu);
1609 
1610 	/* Enable CPU port module */
1611 	ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
1612 	/* CPU port Injection/Extraction configuration */
1613 	ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR,
1614 			    OCELOT_TAG_PREFIX_NONE);
1615 	ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR,
1616 			    OCELOT_TAG_PREFIX_NONE);
1617 
1618 	/* Configure the CPU port to be VLAN aware */
1619 	ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
1620 				 ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
1621 				 ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
1622 			 ANA_PORT_VLAN_CFG, cpu);
1623 }
1624 
1625 static void ocelot_detect_features(struct ocelot *ocelot)
1626 {
1627 	int mmgt, eq_ctrl;
1628 
1629 	/* For Ocelot, Felix, Seville, Serval etc, SYS:MMGT:MMGT:FREECNT holds
1630 	 * the number of 240-byte free memory words (aka 4-cell chunks) and not
1631 	 * 192 bytes as the documentation incorrectly says.
1632 	 */
1633 	mmgt = ocelot_read(ocelot, SYS_MMGT);
1634 	ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt);
1635 
1636 	eq_ctrl = ocelot_read(ocelot, QSYS_EQ_CTRL);
1637 	ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl);
1638 }
1639 
1640 int ocelot_init(struct ocelot *ocelot)
1641 {
1642 	char queue_name[32];
1643 	int i, ret;
1644 	u32 port;
1645 
1646 	if (ocelot->ops->reset) {
1647 		ret = ocelot->ops->reset(ocelot);
1648 		if (ret) {
1649 			dev_err(ocelot->dev, "Switch reset failed\n");
1650 			return ret;
1651 		}
1652 	}
1653 
1654 	ocelot->stats = devm_kcalloc(ocelot->dev,
1655 				     ocelot->num_phys_ports * ocelot->num_stats,
1656 				     sizeof(u64), GFP_KERNEL);
1657 	if (!ocelot->stats)
1658 		return -ENOMEM;
1659 
1660 	mutex_init(&ocelot->stats_lock);
1661 	mutex_init(&ocelot->ptp_lock);
1662 	spin_lock_init(&ocelot->ptp_clock_lock);
1663 	snprintf(queue_name, sizeof(queue_name), "%s-stats",
1664 		 dev_name(ocelot->dev));
1665 	ocelot->stats_queue = create_singlethread_workqueue(queue_name);
1666 	if (!ocelot->stats_queue)
1667 		return -ENOMEM;
1668 
1669 	ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0);
1670 	if (!ocelot->owq) {
1671 		destroy_workqueue(ocelot->stats_queue);
1672 		return -ENOMEM;
1673 	}
1674 
1675 	INIT_LIST_HEAD(&ocelot->multicast);
1676 	INIT_LIST_HEAD(&ocelot->pgids);
1677 	ocelot_detect_features(ocelot);
1678 	ocelot_mact_init(ocelot);
1679 	ocelot_vlan_init(ocelot);
1680 	ocelot_vcap_init(ocelot);
1681 	ocelot_cpu_port_init(ocelot);
1682 
1683 	for (port = 0; port < ocelot->num_phys_ports; port++) {
1684 		/* Clear all counters (5 groups) */
1685 		ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) |
1686 				     SYS_STAT_CFG_STAT_CLEAR_SHOT(0x7f),
1687 			     SYS_STAT_CFG);
1688 	}
1689 
1690 	/* Only use S-Tag */
1691 	ocelot_write(ocelot, ETH_P_8021AD, SYS_VLAN_ETYPE_CFG);
1692 
1693 	/* Aggregation mode */
1694 	ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA |
1695 			     ANA_AGGR_CFG_AC_DMAC_ENA |
1696 			     ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA |
1697 			     ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA |
1698 			     ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA |
1699 			     ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA,
1700 			     ANA_AGGR_CFG);
1701 
1702 	/* Set MAC age time to default value. The entry is aged after
1703 	 * 2*AGE_PERIOD
1704 	 */
1705 	ocelot_write(ocelot,
1706 		     ANA_AUTOAGE_AGE_PERIOD(BR_DEFAULT_AGEING_TIME / 2 / HZ),
1707 		     ANA_AUTOAGE);
1708 
1709 	/* Disable learning for frames discarded by VLAN ingress filtering */
1710 	regmap_field_write(ocelot->regfields[ANA_ADVLEARN_VLAN_CHK], 1);
1711 
1712 	/* Setup frame ageing - fixed value "2 sec" - in 6.5 us units */
1713 	ocelot_write(ocelot, SYS_FRM_AGING_AGE_TX_ENA |
1714 		     SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);
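	/* Sanity check: 307692 * 6.5 us is approximately 2,000,000 us, i.e.
	 * the intended 2 second frame ageing.
	 */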
1715 
1716 	/* Setup flooding PGIDs */
1717 	for (i = 0; i < ocelot->num_flooding_pgids; i++)
1718 		ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
1719 				 ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
1720 				 ANA_FLOODING_FLD_UNICAST(PGID_UC),
1721 				 ANA_FLOODING, i);
1722 	ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
1723 		     ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
1724 		     ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
1725 		     ANA_FLOODING_IPMC_FLD_MC4_CTRL(PGID_MC),
1726 		     ANA_FLOODING_IPMC);
1727 
1728 	for (port = 0; port < ocelot->num_phys_ports; port++) {
1729 		/* Transmit the frame to the local port. */
1730 		ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
1731 		/* Do not forward BPDU frames to the front ports. */
1732 		ocelot_write_gix(ocelot,
1733 				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
1734 				 ANA_PORT_CPU_FWD_BPDU_CFG,
1735 				 port);
1736 		/* Ensure bridging is disabled */
1737 		ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
1738 	}
1739 
1740 	/* Allow broadcast MAC frames. */
1741 	for_each_nonreserved_multicast_dest_pgid(ocelot, i) {
1742 		u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
1743 
1744 		ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
1745 	}
1746 	ocelot_write_rix(ocelot,
1747 			 ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
1748 			 ANA_PGID_PGID, PGID_MC);
1749 	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
1750 	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
1751 
1752 	/* Allow manual injection via the DEVCPU_QS registers, and byte-swap
1753 	 * these registers' endianness.
1754 	 */
1755 	ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_BYTE_SWAP |
1756 			 QS_INJ_GRP_CFG_MODE(1), QS_INJ_GRP_CFG, 0);
1757 	ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_BYTE_SWAP |
1758 			 QS_XTR_GRP_CFG_MODE(1), QS_XTR_GRP_CFG, 0);
1759 	ocelot_write(ocelot, ANA_CPUQ_CFG_CPUQ_MIRROR(2) |
1760 		     ANA_CPUQ_CFG_CPUQ_LRN(2) |
1761 		     ANA_CPUQ_CFG_CPUQ_MAC_COPY(2) |
1762 		     ANA_CPUQ_CFG_CPUQ_SRC_COPY(2) |
1763 		     ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(2) |
1764 		     ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(6) |
1765 		     ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(6) |
1766 		     ANA_CPUQ_CFG_CPUQ_IGMP(6) |
1767 		     ANA_CPUQ_CFG_CPUQ_MLD(6), ANA_CPUQ_CFG);
1768 	for (i = 0; i < 16; i++)
1769 		ocelot_write_rix(ocelot, ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(6) |
1770 				 ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
1771 				 ANA_CPUQ_8021_CFG, i);
1772 
1773 	INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
1774 	queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
1775 			   OCELOT_STATS_CHECK_DELAY);
1776 
1777 	return 0;
1778 }
1779 EXPORT_SYMBOL(ocelot_init);
1780 
1781 void ocelot_deinit(struct ocelot *ocelot)
1782 {
1783 	cancel_delayed_work(&ocelot->stats_work);
1784 	destroy_workqueue(ocelot->stats_queue);
1785 	destroy_workqueue(ocelot->owq);
1786 	mutex_destroy(&ocelot->stats_lock);
1787 }
1788 EXPORT_SYMBOL(ocelot_deinit);
1789 
1790 void ocelot_deinit_port(struct ocelot *ocelot, int port)
1791 {
1792 	struct ocelot_port *ocelot_port = ocelot->ports[port];
1793 
1794 	skb_queue_purge(&ocelot_port->tx_skbs);
1795 }
1796 EXPORT_SYMBOL(ocelot_deinit_port);
1797 
1798 MODULE_LICENSE("Dual MIT/GPL");
1799