// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/switchdev.h>
#include <linux/if_bridge.h>
#include <linux/iopoll.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

/* Commands for Mac Table Command register */
#define MAC_CMD_LEARN         0 /* Insert (Learn) 1 entry */
#define MAC_CMD_UNLEARN       1 /* Unlearn (Forget) 1 entry */
#define MAC_CMD_LOOKUP        2 /* Look up 1 entry */
#define MAC_CMD_READ          3 /* Read entry at Mac Table Index */
#define MAC_CMD_WRITE         4 /* Write entry at Mac Table Index */
#define MAC_CMD_SCAN          5 /* Scan (Age or find next) */
#define MAC_CMD_FIND_SMALLEST 6 /* Get next entry */
#define MAC_CMD_CLEAR_ALL     7 /* Delete all entries in table */

/* Commands for MAC_ENTRY_ADDR_TYPE */
#define  MAC_ENTRY_ADDR_TYPE_UPSID_PN         0
#define  MAC_ENTRY_ADDR_TYPE_UPSID_CPU_OR_INT 1
#define  MAC_ENTRY_ADDR_TYPE_GLAG             2
#define  MAC_ENTRY_ADDR_TYPE_MC_IDX           3

#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000

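/* SW copy of a MAC table entry. Entries are kept in sparx5->mact_entries and
 * used by the pull worker to detect added, moved and removed addresses.
 */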
struct sparx5_mact_entry {
	struct list_head list;
	unsigned char mac[ETH_ALEN];
	u32 flags;
#define MAC_ENT_ALIVE	BIT(0)
#define MAC_ENT_MOVED	BIT(1)
#define MAC_ENT_LOCK	BIT(2)
	u16 vid;
	u16 port;
};

static int sparx5_mact_get_status(struct sparx5 *sparx5)
{
	return spx5_rd(sparx5, LRN_COMMON_ACCESS_CTRL);
}

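/* Wait for the issued one-shot MAC table command to complete */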
static int sparx5_mact_wait_for_completion(struct sparx5 *sparx5)
{
	u32 val;

	return readx_poll_timeout(sparx5_mact_get_status,
		sparx5, val,
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(val) == 0,
		TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}

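/* Select the entry to operate on by writing the MAC address and VLAN to the
 * MAC table access registers.
 */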
static void sparx5_mact_select(struct sparx5 *sparx5,
			       const unsigned char mac[ETH_ALEN],
			       u16 vid)
{
	u32 macl = 0, mach = 0;

	/* Set the MAC address to handle and the associated VLAN in a format
	 * understood by the hardware.
	 */
	mach |= vid    << 16;
	mach |= mac[0] << 8;
	mach |= mac[1] << 0;
	macl |= mac[2] << 24;
	macl |= mac[3] << 16;
	macl |= mac[4] << 8;
	macl |= mac[5] << 0;

	spx5_wr(mach, sparx5, LRN_MAC_ACCESS_CFG_0);
	spx5_wr(macl, sparx5, LRN_MAC_ACCESS_CFG_1);
}

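/* Add a locked (static) entry for mac/vid. A pgid below SPX5_PORTS selects a
 * front port destination; otherwise pgid selects a multicast index.
 */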
int sparx5_mact_learn(struct sparx5 *sparx5, int pgid,
		      const unsigned char mac[ETH_ALEN], u16 vid)
{
	int addr, type, ret;

	if (pgid < SPX5_PORTS) {
		type = MAC_ENTRY_ADDR_TYPE_UPSID_PN;
		addr = pgid % 32;
		addr += (pgid / 32) << 5; /* Add upsid */
	} else {
		type = MAC_ENTRY_ADDR_TYPE_MC_IDX;
		addr = pgid - SPX5_PORTS;
	}

	mutex_lock(&sparx5->lock);

	sparx5_mact_select(sparx5, mac, vid);

	/* MAC entry properties */
	spx5_wr(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(addr) |
		LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(type) |
		LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(1) |
		LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(1),
		sparx5, LRN_MAC_ACCESS_CFG_2);
	spx5_wr(0, sparx5, LRN_MAC_ACCESS_CFG_3);

	/* Insert/learn new entry */
	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LEARN) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	ret = sparx5_mact_wait_for_completion(sparx5);

	mutex_unlock(&sparx5->lock);

	return ret;
}

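/* Remove a multicast address (on the port's PVID) from the MAC table */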
int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;

	return sparx5_mact_forget(sparx5, addr, port->pvid);
}

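/* Direct a multicast address towards the CPU port group (PGID_CPU) on the
 * port's PVID.
 */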
int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;

	return sparx5_mact_learn(sparx5, PGID_CPU, addr, port->pvid);
}

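/* Read back the entry found by the previous command. Returns 0 and fills in
 * mac/vid/cfg2 if the entry is valid, otherwise -ENOENT.
 */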
static int sparx5_mact_get(struct sparx5 *sparx5,
			   unsigned char mac[ETH_ALEN],
			   u16 *vid, u32 *pcfg2)
{
	u32 mach, macl, cfg2;
	int ret = -ENOENT;

	cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
	if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2)) {
		mach = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_0);
		macl = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_1);
		mac[0] = ((mach >> 8)  & 0xff);
		mac[1] = ((mach >> 0)  & 0xff);
		mac[2] = ((macl >> 24) & 0xff);
		mac[3] = ((macl >> 16) & 0xff);
		mac[4] = ((macl >> 8)  & 0xff);
		mac[5] = ((macl >> 0)  & 0xff);
		*vid = mach >> 16;
		*pcfg2 = cfg2;
		ret = 0;
	}

	return ret;
}

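/* Start a FIND_SMALLEST scan from the given MAC/VID and return the next valid
 * entry's MAC, VID and CFG_2 value. Returns true if an entry was found.
 */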
bool sparx5_mact_getnext(struct sparx5 *sparx5,
			 unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2)
{
	u32 cfg2;
	int ret;

	mutex_lock(&sparx5->lock);

	sparx5_mact_select(sparx5, mac, *vid);

	spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(1) |
		LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
		sparx5, LRN_SCAN_NEXT_CFG);
	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
		(MAC_CMD_FIND_SMALLEST) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	ret = sparx5_mact_wait_for_completion(sparx5);
	if (ret == 0) {
		ret = sparx5_mact_get(sparx5, mac, vid, &cfg2);
		if (ret == 0)
			*pcfg2 = cfg2;
	}

	mutex_unlock(&sparx5->lock);

	return ret == 0;
}

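/* Look up mac/vid in the HW table. Returns a positive value if a valid entry
 * was found, 0 if not found, or a negative error code on timeout.
 */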
static int sparx5_mact_lookup(struct sparx5 *sparx5,
			      const unsigned char mac[ETH_ALEN],
			      u16 vid)
{
	int ret;

	mutex_lock(&sparx5->lock);

	sparx5_mact_select(sparx5, mac, vid);

	/* Issue a lookup command */
	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LOOKUP) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	ret = sparx5_mact_wait_for_completion(sparx5);
	if (ret)
		goto out;

	ret = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET
		(spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2));

out:
	mutex_unlock(&sparx5->lock);

	return ret;
}

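/* Remove the mac/vid entry from the HW table */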
int sparx5_mact_forget(struct sparx5 *sparx5,
		       const unsigned char mac[ETH_ALEN], u16 vid)
{
	int ret;

	mutex_lock(&sparx5->lock);

	sparx5_mact_select(sparx5, mac, vid);

	/* Issue an unlearn command */
	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_UNLEARN) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	ret = sparx5_mact_wait_for_completion(sparx5);

	mutex_unlock(&sparx5->lock);

	return ret;
}

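/* Allocate and initialize a SW MAC table entry (not yet added to the list) */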
static struct sparx5_mact_entry *alloc_mact_entry(struct sparx5 *sparx5,
						  const unsigned char *mac,
						  u16 vid, u16 port_index)
{
	struct sparx5_mact_entry *mact_entry;

	mact_entry = devm_kzalloc(sparx5->dev,
				  sizeof(*mact_entry), GFP_ATOMIC);
	if (!mact_entry)
		return NULL;

	memcpy(mact_entry->mac, mac, ETH_ALEN);
	mact_entry->vid = vid;
	mact_entry->port = port_index;
	return mact_entry;
}

static struct sparx5_mact_entry *find_mact_entry(struct sparx5 *sparx5,
						 const unsigned char *mac,
						 u16 vid, u16 port_index)
{
	struct sparx5_mact_entry *mact_entry;
	struct sparx5_mact_entry *res = NULL;

	mutex_lock(&sparx5->mact_lock);
	list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
		if (mact_entry->vid == vid &&
		    ether_addr_equal(mac, mact_entry->mac) &&
		    mact_entry->port == port_index) {
			res = mact_entry;
			break;
		}
	}
	mutex_unlock(&sparx5->mact_lock);

	return res;
}

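/* Send a switchdev FDB notification on behalf of the given net device */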
static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
				      const char *mac, u16 vid,
				      struct net_device *dev, bool offloaded)
{
	struct switchdev_notifier_fdb_info info = {};

	info.addr = mac;
	info.vid = vid;
	info.offloaded = offloaded;
	call_switchdev_notifiers(type, dev, &info.info, NULL);
}

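/* Add an entry to the HW table and mirror it in the SW table. The SW entry is
 * locked (MAC_ENT_LOCK) so the pull worker will not age it out, and the
 * bridge is notified when the entry is new.
 */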
int sparx5_add_mact_entry(struct sparx5 *sparx5,
			  struct sparx5_port *port,
			  const unsigned char *addr, u16 vid)
{
	struct sparx5_mact_entry *mact_entry;
	int ret;

	ret = sparx5_mact_lookup(sparx5, addr, vid);
	if (ret)
		return 0;

	/* In case the entry already exists, don't add it again to SW,
	 * just update HW. We still need to look in the actual HW table
	 * because it is possible for an entry to be learned by HW and,
	 * before the mact worker starts, the frame reaches the CPU and
	 * the CPU adds the entry, but without the extern_learn flag.
	 */
	mact_entry = find_mact_entry(sparx5, addr, vid, port->portno);
	if (mact_entry)
		goto update_hw;

	/* Add the entry to the SW MAC table so that no notification is
	 * generated when SW pulls the table again.
	 */
	mact_entry = alloc_mact_entry(sparx5, addr, vid, port->portno);
	if (!mact_entry)
		return -ENOMEM;

	mutex_lock(&sparx5->mact_lock);
	list_add_tail(&mact_entry->list, &sparx5->mact_entries);
	mutex_unlock(&sparx5->mact_lock);

update_hw:
	ret = sparx5_mact_learn(sparx5, port->portno, addr, vid);

	/* New entry? */
	if (mact_entry->flags == 0) {
		mact_entry->flags |= MAC_ENT_LOCK; /* Don't age this */
		sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, addr, vid,
					  port->ndev, true);
	}

	return ret;
}

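/* Remove matching entries from both the SW and the HW MAC table. A vid of 0
 * matches any VLAN.
 */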
int sparx5_del_mact_entry(struct sparx5 *sparx5,
			  const unsigned char *addr,
			  u16 vid)
{
	struct sparx5_mact_entry *mact_entry, *tmp;

	/* Delete the entry from the SW MAC table so that no notification
	 * is generated when SW pulls the table again.
	 */
	mutex_lock(&sparx5->mact_lock);
	list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
				 list) {
		if ((vid == 0 || mact_entry->vid == vid) &&
		    ether_addr_equal(addr, mact_entry->mac)) {
			sparx5_mact_forget(sparx5, addr, mact_entry->vid);

			list_del(&mact_entry->list);
			devm_kfree(sparx5->dev, mact_entry);
		}
	}
	mutex_unlock(&sparx5->mact_lock);

	return 0;
}

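/* Handle one entry found during a HW table scan: mark the matching SW entry
 * alive, detect port moves, add unknown entries to the SW table and notify
 * the bridge about new or moved entries. Entries that do not point at a
 * bridged front port are ignored.
 */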
static void sparx5_mact_handle_entry(struct sparx5 *sparx5,
				     unsigned char mac[ETH_ALEN],
				     u16 vid, u32 cfg2)
{
	struct sparx5_mact_entry *mact_entry;
	bool found = false;
	u16 port;

	if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(cfg2) !=
	    MAC_ENTRY_ADDR_TYPE_UPSID_PN)
		return;

	port = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(cfg2);
	if (port >= SPX5_PORTS)
		return;

	if (!test_bit(port, sparx5->bridge_mask))
		return;

	mutex_lock(&sparx5->mact_lock);
	list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
		if (mact_entry->vid == vid &&
		    ether_addr_equal(mac, mact_entry->mac)) {
			found = true;
			mact_entry->flags |= MAC_ENT_ALIVE;
			if (mact_entry->port != port) {
				dev_warn(sparx5->dev, "Entry move: %d -> %d\n",
					 mact_entry->port, port);
				mact_entry->port = port;
				mact_entry->flags |= MAC_ENT_MOVED;
			}
			/* Entry handled */
			break;
		}
	}
	mutex_unlock(&sparx5->mact_lock);

	if (found && !(mact_entry->flags & MAC_ENT_MOVED))
		/* Present, not moved */
		return;

	if (!found) {
		/* Entry not found - now add */
		mact_entry = alloc_mact_entry(sparx5, mac, vid, port);
		if (!mact_entry)
			return;

		mact_entry->flags |= MAC_ENT_ALIVE;
		mutex_lock(&sparx5->mact_lock);
		list_add_tail(&mact_entry->list, &sparx5->mact_entries);
		mutex_unlock(&sparx5->mact_lock);
	}

	/* New or moved entry - notify bridge */
	sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
				  mac, vid, sparx5->ports[port]->ndev,
				  true);
}

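/* Periodic worker that walks the complete HW MAC table: it refreshes the SW
 * table via sparx5_mact_handle_entry(), then removes (and notifies the bridge
 * about) SW entries that were not seen in HW and are not locked. The work
 * re-queues itself when done.
 */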
void sparx5_mact_pull_work(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct sparx5 *sparx5 = container_of(del_work, struct sparx5,
					     mact_work);
	struct sparx5_mact_entry *mact_entry, *tmp;
	unsigned char mac[ETH_ALEN];
	u32 cfg2;
	u16 vid;
	int ret;

	/* Reset MAC entry flags */
	mutex_lock(&sparx5->mact_lock);
	list_for_each_entry(mact_entry, &sparx5->mact_entries, list)
		mact_entry->flags &= MAC_ENT_LOCK;
	mutex_unlock(&sparx5->mact_lock);

	/* MAIN mac address processing loop */
	vid = 0;
	memset(mac, 0, sizeof(mac));
	do {
		mutex_lock(&sparx5->lock);
		sparx5_mact_select(sparx5, mac, vid);
		spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
			sparx5, LRN_SCAN_NEXT_CFG);
		spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
			(MAC_CMD_FIND_SMALLEST) |
			LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
			sparx5, LRN_COMMON_ACCESS_CTRL);
		ret = sparx5_mact_wait_for_completion(sparx5);
		if (ret == 0)
			ret = sparx5_mact_get(sparx5, mac, &vid, &cfg2);
		mutex_unlock(&sparx5->lock);
		if (ret == 0)
			sparx5_mact_handle_entry(sparx5, mac, vid, cfg2);
	} while (ret == 0);

	mutex_lock(&sparx5->mact_lock);
	list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
				 list) {
		/* If the entry is in HW or permanent, then skip */
		if (mact_entry->flags & (MAC_ENT_ALIVE | MAC_ENT_LOCK))
			continue;

		sparx5_fdb_call_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
					  mact_entry->mac, mact_entry->vid,
					  sparx5->ports[mact_entry->port]->ndev,
					  true);

		list_del(&mact_entry->list);
		devm_kfree(sparx5->dev, mact_entry);
	}
	mutex_unlock(&sparx5->mact_lock);

	queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
			   SPX5_MACT_PULL_DELAY);
}

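/* Configure the HW autoageing period: msecs is converted to 10 ms units and
 * the period value is halved (one-bit ageing).
 */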
void sparx5_set_ageing(struct sparx5 *sparx5, int msecs)
{
	int value = max(1, msecs / 10); /* unit 10 ms */

	spx5_rmw(LRN_AUTOAGE_CFG_UNIT_SIZE_SET(2) | /* 10 ms */
		 LRN_AUTOAGE_CFG_PERIOD_VAL_SET(value / 2), /* one bit ageing */
		 LRN_AUTOAGE_CFG_UNIT_SIZE |
		 LRN_AUTOAGE_CFG_PERIOD_VAL,
		 sparx5,
		 LRN_AUTOAGE_CFG(0));
}

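/* Initialize the MAC table access lock, clear the HW MAC table and set the
 * default bridge ageing time.
 */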
void sparx5_mact_init(struct sparx5 *sparx5)
{
	mutex_init(&sparx5->lock);

	/* Flush MAC table */
	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_CLEAR_ALL) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	if (sparx5_mact_wait_for_completion(sparx5) != 0)
		dev_warn(sparx5->dev, "MAC flush error\n");

	sparx5_set_ageing(sparx5, BR_DEFAULT_AGEING_TIME / HZ * 1000);
}