// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip KSZ9477 switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/bitfield.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz9477_reg.h"
#include "ksz_common.h"
#include "ksz9477.h"

static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
	regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
}

static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
			 bool set)
{
	regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}

static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
{
	regmap_update_bits(ksz_regmap_32(dev), addr, bits, set ? bits : 0);
}

static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
			       u32 bits, bool set)
{
	regmap_update_bits(ksz_regmap_32(dev), PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}

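/* Program the switch-wide maximum frame size.  Only the CPU port is handled
 * here; the value written covers the MTU plus the VLAN Ethernet header and
 * FCS so that tagged frames are not truncated.
 */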
int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
{
	u16 frame_size;

	if (!dsa_is_cpu_port(dev->ds, port))
		return 0;

	frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	return regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2,
				  REG_SW_MTU_MASK, frame_size);
}

static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(ksz_regmap_8(dev), REG_SW_VLAN_CTRL,
					val, !(val & VLAN_START), 10, 1000);
}

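/* Read one entry from the VLAN table.  The entry index is written first,
 * then a read operation is started and polled for completion before the
 * three table words are fetched.
 */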
static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read vlan table\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}

static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to write vlan table\n");
		goto exit;
	}

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

	/* update vlan cache table */
	dev->vlan_cache[vid].table[0] = vlan_table[0];
	dev->vlan_cache[vid].table[1] = vlan_table[1];
	dev->vlan_cache[vid].table[2] = vlan_table[2];

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}

static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
{
	ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
	ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
	ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
	ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
}

static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
{
	ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
	ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
	ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
	ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
}

static int ksz9477_wait_alu_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(ksz_regmap_32(dev), REG_SW_ALU_CTRL__4,
					val, !(val & ALU_START), 10, 1000);
}

static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(ksz_regmap_32(dev),
					REG_SW_ALU_STAT_CTRL__4,
					val, !(val & ALU_STAT_START),
					10, 1000);
}

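/* Perform a software reset and apply the base configuration: disable the
 * SPI data output edge selection, set the default lookup engine options,
 * mask the switch and port interrupts, and set up the reference clock
 * output on chips that support it.
 */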
int ksz9477_reset_switch(struct ksz_device *dev)
{
	u8 data8;
	u32 data32;

	/* reset switch */
	ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);

	/* turn off SPI DO Edge select */
	regmap_update_bits(ksz_regmap_8(dev), REG_SW_GLOBAL_SERIAL_CTRL_0,
			   SPI_AUTO_EDGE_DETECTION, 0);

	/* default configuration */
	ksz_write8(dev, REG_SW_LUE_CTRL_1,
		   SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER);

	/* disable interrupts */
	ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
	ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
	ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);

	/* KSZ9893 compatible chips do not support refclk configuration */
	if (dev->chip_id == KSZ9893_CHIP_ID ||
	    dev->chip_id == KSZ8563_CHIP_ID ||
	    dev->chip_id == KSZ9563_CHIP_ID)
		return 0;

	data8 = SW_ENABLE_REFCLKO;
	if (dev->synclko_disable)
		data8 = 0;
	else if (dev->synclko_125)
		data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
	ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);

	return 0;
}

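/* Read a single MIB counter for a port.  The freeze bit is preserved while
 * the read is triggered, the status register is polled until the read
 * completes, and the hardware counter value is added to the running total.
 */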
void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
	struct ksz_port *p = &dev->ports[port];
	unsigned int val;
	u32 data;
	int ret;

	/* retain the flush/freeze bit */
	data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
	data |= MIB_COUNTER_READ;
	data |= (addr << MIB_COUNTER_INDEX_S);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);

	ret = regmap_read_poll_timeout(ksz_regmap_32(dev),
			PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
			val, !(val & MIB_COUNTER_READ), 10, 1000);
	/* failed to read the MIB counter, bail out */
	if (ret) {
		dev_dbg(dev->dev, "Failed to get MIB\n");
		return;
	}

	/* count resets upon read */
	ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
	*cnt += data;
}

void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
		       u64 *dropped, u64 *cnt)
{
	addr = dev->info->mib_names[addr].index;
	ksz9477_r_mib_cnt(dev, port, addr, cnt);
}

void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
	u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
	struct ksz_port *p = &dev->ports[port];

	/* enable/disable the port for flush/freeze function */
	mutex_lock(&p->mib.cnt_mutex);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, val);

	/* used by MIB counter reading code to know freeze is enabled */
	p->freeze = freeze;
	mutex_unlock(&p->mib.cnt_mutex);
}

void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;

	/* flush all enabled port MIB counters */
	mutex_lock(&mib->cnt_mutex);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
		     MIB_COUNTER_FLUSH_FREEZE);
	ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
	mutex_unlock(&mib->cnt_mutex);
}

static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
				 u16 *data)
{
	/* The KSZ8563R does not have extended registers, but the BMSR_ESTATEN
	 * and BMSR_ERCAP bits are still set.
	 */
	if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
		*data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
}

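/* Read a PHY register.  Ports without an internal PHY return fixed values
 * emulating a gigabit capable PHY; internal PHYs are accessed through the
 * port register space with chip specific quirks applied.
 */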
int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
	u16 val = 0xffff;
	int ret;

	/* No real PHY after this. Simulate the PHY.
	 * A fixed PHY can be set up in the device tree, but this function is
	 * still called for that port during initialization.
	 * An RGMII PHY cannot be accessed at all, so a fixed PHY should be
	 * used.  Support for SGMII PHYs will be added later.
	 */
	if (!dev->info->internal_phy[addr]) {
		struct ksz_port *p = &dev->ports[addr];

		switch (reg) {
		case MII_BMCR:
			val = 0x1140;
			break;
		case MII_BMSR:
			val = 0x796d;
			break;
		case MII_PHYSID1:
			val = 0x0022;
			break;
		case MII_PHYSID2:
			val = 0x1631;
			break;
		case MII_ADVERTISE:
			val = 0x05e1;
			break;
		case MII_LPA:
			val = 0xc5e1;
			break;
		case MII_CTRL1000:
			val = 0x0700;
			break;
		case MII_STAT1000:
			if (p->phydev.speed == SPEED_1000)
				val = 0x3800;
			else
				val = 0;
			break;
		}
	} else {
		ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
		if (ret)
			return ret;

		ksz9477_r_phy_quirks(dev, addr, reg, &val);
	}

	*data = val;

	return 0;
}

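/* Write a PHY register of an internal PHY.  Ports without an internal PHY
 * are silently ignored.  Registers above 0x0f are written as 32-bit
 * read-modify-write accesses to work around the register access errata.
 */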
int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
	u32 mask, val32;

	/* No real PHY after this. */
	if (!dev->info->internal_phy[addr])
		return 0;

	if (reg < 0x10)
		return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);

	/* Errata: When using SPI, I2C, or in-band register access,
	 * writes to certain PHY registers should be performed as
	 * 32-bit writes instead of 16-bit writes.
	 */
	val32 = val;
	mask = 0xffff;
	if ((reg & 1) == 0) {
		val32 <<= 16;
		mask <<= 16;
	}
	reg &= ~1;
	return ksz_prmw32(dev, addr, 0x100 + (reg << 1), mask, val32);
}

void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
	ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
}

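/* Flush dynamically learned MAC addresses.  For a valid port number,
 * learning is temporarily disabled so that only that port's entries are
 * flushed; otherwise the whole dynamic MAC table is flushed.
 */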
void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
	const u16 *regs = dev->info->regs;
	u8 data;

	regmap_update_bits(ksz_regmap_8(dev), REG_SW_LUE_CTRL_2,
			   SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
			   SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);

	if (port < dev->info->port_cnt) {
		/* flush individual port */
		ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
		if (!(data & PORT_LEARN_DISABLE))
			ksz_pwrite8(dev, port, regs[P_STP_CTRL],
				    data | PORT_LEARN_DISABLE);
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
		ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
	} else {
		/* flush all */
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
	}
}

int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
				bool flag, struct netlink_ext_ack *extack)
{
	if (flag) {
		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
			     PORT_VLAN_LOOKUP_VID_0, true);
		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
	} else {
		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
			     PORT_VLAN_LOOKUP_VID_0, false);
	}

	return 0;
}

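/* Add a port to a VLAN table entry.  The untagged membership bit follows
 * the BRIDGE_VLAN_INFO_UNTAGGED flag, the CPU port always stays tagged,
 * and the port default VID is updated when the PVID flag is set.
 */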
int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	u32 vlan_table[3];
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	int err;

	err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to get vlan table");
		return err;
	}

	vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
	if (untagged)
		vlan_table[1] |= BIT(port);
	else
		vlan_table[1] &= ~BIT(port);
	vlan_table[1] &= ~(BIT(dev->cpu_port));

	vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);

	err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to set vlan table");
		return err;
	}

	/* change PVID */
	if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
		ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);

	return 0;
}

int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	u32 vlan_table[3];
	u16 pvid;

	ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
	pvid = pvid & 0xFFF;

	if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
		dev_dbg(dev->dev, "Failed to get vlan table\n");
		return -ETIMEDOUT;
	}

	vlan_table[2] &= ~BIT(port);

	if (pvid == vlan->vid)
		pvid = 1;

	if (untagged)
		vlan_table[1] &= ~BIT(port);

	if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
		dev_dbg(dev->dev, "Failed to set vlan table\n");
		return -ETIMEDOUT;
	}

	ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);

	return 0;
}

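/* Add a static entry to the address lookup (ALU) table.  The existing entry
 * for the MAC address and VID is read back first so that the port membership
 * can be extended instead of overwritten.
 */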
int ksz9477_fdb_add(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* find any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	/* read ALU entry */
	ksz9477_read_table(dev, alu_table);

	/* update ALU entry */
	alu_table[0] = ALU_V_STATIC_VALID;
	alu_table[1] |= BIT(port);
	if (vid)
		alu_table[1] |= ALU_V_USE_FID;
	alu_table[2] = (vid << ALU_V_FID_S);
	alu_table[2] |= ((addr[0] << 8) | addr[1]);
	alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
	alu_table[3] |= ((addr[4] << 8) | addr[5]);

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_fdb_del(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* read any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
	if (alu_table[0] & ALU_V_STATIC_VALID) {
		ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
		ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
		ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);

		/* clear forwarding port */
		alu_table[1] &= ~BIT(port);

		/* if there is no port to forward to, clear the table */
		if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
			alu_table[0] = 0;
			alu_table[1] = 0;
			alu_table[2] = 0;
			alu_table[3] = 0;
		}
	} else {
		alu_table[0] = 0;
		alu_table[1] = 0;
		alu_table[2] = 0;
		alu_table[3] = 0;
	}

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
{
	alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
	alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
	alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
	alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
			ALU_V_PRIO_AGE_CNT_M;
	alu->mstp = alu_table[0] & ALU_V_MSTP_M;

	alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
	alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
	alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;

	alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;

	alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
	alu->mac[1] = alu_table[2] & 0xFF;
	alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
	alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
	alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
	alu->mac[5] = alu_table[3] & 0xFF;
}

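/* Walk the ALU table using the hardware search function and report every
 * entry that forwards to the given port through the DSA dump callback.
 */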
int ksz9477_fdb_dump(struct ksz_device *dev, int port,
		     dsa_fdb_dump_cb_t *cb, void *data)
{
	int ret = 0;
	u32 ksz_data;
	u32 alu_table[4];
	struct alu_struct alu;
	int timeout;

	mutex_lock(&dev->alu_mutex);

	/* start ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);

	do {
		timeout = 1000;
		do {
			ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
			if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
				break;
			usleep_range(1, 10);
		} while (timeout-- > 0);

		if (!timeout) {
			dev_dbg(dev->dev, "Failed to search ALU\n");
			ret = -ETIMEDOUT;
			goto exit;
		}

		if (!(ksz_data & ALU_VALID))
			continue;

		/* read ALU table */
		ksz9477_read_table(dev, alu_table);

		ksz9477_convert_alu(&alu, alu_table);

		if (alu.port_forward & BIT(port)) {
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				goto exit;
		}
	} while (ksz_data & ALU_START);

exit:

	/* stop ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);

	mutex_unlock(&dev->alu_mutex);

	return ret;
}

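/* Add a port to a static multicast entry.  The static ALU table is scanned
 * for a matching or empty slot; -ENOSPC is returned when the table is full.
 */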
int ksz9477_mdb_add(struct ksz_device *dev, int port,
		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
	u32 static_table[4];
	const u8 *shifts;
	const u32 *masks;
	u32 data;
	int index;
	u32 mac_hi, mac_lo;
	int err = 0;

	shifts = dev->info->shifts;
	masks = dev->info->masks;

	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

	mutex_lock(&dev->alu_mutex);

	for (index = 0; index < dev->info->num_statics; index++) {
		/* find empty slot first */
		data = (index << shifts[ALU_STAT_INDEX]) |
			masks[ALU_STAT_READ] | ALU_STAT_START;
		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

		/* wait to be finished */
		err = ksz9477_wait_alu_sta_ready(dev);
		if (err) {
			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
			goto exit;
		}

		/* read ALU static table */
		ksz9477_read_table(dev, static_table);

		if (static_table[0] & ALU_V_STATIC_VALID) {
			/* check this has same vid & mac address */
			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
			    static_table[3] == mac_lo) {
				/* found matching one */
				break;
			}
		} else {
			/* found empty one */
			break;
		}
	}

	/* no available entry */
	if (index == dev->info->num_statics) {
		err = -ENOSPC;
		goto exit;
	}

	/* add entry */
	static_table[0] = ALU_V_STATIC_VALID;
	static_table[1] |= BIT(port);
	if (mdb->vid)
		static_table[1] |= ALU_V_USE_FID;
	static_table[2] = (mdb->vid << ALU_V_FID_S);
	static_table[2] |= mac_hi;
	static_table[3] = mac_lo;

	ksz9477_write_table(dev, static_table);

	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

	/* wait to be finished */
	if (ksz9477_wait_alu_sta_ready(dev))
		dev_dbg(dev->dev, "Failed to write ALU STATIC\n");

exit:
	mutex_unlock(&dev->alu_mutex);
	return err;
}

int ksz9477_mdb_del(struct ksz_device *dev, int port,
		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
	u32 static_table[4];
	const u8 *shifts;
	const u32 *masks;
	u32 data;
	int index;
	int ret = 0;
	u32 mac_hi, mac_lo;

	shifts = dev->info->shifts;
	masks = dev->info->masks;

	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

	mutex_lock(&dev->alu_mutex);

	for (index = 0; index < dev->info->num_statics; index++) {
		/* look for a matching entry */
		data = (index << shifts[ALU_STAT_INDEX]) |
			masks[ALU_STAT_READ] | ALU_STAT_START;
		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

		/* wait to be finished */
		ret = ksz9477_wait_alu_sta_ready(dev);
		if (ret) {
			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
			goto exit;
		}

		/* read ALU static table */
		ksz9477_read_table(dev, static_table);

		if (static_table[0] & ALU_V_STATIC_VALID) {
			/* check this has same vid & mac address */
			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
			    static_table[3] == mac_lo) {
				/* found matching one */
				break;
			}
		}
	}

	/* no matching entry found */
	if (index == dev->info->num_statics)
		goto exit;

	/* clear port */
	static_table[1] &= ~BIT(port);

	if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
		/* delete entry */
		static_table[0] = 0;
		static_table[1] = 0;
		static_table[2] = 0;
		static_table[3] = 0;
	}

	ksz9477_write_table(dev, static_table);

	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

	/* wait to be finished */
	ret = ksz9477_wait_alu_sta_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU STATIC\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

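/* Enable port mirroring.  Only one sniffer port is supported, so adding a
 * rule for a different destination while another sniffer port is active is
 * rejected with -EBUSY.
 */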
int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
			    struct dsa_mall_mirror_tc_entry *mirror,
			    bool ingress, struct netlink_ext_ack *extack)
{
	u8 data;
	int p;

	/* Limit to one sniffer port.
	 * Check if any port is already set up for sniffing; if so, tell the
	 * user to remove the previous entry and exit.
	 */
	for (p = 0; p < dev->info->port_cnt; p++) {
		/* Skip the current sniffing port */
		if (p == mirror->to_local_port)
			continue;

		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

		if (data & PORT_MIRROR_SNIFFER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Sniffer port is already configured, delete existing rules & retry");
			return -EBUSY;
		}
	}

	if (ingress)
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
	else
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);

	/* configure mirror port */
	ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
		     PORT_MIRROR_SNIFFER, true);

	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);

	return 0;
}

void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
			     struct dsa_mall_mirror_tc_entry *mirror)
{
	bool in_use = false;
	u8 data;
	int p;

	if (mirror->ingress)
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
	else
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);

	/* Check if any port still refers to the sniffer port */
	for (p = 0; p < dev->info->port_cnt; p++) {
		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

		if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
			in_use = true;
			break;
		}
	}

	/* delete sniffing if there are no other mirroring rules */
	if (!in_use)
		ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
			     PORT_MIRROR_SNIFFER, false);
}

static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
{
	phy_interface_t interface;
	bool gbit;

	if (dev->info->internal_phy[port])
		return PHY_INTERFACE_MODE_NA;

	gbit = ksz_get_gbit(dev, port);

	interface = ksz_get_xmii(dev, port, gbit);

	return interface;
}

void ksz9477_get_caps(struct ksz_device *dev, int port,
		      struct phylink_config *config)
{
	config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
				   MAC_SYM_PAUSE;

	if (dev->info->gbit_capable[port])
		config->mac_capabilities |= MAC_1000FD;
}

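/* Program the dynamic MAC address ageing period.  The period is written in
 * seconds, split across REG_SW_LUE_CTRL_3 (bits 7:0) and REG_SW_LUE_CTRL_0
 * (bits 10:8).
 */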
int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
	u32 secs = msecs / 1000;
	u8 value;
	u8 data;
	int ret;

	value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);

	ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
	if (ret < 0)
		return ret;

	data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);

	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
	if (ret < 0)
		return ret;

	value &= ~SW_AGE_CNT_M;
	value |= FIELD_PREP(SW_AGE_CNT_M, data);

	return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
}

void ksz9477_port_queue_split(struct ksz_device *dev, int port)
{
	u8 data;

	if (dev->info->num_tx_queues == 8)
		data = PORT_EIGHT_QUEUE;
	else if (dev->info->num_tx_queues == 4)
		data = PORT_FOUR_QUEUE;
	else if (dev->info->num_tx_queues == 2)
		data = PORT_TWO_QUEUE;
	else
		data = PORT_SINGLE_QUEUE;

	ksz_prmw8(dev, port, REG_PORT_CTRL_0, PORT_QUEUE_SPLIT_MASK, data);
}

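/* Apply the per-port defaults: tail tagging on the host port, queue split,
 * storm control, 802.1p priority, forced flow control on MAC-only ports and
 * the initial port membership.
 */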
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
	struct dsa_switch *ds = dev->ds;
	u16 data16;
	u8 member;

	/* enable tag tail for host port */
	if (cpu_port)
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
			     true);

	ksz9477_port_queue_split(dev, port);

	ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);

	/* set back pressure */
	ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);

	/* enable broadcast storm limit */
	ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);

	/* disable DiffServ priority */
	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);

	/* replace priority */
	ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
		     false);
	ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
			   MTI_PVID_REPLACE, false);

	/* enable 802.1p priority */
	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);

	/* force flow control for non-PHY ports only */
	ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
		     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
		     !dev->info->internal_phy[port]);

	if (cpu_port)
		member = dsa_user_ports(ds);
	else
		member = BIT(dsa_upstream_port(ds, port));

	ksz9477_cfg_port_member(dev, port, member);

	/* clear pending interrupts */
	if (dev->info->internal_phy[port])
		ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
}

void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (dsa_is_cpu_port(ds, i) &&
		    (dev->info->cpu_ports & (1 << i))) {
			phy_interface_t interface;
			const char *prev_msg;
			const char *prev_mode;

			dev->cpu_port = i;
			p = &dev->ports[i];

			/* Read the XMII register to determine the host port
			 * interface.  If the interface is set explicitly in
			 * the device tree, note the difference to help
			 * debugging.
			 */
			interface = ksz9477_get_interface(dev, i);
			if (!p->interface) {
				if (dev->compat_interface) {
					dev_warn(dev->dev,
						 "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
						 "Please update your device tree.\n",
						 i);
					p->interface = dev->compat_interface;
				} else {
					p->interface = interface;
				}
			}
			if (interface && interface != p->interface) {
				prev_msg = " instead of ";
				prev_mode = phy_modes(interface);
			} else {
				prev_msg = "";
				prev_mode = "";
			}
			dev_info(dev->dev,
				 "Port%d: using phy mode %s%s%s\n",
				 i,
				 phy_modes(p->interface),
				 prev_msg,
				 prev_mode);

			/* enable cpu port */
			ksz9477_port_setup(dev, i, true);
		}
	}

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (i == dev->cpu_port)
			continue;
		ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
	}
}

int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
	const u32 *masks;
	u32 data;
	int ret;

	masks = dev->info->masks;

	/* Enable Reserved multicast table */
	ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);

	/* Set the Override bit for forwarding BPDU packet to CPU */
	ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
			  ALU_V_OVERRIDE | BIT(dev->cpu_port));
	if (ret < 0)
		return ret;

	data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];

	ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
	if (ret < 0)
		return ret;

	/* wait to be finished */
	ret = ksz9477_wait_alu_sta_ready(dev);
	if (ret < 0) {
		dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
		return ret;
	}

	return 0;
}

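/* Switch-wide setup called from the DSA framework: configure the unicast
 * VLAN boundary, jumbo frame support with the default MTU, queue based
 * egress rate limiting and the global MIB counter freeze function.
 */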
int ksz9477_setup(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	int ret = 0;

	ds->mtu_enforcement_ingress = true;

	/* Required for port partitioning. */
	ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
		      true);

	/* Frame length checking does not work correctly with tail tagging. */
	ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);

	/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);

	/* Use collision based back pressure mode. */
	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE,
		SW_BACK_PRESSURE_COLLISION);

	/* Now we can configure default MTU value */
	ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
				 VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		return ret;

	/* queue based egress rate limit */
	ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);

	/* enable global MIB counter freeze function */
	ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);

	return 0;
}

u32 ksz9477_get_port_addr(int port, int offset)
{
	return PORT_CTRL_ADDR(port, offset);
}

int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
{
	val = val >> 8;

	return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
}

int ksz9477_switch_init(struct ksz_device *dev)
{
	u8 data8;
	int ret;

	dev->port_mask = (1 << dev->info->port_cnt) - 1;

	/* turn off SPI DO Edge select */
	ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
	if (ret)
		return ret;

	data8 &= ~SPI_AUTO_EDGE_DETECTION;
	ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
	if (ret)
		return ret;

	return 0;
}

void ksz9477_switch_exit(struct ksz_device *dev)
{
	ksz9477_reset_switch(dev);
}

MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
MODULE_LICENSE("GPL");