// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip KSZ9477 switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz9477_reg.h"
#include "ksz_common.h"
#include "ksz9477.h"

static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
	regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
}

static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
			 bool set)
{
	regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}

static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
{
	regmap_update_bits(dev->regmap[2], addr, bits, set ? bits : 0);
}

static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
			       u32 bits, bool set)
{
	regmap_update_bits(dev->regmap[2], PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}

int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
{
	u16 frame_size, max_frame = 0;
	int i;

	frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	/* Cache the per-port MTU setting */
	dev->ports[port].max_frame = frame_size;

	for (i = 0; i < dev->info->port_cnt; i++)
		max_frame = max(max_frame, dev->ports[i].max_frame);

	return regmap_update_bits(dev->regmap[1], REG_SW_MTU__2,
				  REG_SW_MTU_MASK, max_frame);
}

int ksz9477_max_mtu(struct ksz_device *dev, int port)
{
	return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
}

static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(dev->regmap[0], REG_SW_VLAN_CTRL,
					val, !(val & VLAN_START), 10, 1000);
}

static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read vlan table\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}

static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to write vlan table\n");
		goto exit;
	}

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

	/* update vlan cache table */
	dev->vlan_cache[vid].table[0] = vlan_table[0];
	dev->vlan_cache[vid].table[1] = vlan_table[1];
	dev->vlan_cache[vid].table[2] = vlan_table[2];

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}

static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
{
	ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
	ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
	ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
	ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
}

static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
{
	ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
	ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
	ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
	ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
}

static int ksz9477_wait_alu_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(dev->regmap[2], REG_SW_ALU_CTRL__4,
					val, !(val & ALU_START), 10, 1000);
}

static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(dev->regmap[2],
					REG_SW_ALU_STAT_CTRL__4,
					val, !(val & ALU_STAT_START),
					10, 1000);
}

int ksz9477_reset_switch(struct ksz_device *dev)
{
	u8 data8;
	u32 data32;

	/* reset switch */
	ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);

	/* turn off SPI DO Edge select */
	regmap_update_bits(dev->regmap[0], REG_SW_GLOBAL_SERIAL_CTRL_0,
			   SPI_AUTO_EDGE_DETECTION, 0);

	/* default configuration */
	ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
	data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
		SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
	ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);

	/* disable interrupts */
	ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
	ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
	ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);

	/* KSZ9893 compatible chips do not support refclk configuration */
	if (dev->chip_id == KSZ9893_CHIP_ID ||
	    dev->chip_id == KSZ8563_CHIP_ID)
		return 0;

	data8 = SW_ENABLE_REFCLKO;
	if (dev->synclko_disable)
		data8 = 0;
	else if (dev->synclko_125)
		data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
	ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);

	return 0;
}

void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
	struct ksz_port *p = &dev->ports[port];
	unsigned int val;
	u32 data;
	int ret;

	/* retain the flush/freeze bit */
	data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
	data |= MIB_COUNTER_READ;
	data |= (addr << MIB_COUNTER_INDEX_S);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);

	ret = regmap_read_poll_timeout(dev->regmap[2],
			PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
			val, !(val & MIB_COUNTER_READ), 10, 1000);
	/* failed to read MIB. get out of loop */
	if (ret) {
		dev_dbg(dev->dev, "Failed to get MIB\n");
		return;
	}

	/* count resets upon read */
	ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
	*cnt += data;
}

void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
		       u64 *dropped, u64 *cnt)
{
	addr = dev->info->mib_names[addr].index;
	ksz9477_r_mib_cnt(dev, port, addr, cnt);
}

void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
	u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
	struct ksz_port *p = &dev->ports[port];

	/* enable/disable the port for flush/freeze function */
	mutex_lock(&p->mib.cnt_mutex);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, val);

	/* used by MIB counter reading code to know freeze is enabled */
	p->freeze = freeze;
	mutex_unlock(&p->mib.cnt_mutex);
}

void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;

	/* flush all enabled port MIB counters */
	mutex_lock(&mib->cnt_mutex);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
		     MIB_COUNTER_FLUSH_FREEZE);
	ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
	mutex_unlock(&mib->cnt_mutex);
}

static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
				 u16 *data)
{
	/* The KSZ8563R does not have extended registers, but the
	 * BMSR_ESTATEN and BMSR_ERCAP bits are set.
	 */
	if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
		*data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
}

int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
	u16 val = 0xffff;
	int ret;

	/* No real PHY after this. Simulate the PHY.
	 * A fixed PHY can be set up in the device tree, but this function is
	 * still called for that port during initialization.
	 * For an RGMII PHY there is no way to access it, so the fixed PHY
	 * should be used. For an SGMII PHY the supporting code will be added
	 * later.
	 */
	if (!dev->info->internal_phy[addr]) {
		struct ksz_port *p = &dev->ports[addr];

		switch (reg) {
		case MII_BMCR:
			val = 0x1140;
			break;
		case MII_BMSR:
			val = 0x796d;
			break;
		case MII_PHYSID1:
			val = 0x0022;
			break;
		case MII_PHYSID2:
			val = 0x1631;
			break;
		case MII_ADVERTISE:
			val = 0x05e1;
			break;
		case MII_LPA:
			val = 0xc5e1;
			break;
		case MII_CTRL1000:
			val = 0x0700;
			break;
		case MII_STAT1000:
			if (p->phydev.speed == SPEED_1000)
				val = 0x3800;
			else
				val = 0;
			break;
		}
	} else {
		ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
		if (ret)
			return ret;

		ksz9477_r_phy_quirks(dev, addr, reg, &val);
	}

	*data = val;

	return 0;
}

int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
	/* No real PHY after this. */
	if (!dev->info->internal_phy[addr])
		return 0;

	return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
}

void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
	ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
}

void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
	const u16 *regs = dev->info->regs;
	u8 data;

	regmap_update_bits(dev->regmap[0], REG_SW_LUE_CTRL_2,
			   SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
			   SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);

	if (port < dev->info->port_cnt) {
		/* flush individual port */
		ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
		if (!(data & PORT_LEARN_DISABLE))
			ksz_pwrite8(dev, port, regs[P_STP_CTRL],
				    data | PORT_LEARN_DISABLE);
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
		ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
	} else {
		/* flush all */
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
	}
}

int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
				bool flag, struct netlink_ext_ack *extack)
{
	if (flag) {
		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
			     PORT_VLAN_LOOKUP_VID_0, true);
		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
	} else {
		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
			     PORT_VLAN_LOOKUP_VID_0, false);
	}

	return 0;
}

int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	u32 vlan_table[3];
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	int err;

	err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to get vlan table");
		return err;
	}

	vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
	if (untagged)
		vlan_table[1] |= BIT(port);
	else
		vlan_table[1] &= ~BIT(port);
	vlan_table[1] &= ~(BIT(dev->cpu_port));

	vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);

	err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to set vlan table");
		return err;
	}

	/* change PVID */
	if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
		ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);

	return 0;
}

int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	u32 vlan_table[3];
	u16 pvid;

	ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
	pvid = pvid & 0xFFF;

	if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
		dev_dbg(dev->dev, "Failed to get vlan table\n");
		return -ETIMEDOUT;
	}

	vlan_table[2] &= ~BIT(port);

	if (pvid == vlan->vid)
		pvid = 1;

	if (untagged)
		vlan_table[1] &= ~BIT(port);

	if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
		dev_dbg(dev->dev, "Failed to set vlan table\n");
		return -ETIMEDOUT;
	}

	ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);

	return 0;
}

int ksz9477_fdb_add(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* find any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	/* read ALU entry */
	ksz9477_read_table(dev, alu_table);

	/* update ALU entry */
	alu_table[0] = ALU_V_STATIC_VALID;
	alu_table[1] |= BIT(port);
	if (vid)
		alu_table[1] |= ALU_V_USE_FID;
	alu_table[2] = (vid << ALU_V_FID_S);
	alu_table[2] |= ((addr[0] << 8) | addr[1]);
	alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
	alu_table[3] |= ((addr[4] << 8) | addr[5]);

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_fdb_del(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* read any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
	if (alu_table[0] & ALU_V_STATIC_VALID) {
		ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
		ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
		ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);

		/* clear forwarding port */
		alu_table[2] &= ~BIT(port);

		/* if there is no port to forward, clear table */
		if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
			alu_table[0] = 0;
			alu_table[1] = 0;
			alu_table[2] = 0;
			alu_table[3] = 0;
		}
	} else {
		alu_table[0] = 0;
		alu_table[1] = 0;
		alu_table[2] = 0;
		alu_table[3] = 0;
	}

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
{
	alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
	alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
	alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
	alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
			ALU_V_PRIO_AGE_CNT_M;
	alu->mstp = alu_table[0] & ALU_V_MSTP_M;

	alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
	alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
	alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;

	alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;

	alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
	alu->mac[1] = alu_table[2] & 0xFF;
	alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
	alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
	alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
	alu->mac[5] = alu_table[3] & 0xFF;
}

int ksz9477_fdb_dump(struct ksz_device *dev, int port,
		     dsa_fdb_dump_cb_t *cb, void *data)
{
	int ret = 0;
	u32 ksz_data;
	u32 alu_table[4];
	struct alu_struct alu;
	int timeout;

	mutex_lock(&dev->alu_mutex);

	/* start ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);

	do {
		timeout = 1000;
		do {
			ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
			if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
				break;
			usleep_range(1, 10);
		} while (timeout-- > 0);

		if (!timeout) {
			dev_dbg(dev->dev, "Failed to search ALU\n");
			ret = -ETIMEDOUT;
			goto exit;
		}

		if (!(ksz_data & ALU_VALID))
			continue;

		/* read ALU table */
		ksz9477_read_table(dev, alu_table);

		ksz9477_convert_alu(&alu, alu_table);

		if (alu.port_forward & BIT(port)) {
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				goto exit;
		}
	} while (ksz_data & ALU_START);

exit:

	/* stop ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);

	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_mdb_add(struct ksz_device *dev, int port,
		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
	u32 static_table[4];
	const u8 *shifts;
	const u32 *masks;
	u32 data;
	int index;
	u32 mac_hi, mac_lo;
	int err = 0;

	shifts = dev->info->shifts;
	masks = dev->info->masks;

	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

	mutex_lock(&dev->alu_mutex);

	for (index = 0; index < dev->info->num_statics; index++) {
		/* find empty slot first */
		data = (index << shifts[ALU_STAT_INDEX]) |
			masks[ALU_STAT_READ] | ALU_STAT_START;
		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

		/* wait to be finished */
		err = ksz9477_wait_alu_sta_ready(dev);
		if (err) {
			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
			goto exit;
		}

		/* read ALU static table */
		ksz9477_read_table(dev, static_table);

		if (static_table[0] & ALU_V_STATIC_VALID) {
			/* check this has same vid & mac address */
			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
			    static_table[3] == mac_lo) {
				/* found matching one */
				break;
			}
		} else {
			/* found empty one */
			break;
		}
	}

	/* no available entry */
	if (index == dev->info->num_statics) {
		err = -ENOSPC;
		goto exit;
	}

	/* add entry */
	static_table[0] = ALU_V_STATIC_VALID;
	static_table[1] |= BIT(port);
	if (mdb->vid)
		static_table[1] |= ALU_V_USE_FID;
	static_table[2] = (mdb->vid << ALU_V_FID_S);
	static_table[2] |= mac_hi;
	static_table[3] = mac_lo;

	ksz9477_write_table(dev, static_table);

	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

	/* wait to be finished */
	if (ksz9477_wait_alu_sta_ready(dev))
		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");

exit:
	mutex_unlock(&dev->alu_mutex);
	return err;
}

int ksz9477_mdb_del(struct ksz_device *dev, int port,
		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
	u32 static_table[4];
	const u8 *shifts;
	const u32 *masks;
	u32 data;
	int index;
	int ret = 0;
	u32 mac_hi, mac_lo;

	shifts = dev->info->shifts;
	masks = dev->info->masks;

	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

	mutex_lock(&dev->alu_mutex);

	for (index = 0; index < dev->info->num_statics; index++) {
		/* find empty slot first */
		data = (index << shifts[ALU_STAT_INDEX]) |
			masks[ALU_STAT_READ] | ALU_STAT_START;
		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

		/* wait to be finished */
		ret = ksz9477_wait_alu_sta_ready(dev);
		if (ret) {
			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
			goto exit;
		}

		/* read ALU static table */
		ksz9477_read_table(dev, static_table);

		if (static_table[0] & ALU_V_STATIC_VALID) {
			/* check this has same vid & mac address */
			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
			    static_table[3] == mac_lo) {
				/* found matching one */
				break;
			}
		}
	}

	/* no available entry */
	if (index == dev->info->num_statics)
		goto exit;

	/* clear port */
	static_table[1] &= ~BIT(port);

	if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
		/* delete entry */
		static_table[0] = 0;
		static_table[1] = 0;
		static_table[2] = 0;
		static_table[3] = 0;
	}

	ksz9477_write_table(dev, static_table);

	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

	/* wait to be finished */
	ret = ksz9477_wait_alu_sta_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
			    struct dsa_mall_mirror_tc_entry *mirror,
			    bool ingress, struct netlink_ext_ack *extack)
{
	u8 data;
	int p;

	/* Limit to one sniffer port.
	 * Check if any of the ports is already set for sniffing.
	 * If yes, instruct the user to remove the previous entry & exit.
	 */
	for (p = 0; p < dev->info->port_cnt; p++) {
		/* Skip the current sniffing port */
		if (p == mirror->to_local_port)
			continue;

		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

		if (data & PORT_MIRROR_SNIFFER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Sniffer port is already configured, delete existing rules & retry");
			return -EBUSY;
		}
	}

	if (ingress)
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
	else
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);

	/* configure mirror port */
	ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
		     PORT_MIRROR_SNIFFER, true);

	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);

	return 0;
}

void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
			     struct dsa_mall_mirror_tc_entry *mirror)
{
	bool in_use = false;
	u8 data;
	int p;

	if (mirror->ingress)
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
	else
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);

	/* Check if any of the ports is still referring to the sniffer port */
	for (p = 0; p < dev->info->port_cnt; p++) {
		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

		if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
			in_use = true;
			break;
		}
	}

	/* delete sniffing if there are no other mirroring rules */
	if (!in_use)
		ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
			     PORT_MIRROR_SNIFFER, false);
}

static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
{
	phy_interface_t interface;
	bool gbit;

	if (dev->info->internal_phy[port])
		return PHY_INTERFACE_MODE_NA;

	gbit = ksz_get_gbit(dev, port);

	interface = ksz_get_xmii(dev, port, gbit);

	return interface;
}

static void ksz9477_port_mmd_write(struct ksz_device *dev, int port,
				   u8 dev_addr, u16 reg_addr, u16 val)
{
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
		     MMD_SETUP(PORT_MMD_OP_INDEX, dev_addr));
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, reg_addr);
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
		     MMD_SETUP(PORT_MMD_OP_DATA_NO_INCR, dev_addr));
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, val);
}

static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
{
	/* Apply PHY settings to address errata listed in
	 * KSZ9477, KSZ9897, KSZ9896, KSZ9567, KSZ8565
	 * Silicon Errata and Data Sheet Clarification documents:
	 *
	 * Register settings are needed to improve PHY receive performance
	 */
	ksz9477_port_mmd_write(dev, port, 0x01, 0x6f, 0xdd0b);
	ksz9477_port_mmd_write(dev, port, 0x01, 0x8f, 0x6032);
	ksz9477_port_mmd_write(dev, port, 0x01, 0x9d, 0x248c);
	ksz9477_port_mmd_write(dev, port, 0x01, 0x75, 0x0060);
	ksz9477_port_mmd_write(dev, port, 0x01, 0xd3, 0x7777);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x06, 0x3008);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x08, 0x2001);

	/* Transmit waveform amplitude can be improved
	 * (1000BASE-T, 100BASE-TX, 10BASE-Te)
	 */
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x04, 0x00d0);

	/* Energy Efficient Ethernet (EEE) feature select must
	 * be manually disabled (except on KSZ8565 which is 100Mbit)
	 */
	if (dev->info->gbit_capable[port])
		ksz9477_port_mmd_write(dev, port, 0x07, 0x3c, 0x0000);

	/* Register settings are required to meet data sheet
	 * supply current specifications
	 */
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x13, 0x6eff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x14, 0xe6ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x15, 0x6eff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x16, 0xe6ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x17, 0x00ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x18, 0x43ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x19, 0xc3ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1a, 0x6fff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1b, 0x07ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1c, 0x0fff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1d, 0xe7ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1e, 0xefff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x20, 0xeeee);
}

void ksz9477_get_caps(struct ksz_device *dev, int port,
		      struct phylink_config *config)
{
	config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
				   MAC_SYM_PAUSE;

	if (dev->info->gbit_capable[port])
		config->mac_capabilities |= MAC_1000FD;
}

int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
	u32 secs = msecs / 1000;
	u8 value;
	u8 data;
	int ret;

	value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);

	ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
	if (ret < 0)
		return ret;

	data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);

	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
	if (ret < 0)
		return ret;

	value &= ~SW_AGE_CNT_M;
	value |= FIELD_PREP(SW_AGE_CNT_M, data);

	return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
}

void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
	struct dsa_switch *ds = dev->ds;
	u16 data16;
	u8 member;

	/* enable tag tail for host port */
	if (cpu_port)
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
			     true);

	ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);

	/* set back pressure */
	ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);

	/* enable broadcast storm limit */
	ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);

	/* disable DiffServ priority */
	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);

	/* replace priority */
	ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
		     false);
	ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
			   MTI_PVID_REPLACE, false);

	/* enable 802.1p priority */
	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);

	if (dev->info->internal_phy[port]) {
		/* do not force flow control */
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
			     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
			     false);

		if (dev->info->phy_errata_9477)
			ksz9477_phy_errata_setup(dev, port);
	} else {
		/* force flow control */
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
			     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
			     true);
	}

	if (cpu_port)
		member = dsa_user_ports(ds);
	else
		member = BIT(dsa_upstream_port(ds, port));

	ksz9477_cfg_port_member(dev, port, member);

	/* clear pending interrupts */
	if (dev->info->internal_phy[port])
		ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
}

void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (dsa_is_cpu_port(ds, i) &&
		    (dev->info->cpu_ports & (1 << i))) {
			phy_interface_t interface;
			const char *prev_msg;
			const char *prev_mode;

			dev->cpu_port = i;
			p = &dev->ports[i];

			/* Read from XMII register to determine host port
			 * interface. If set specifically in device tree
			 * note the difference to help debugging.
			 */
			interface = ksz9477_get_interface(dev, i);
			if (!p->interface) {
				if (dev->compat_interface) {
					dev_warn(dev->dev,
						 "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
						 "Please update your device tree.\n",
						 i);
					p->interface = dev->compat_interface;
				} else {
					p->interface = interface;
				}
			}
			if (interface && interface != p->interface) {
				prev_msg = " instead of ";
				prev_mode = phy_modes(interface);
			} else {
				prev_msg = "";
				prev_mode = "";
			}
			dev_info(dev->dev,
				 "Port%d: using phy mode %s%s%s\n",
				 i,
				 phy_modes(p->interface),
				 prev_msg,
				 prev_mode);

			/* enable cpu port */
			ksz9477_port_setup(dev, i, true);
		}
	}

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (i == dev->cpu_port)
			continue;
		ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
	}
}

int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
	const u32 *masks;
	u32 data;
	int ret;

	masks = dev->info->masks;

	/* Enable Reserved multicast table */
	ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);

	/* Set the Override bit for forwarding BPDU packet to CPU */
	ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
			  ALU_V_OVERRIDE | BIT(dev->cpu_port));
	if (ret < 0)
		return ret;

	data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];

	ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
	if (ret < 0)
		return ret;

	/* wait to be finished */
	ret = ksz9477_wait_alu_sta_ready(dev);
	if (ret < 0) {
		dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
		return ret;
	}

	return 0;
}

int ksz9477_setup(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	int ret = 0;

	/* Required for port partitioning. */
	ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
		      true);

	/* Does not work correctly with tail tagging. */
	ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);

	/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);

	/* Now we can configure default MTU value */
	ret = regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, REG_SW_MTU_MASK,
				 VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		return ret;

	/* queue based egress rate limit */
	ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);

	/* enable global MIB counter freeze function */
	ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);

	return 0;
}

u32 ksz9477_get_port_addr(int port, int offset)
{
	return PORT_CTRL_ADDR(port, offset);
}

int ksz9477_switch_init(struct ksz_device *dev)
{
	u8 data8;
	int ret;

	dev->port_mask = (1 << dev->info->port_cnt) - 1;

	/* turn off SPI DO Edge select */
	ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
	if (ret)
		return ret;

	data8 &= ~SPI_AUTO_EDGE_DETECTION;
	ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
	if (ret)
		return ret;

	return 0;
}

void ksz9477_switch_exit(struct ksz_device *dev)
{
	ksz9477_reset_switch(dev);
}

MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
MODULE_LICENSE("GPL");