// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/of_net.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz_common.h"

struct ksz_stats_raw {
	u64 rx_hi;
	u64 rx_undersize;
	u64 rx_fragments;
	u64 rx_oversize;
	u64 rx_jabbers;
	u64 rx_symbol_err;
	u64 rx_crc_err;
	u64 rx_align_err;
	u64 rx_mac_ctrl;
	u64 rx_pause;
	u64 rx_bcast;
	u64 rx_mcast;
	u64 rx_ucast;
	u64 rx_64_or_less;
	u64 rx_65_127;
	u64 rx_128_255;
	u64 rx_256_511;
	u64 rx_512_1023;
	u64 rx_1024_1522;
	u64 rx_1523_2000;
	u64 rx_2001;
	u64 tx_hi;
	u64 tx_late_col;
	u64 tx_pause;
	u64 tx_bcast;
	u64 tx_mcast;
	u64 tx_ucast;
	u64 tx_deferred;
	u64 tx_total_col;
	u64 tx_exc_col;
	u64 tx_single_col;
	u64 tx_mult_col;
	u64 rx_total;
	u64 tx_total;
	u64 rx_discards;
	u64 tx_discards;
};

void ksz_r_mib_stats64(struct ksz_device *dev, int port)
{
	struct rtnl_link_stats64 *stats;
	struct ksz_stats_raw *raw;
	struct ksz_port_mib *mib;

	mib = &dev->ports[port].mib;
	stats = &mib->stats64;
	raw = (struct ksz_stats_raw *)mib->counters;

	spin_lock(&mib->stats64_lock);

	stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast;
	stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast;

	/* HW counters are counting bytes + FCS which is not acceptable
	 * for rtnl_link_stats64 interface
	 */
	stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN;
	stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN;

	stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
		raw->rx_oversize;

	stats->rx_crc_errors = raw->rx_crc_err;
	stats->rx_frame_errors = raw->rx_align_err;
	stats->rx_dropped = raw->rx_discards;
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_dropped;

	stats->tx_window_errors = raw->tx_late_col;
	stats->tx_fifo_errors = raw->tx_discards;
	stats->tx_aborted_errors = raw->tx_exc_col;
	stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
		stats->tx_aborted_errors;

	stats->multicast = raw->rx_mcast;
	stats->collisions = raw->tx_total_col;

	spin_unlock(&mib->stats64_lock);
}
EXPORT_SYMBOL_GPL(ksz_r_mib_stats64);

void ksz_get_stats64(struct dsa_switch *ds, int port,
		     struct rtnl_link_stats64 *s)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port_mib *mib;

	mib = &dev->ports[port].mib;

	spin_lock(&mib->stats64_lock);
	memcpy(s, &mib->stats64, sizeof(*s));
	spin_unlock(&mib->stats64_lock);
}
EXPORT_SYMBOL_GPL(ksz_get_stats64);

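/* Compute and program the forwarding membership of a user port: a port may
 * only forward to the CPU port and to other forwarding ports that are
 * members of the same bridge. Ports sharing the bridge are reprogrammed as
 * well so their view of [port] stays consistent.
 */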
void ksz_update_port_member(struct ksz_device *dev, int port)
{
	struct ksz_port *p = &dev->ports[port];
	struct dsa_switch *ds = dev->ds;
	u8 port_member = 0, cpu_port;
	const struct dsa_port *dp;
	int i, j;

	if (!dsa_is_user_port(ds, port))
		return;

	dp = dsa_to_port(ds, port);
	cpu_port = BIT(dsa_upstream_port(ds, port));

	for (i = 0; i < ds->num_ports; i++) {
		const struct dsa_port *other_dp = dsa_to_port(ds, i);
		struct ksz_port *other_p = &dev->ports[i];
		u8 val = 0;

		if (!dsa_is_user_port(ds, i))
			continue;
		if (port == i)
			continue;
		if (!dsa_port_bridge_same(dp, other_dp))
			continue;
		if (other_p->stp_state != BR_STATE_FORWARDING)
			continue;

		if (p->stp_state == BR_STATE_FORWARDING) {
			val |= BIT(port);
			port_member |= BIT(i);
		}

		/* Retain port [i]'s relationship to ports other than [port] */
		for (j = 0; j < ds->num_ports; j++) {
			const struct dsa_port *third_dp;
			struct ksz_port *third_p;

			if (j == i)
				continue;
			if (j == port)
				continue;
			if (!dsa_is_user_port(ds, j))
				continue;
			third_p = &dev->ports[j];
			if (third_p->stp_state != BR_STATE_FORWARDING)
				continue;
			third_dp = dsa_to_port(ds, j);
			if (dsa_port_bridge_same(other_dp, third_dp))
				val |= BIT(j);
		}

		dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
	}

	dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
}
EXPORT_SYMBOL_GPL(ksz_update_port_member);

static void port_r_cnt(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;
	u64 *dropped;

	/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
	while (mib->cnt_ptr < dev->reg_mib_cnt) {
		dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
					&mib->counters[mib->cnt_ptr]);
		++mib->cnt_ptr;
	}

	/* The dropped counter is stored last, after the regular counters. */
	dropped = &mib->counters[dev->mib_cnt];

	/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
	while (mib->cnt_ptr < dev->mib_cnt) {
		dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
					dropped, &mib->counters[mib->cnt_ptr]);
		++mib->cnt_ptr;
	}
	mib->cnt_ptr = 0;
}

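/* Periodic MIB read: walk all used ports and read their counters under the
 * per-port counter mutex, then reschedule with the configured interval.
 */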
static void ksz_mib_read_work(struct work_struct *work)
{
	struct ksz_device *dev = container_of(work, struct ksz_device,
					      mib_read.work);
	struct ksz_port_mib *mib;
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->port_cnt; i++) {
		if (dsa_is_unused_port(dev->ds, i))
			continue;

		p = &dev->ports[i];
		mib = &p->mib;
		mutex_lock(&mib->cnt_mutex);

		/* Only read MIB counters when the port is told to do so.
		 * If not, read only dropped counters when the link is not up.
		 */
		if (!p->read) {
			const struct dsa_port *dp = dsa_to_port(dev->ds, i);

			if (!netif_carrier_ok(dp->slave))
				mib->cnt_ptr = dev->reg_mib_cnt;
		}
		port_r_cnt(dev, i);
		p->read = false;

		if (dev->dev_ops->r_mib_stat64)
			dev->dev_ops->r_mib_stat64(dev, i);

		mutex_unlock(&mib->cnt_mutex);
	}

	schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
}

void ksz_init_mib_timer(struct ksz_device *dev)
{
	int i;

	INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);

	for (i = 0; i < dev->port_cnt; i++)
		dev->dev_ops->port_init_cnt(dev, i);
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);

int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
	struct ksz_device *dev = ds->priv;
	u16 val = 0xffff;

	dev->dev_ops->r_phy(dev, addr, reg, &val);

	return val;
}
EXPORT_SYMBOL_GPL(ksz_phy_read16);

int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
	struct ksz_device *dev = ds->priv;

	dev->dev_ops->w_phy(dev, addr, reg, val);

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_phy_write16);

void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
		       phy_interface_t interface)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p = &dev->ports[port];

	/* Read all MIB counters when the link is going down. */
	p->read = true;
	/* timer started */
	if (dev->mib_read_interval)
		schedule_delayed_work(&dev->mib_read, 0);
}
EXPORT_SYMBOL_GPL(ksz_mac_link_down);

int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct ksz_device *dev = ds->priv;

	if (sset != ETH_SS_STATS)
		return 0;

	return dev->mib_cnt;
}
EXPORT_SYMBOL_GPL(ksz_sset_count);

void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);
	struct ksz_device *dev = ds->priv;
	struct ksz_port_mib *mib;

	mib = &dev->ports[port].mib;
	mutex_lock(&mib->cnt_mutex);

	/* Only read dropped counters if no link. */
	if (!netif_carrier_ok(dp->slave))
		mib->cnt_ptr = dev->reg_mib_cnt;
	port_r_cnt(dev, port);
	memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64));
	mutex_unlock(&mib->cnt_mutex);
}
EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);

int ksz_port_bridge_join(struct dsa_switch *ds, int port,
			 struct dsa_bridge bridge,
			 bool *tx_fwd_offload,
			 struct netlink_ext_ack *extack)
{
	/* port_stp_state_set() will be called afterwards to put the port in
	 * the appropriate state, so there is no need to do anything here.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_bridge_join);

void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
			   struct dsa_bridge bridge)
{
	/* port_stp_state_set() will be called afterwards to put the port in
	 * the forwarding state, so there is no need to do anything here.
	 */
}
EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);

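/* Flush the dynamically learned MAC addresses of a single port. */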
void ksz_port_fast_age(struct dsa_switch *ds, int port)
{
	struct ksz_device *dev = ds->priv;

	dev->dev_ops->flush_dyn_mac_table(dev, port);
}
EXPORT_SYMBOL_GPL(ksz_port_fast_age);

int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
		      void *data)
{
	struct ksz_device *dev = ds->priv;
	int ret = 0;
	u16 i = 0;
	u16 entries = 0;
	u8 timestamp = 0;
	u8 fid;
	u8 member;
	struct alu_struct alu;

	do {
		alu.is_static = false;
		ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
						    &member, &timestamp,
						    &entries);
		if (!ret && (member & BIT(port))) {
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				break;
		}
		i++;
	} while (i < entries);
	if (i >= entries)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);

int ksz_port_mdb_add(struct dsa_switch *ds, int port,
		     const struct switchdev_obj_port_mdb *mdb,
		     struct dsa_db db)
{
	struct ksz_device *dev = ds->priv;
	struct alu_struct alu;
	int index;
	int empty = 0;

	alu.port_forward = 0;
	for (index = 0; index < dev->num_statics; index++) {
		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found one already in static MAC table. */
			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
			    alu.fid == mdb->vid)
				break;
		/* Remember the first empty entry. */
		} else if (!empty) {
			empty = index + 1;
		}
	}

	/* no available entry */
	if (index == dev->num_statics && !empty)
		return -ENOSPC;

	/* add entry */
	if (index == dev->num_statics) {
		index = empty - 1;
		memset(&alu, 0, sizeof(alu));
		memcpy(alu.mac, mdb->addr, ETH_ALEN);
		alu.is_static = true;
	}
	alu.port_forward |= BIT(port);
	if (mdb->vid) {
		alu.is_use_fid = true;

		/* Need a way to map VID to FID. */
		alu.fid = mdb->vid;
	}
	dev->dev_ops->w_sta_mac_table(dev, index, &alu);

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_add);

int ksz_port_mdb_del(struct dsa_switch *ds, int port,
		     const struct switchdev_obj_port_mdb *mdb,
		     struct dsa_db db)
{
	struct ksz_device *dev = ds->priv;
	struct alu_struct alu;
	int index;

	for (index = 0; index < dev->num_statics; index++) {
		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found one already in static MAC table. */
			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
			    alu.fid == mdb->vid)
				break;
		}
	}

	/* no available entry */
	if (index == dev->num_statics)
		goto exit;

	/* clear port */
	alu.port_forward &= ~BIT(port);
	if (!alu.port_forward)
		alu.is_static = false;
	dev->dev_ops->w_sta_mac_table(dev, index, &alu);

exit:
	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_del);

int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	struct ksz_device *dev = ds->priv;

	if (!dsa_is_user_port(ds, port))
		return 0;

	/* setup slave port */
	dev->dev_ops->port_setup(dev, port, false);

	/* port_stp_state_set() will be called afterwards to enable the port,
	 * so there is no need to do anything here.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_enable_port);

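/* Map the bridge STP state to the port control bits (RX/TX enable and
 * learning) and refresh the port's forwarding membership.
 */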
void ksz_port_stp_state_set(struct dsa_switch *ds, int port,
			    u8 state, int reg)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p;
	u8 data;

	ksz_pread8(dev, port, reg, &data);
	data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);

	switch (state) {
	case BR_STATE_DISABLED:
		data |= PORT_LEARN_DISABLE;
		break;
	case BR_STATE_LISTENING:
		data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
		break;
	case BR_STATE_LEARNING:
		data |= PORT_RX_ENABLE;
		break;
	case BR_STATE_FORWARDING:
		data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
		break;
	case BR_STATE_BLOCKING:
		data |= PORT_LEARN_DISABLE;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	ksz_pwrite8(dev, port, reg, data);

	p = &dev->ports[port];
	p->stp_state = state;

	ksz_update_port_member(dev, port);
}
EXPORT_SYMBOL_GPL(ksz_port_stp_state_set);

struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
	struct dsa_switch *ds;
	struct ksz_device *swdev;

	ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	ds->dev = base;
	ds->num_ports = DSA_MAX_PORTS;

	swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
	if (!swdev)
		return NULL;

	ds->priv = swdev;
	swdev->dev = base;

	swdev->ds = ds;
	swdev->priv = priv;

	return swdev;
}
EXPORT_SYMBOL(ksz_switch_alloc);

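/* Common registration path: reset the chip through the optional reset GPIO,
 * detect and initialize it, apply the device tree port/interface properties,
 * register the DSA switch and kick off the periodic MIB read work.
 */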
int ksz_switch_register(struct ksz_device *dev,
			const struct ksz_dev_ops *ops)
{
	struct device_node *port, *ports;
	phy_interface_t interface;
	unsigned int port_num;
	int ret;

	if (dev->pdata)
		dev->chip_id = dev->pdata->chip_id;

	dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset",
						  GPIOD_OUT_LOW);
	if (IS_ERR(dev->reset_gpio))
		return PTR_ERR(dev->reset_gpio);

	if (dev->reset_gpio) {
		gpiod_set_value_cansleep(dev->reset_gpio, 1);
		usleep_range(10000, 12000);
		gpiod_set_value_cansleep(dev->reset_gpio, 0);
		msleep(100);
	}

	mutex_init(&dev->dev_mutex);
	mutex_init(&dev->regmap_mutex);
	mutex_init(&dev->alu_mutex);
	mutex_init(&dev->vlan_mutex);

	dev->dev_ops = ops;

	if (dev->dev_ops->detect(dev))
		return -EINVAL;

	ret = dev->dev_ops->init(dev);
	if (ret)
		return ret;

	/* Host port interface will be self-detected, or specifically set in
	 * the device tree.
	 */
	for (port_num = 0; port_num < dev->port_cnt; ++port_num)
		dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
	if (dev->dev->of_node) {
		ret = of_get_phy_mode(dev->dev->of_node, &interface);
		if (ret == 0)
			dev->compat_interface = interface;
		ports = of_get_child_by_name(dev->dev->of_node,
					     "ethernet-ports");
		if (!ports)
			ports = of_get_child_by_name(dev->dev->of_node,
						     "ports");
		if (ports)
			for_each_available_child_of_node(ports, port) {
				if (of_property_read_u32(port, "reg",
							 &port_num))
					continue;
				if (!(dev->port_mask & BIT(port_num))) {
					of_node_put(port);
					return -EINVAL;
				}
				of_get_phy_mode(port,
						&dev->ports[port_num].interface);
			}
		dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
							 "microchip,synclko-125");
		dev->synclko_disable = of_property_read_bool(dev->dev->of_node,
							     "microchip,synclko-disable");
		if (dev->synclko_125 && dev->synclko_disable) {
			dev_err(dev->dev, "inconsistent synclko settings\n");
			return -EINVAL;
		}
	}

	ret = dsa_register_switch(dev->ds);
	if (ret) {
		dev->dev_ops->exit(dev);
		return ret;
	}

	/* Read MIB counters every 5 seconds to avoid overflow. */
	dev->mib_read_interval = msecs_to_jiffies(5000);

	/* Start the MIB timer. */
	schedule_delayed_work(&dev->mib_read, 0);

	return 0;
}
EXPORT_SYMBOL(ksz_switch_register);

void ksz_switch_remove(struct ksz_device *dev)
{
	/* timer started */
	if (dev->mib_read_interval) {
		dev->mib_read_interval = 0;
		cancel_delayed_work_sync(&dev->mib_read);
	}

	dev->dev_ops->exit(dev);
	dsa_unregister_switch(dev->ds);

	if (dev->reset_gpio)
		gpiod_set_value_cansleep(dev->reset_gpio, 1);
}
EXPORT_SYMBOL(ksz_switch_remove);

MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
MODULE_LICENSE("GPL");