// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

static struct workqueue_struct *sparx5_owq;

struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	struct sparx5 *sparx5;
	unsigned long event;
};

static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
					     struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}

static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
					  struct switchdev_brport_flags flags)
{
	int pgid;

	if (flags.mask & BR_MCAST_FLOOD)
		for (pgid = PGID_MC_FLOOD; pgid <= PGID_IPV6_MC_CTRL; pgid++)
			sparx5_pgid_update_mask(port, pgid, !!(flags.val & BR_MCAST_FLOOD));
	if (flags.mask & BR_FLOOD)
		sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
	if (flags.mask & BR_BCAST_FLOOD)
		sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD));
}

static void sparx5_attr_stp_state_set(struct sparx5_port *port,
				      u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;

	default:
		/* All other states treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* Apply the bridge_fwd_mask to all the ports */
	sparx5_update_fwd(sparx5);
}

static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
					unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);

	sparx5_set_ageing(port->sparx5, ageing_time);
}

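/* Dispatch a switchdev port attribute change (bridge flags, STP state,
 * ageing time, VLAN filtering) to the matching handler above.
 */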
static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
				const struct switchdev_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct sparx5_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		return sparx5_port_attr_pre_bridge_flags(port,
							 attr->u.brport_flags);
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		sparx5_attr_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		/* Use PVID 1 when default_pvid is 0 to avoid
		 * collisions with non-bridged ports.
		 */
		if (port->pvid == 0)
			port->pvid = 1;
		port->vlan_aware = attr->u.vlan_filtering;
		sparx5_vlan_port_apply(port->sparx5, port);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int sparx5_port_bridge_join(struct sparx5_port *port,
				   struct net_device *bridge,
				   struct netlink_ext_ack *extack)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct net_device *ndev = port->ndev;
	int err;

	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		/* First bridged port */
		sparx5->hw_bridge_dev = bridge;
	else if (sparx5->hw_bridge_dev != bridge)
		/* Adding the port to a second bridge is not supported */
		return -ENODEV;

	set_bit(port->portno, sparx5->bridge_mask);

	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	/* Remove standalone port entry */
	sparx5_mact_forget(sparx5, ndev->dev_addr, 0);

	/* The port enters bridge mode, therefore multicast frames no
	 * longer need to be copied to the CPU unless the bridge
	 * requests them.
	 */
	__dev_mc_unsync(ndev, sparx5_mc_unsync);

	return 0;

err_switchdev_offload:
	clear_bit(port->portno, sparx5->bridge_mask);
	return err;
}

static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);

	clear_bit(port->portno, sparx5->bridge_mask);
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear bridge VLAN settings before updating the port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* Forward frames to CPU */
	sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);

	/* The port returns to host mode, therefore restore the mc list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}

static int sparx5_port_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = sparx5_port_bridge_join(port, info->upper_dev,
						      extack);
		else
			sparx5_port_bridge_leave(port, info->upper_dev);

		sparx5_vlan_port_apply(port->sparx5, port);
	}

	return err;
}

static int sparx5_port_add_addr(struct net_device *dev, bool up)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u16 vid = port->pvid;

	if (up)
		sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
	else
		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);

	return 0;
}

static int sparx5_netdevice_port_event(struct net_device *dev,
				       struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	int err = 0;

	if (!sparx5_netdevice_check(dev))
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = sparx5_port_changeupper(dev, ptr);
		break;
	case NETDEV_PRE_UP:
		err = sparx5_port_add_addr(dev, true);
		break;
	case NETDEV_DOWN:
		err = sparx5_port_add_addr(dev, false);
		break;
	}

	return err;
}

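/* Top-level netdevice notifier callback; forwards the event to the
 * port handler and converts the result to a notifier return value.
 */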
static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret = 0;

	ret = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;
	bool host_addr;
	u16 vid;

	rtnl_lock();
	if (!sparx5_netdevice_check(dev)) {
		host_addr = true;
		sparx5 = switchdev_work->sparx5;
	} else {
		host_addr = false;
		sparx5 = switchdev_work->sparx5;
		port = netdev_priv(dev);
	}

	fdb_info = &switchdev_work->fdb_info;

	/* Use PVID 1 when default_pvid is 0 to avoid
	 * collisions with non-bridged ports.
	 */
	if (fdb_info->vid == 0)
		vid = 1;
	else
		vid = fdb_info->vid;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (host_addr)
			sparx5_add_mact_entry(sparx5, dev, PGID_CPU,
					      fdb_info->addr, vid);
		else
			sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
					      fdb_info->addr, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}

static int sparx5_switchdev_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct sparx5 *spx5;
	int err;

	spx5 = container_of(nb, struct sparx5, switchdev_nb);

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fallthrough;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;
		switchdev_work->sparx5 = spx5;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

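/* Add a VLAN. On the bridge master the broadcast address is learned
 * towards the CPU for the VLAN; on a switch port the VLAN membership,
 * PVID and untagged flags are applied to the port.
 */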
static int sparx5_handle_port_vlan_add(struct net_device *dev,
				       struct notifier_block *nb,
				       const struct switchdev_obj_port_vlan *v)
{
	struct sparx5_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		/* Flood broadcast to CPU */
		sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
				  v->vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_add(port, v->vid,
				   v->flags & BRIDGE_VLAN_INFO_PVID,
				   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

static int sparx5_handle_port_mdb_add(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	u16 pgid_idx, vid;
	u32 mact_entry;
	bool is_host;
	int res, err;

	is_host = netif_is_bridge_master(v->obj.orig_dev);

	/* When the bridge is VLAN unaware, the VLAN value is not parsed
	 * and we receive vid 0. Fall back to bridge vid 1.
	 */
	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);

	if (res == 0) {
		pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);

		/* MC_IDX starts after the port masks in the PGID table */
		pgid_idx += SPX5_PORTS;

		if (is_host)
			spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
				 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
				 ANA_AC_PGID_MISC_CFG(pgid_idx));
		else
			sparx5_pgid_update_mask(port, pgid_idx, true);
	} else {
		err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx);
		if (err) {
			netdev_warn(dev, "multicast pgid table full\n");
			return err;
		}

		if (is_host)
			spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
				 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
				 ANA_AC_PGID_MISC_CFG(pgid_idx));
		else
			sparx5_pgid_update_mask(port, pgid_idx, true);

		err = sparx5_mact_learn(spx5, pgid_idx, v->addr, vid);
		if (err) {
			netdev_warn(dev, "could not learn mac address %pM\n", v->addr);
			sparx5_pgid_free(spx5, pgid_idx);
			sparx5_pgid_update_mask(port, pgid_idx, false);
			return err;
		}
	}

	return 0;
}

static int sparx5_mdb_del_entry(struct net_device *dev,
				struct sparx5 *spx5,
				const unsigned char mac[ETH_ALEN],
				const u16 vid,
				u16 pgid_idx)
{
	int err;

	err = sparx5_mact_forget(spx5, mac, vid);
	if (err) {
		netdev_warn(dev, "could not forget mac address %pM\n", mac);
		return err;
	}
	err = sparx5_pgid_free(spx5, pgid_idx);
	if (err) {
		netdev_err(dev, "attempted to free already freed pgid\n");
		return err;
	}
	return 0;
}

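/* Remove a port (or the CPU, for a host entry) from a multicast group
 * and drop the MAC table entry when the group becomes empty.
 */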
static int sparx5_handle_port_mdb_del(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	u16 pgid_idx, vid;
	u32 mact_entry, res, pgid_entry[3], misc_cfg;
	bool host_ena;

	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);

	if (res == 0) {
		pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);

		/* MC_IDX starts after the port masks in the PGID table */
		pgid_idx += SPX5_PORTS;

		if (netif_is_bridge_master(v->obj.orig_dev))
			spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(0),
				 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
				 ANA_AC_PGID_MISC_CFG(pgid_idx));
		else
			sparx5_pgid_update_mask(port, pgid_idx, false);

		misc_cfg = spx5_rd(spx5, ANA_AC_PGID_MISC_CFG(pgid_idx));
		host_ena = ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(misc_cfg);

		sparx5_pgid_read_mask(spx5, pgid_idx, pgid_entry);
		if (bitmap_empty((unsigned long *)pgid_entry, SPX5_PORTS) && !host_ena)
			/* No ports or CPU are in MC group. Remove entry */
			return sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx);
	}

	return 0;
}

static int sparx5_handle_port_obj_add(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_add(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_add(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_handle_port_vlan_del(struct net_device *dev,
				       struct notifier_block *nb,
				       u16 vid)
{
	struct sparx5_port *port = netdev_priv(dev);
	int ret;

	/* Master bridge? */
	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		sparx5_mact_forget(sparx5, dev->broadcast, vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	ret = sparx5_vlan_vid_del(port, vid);
	if (ret)
		return ret;

	return 0;
}

static int sparx5_handle_port_obj_del(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_del(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_del(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

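/* Blocking switchdev notifier callback; handles VLAN and MDB object
 * add/del as well as port attribute sets in process context.
 */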
static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event,
					   void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = sparx5_handle_port_obj_add(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = sparx5_handle_port_obj_del(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

int sparx5_register_notifier_blocks(struct sparx5 *s5)
{
	int err;

	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
	err = register_netdevice_notifier(&s5->netdevice_nb);
	if (err)
		return err;

	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
	err = register_switchdev_notifier(&s5->switchdev_nb);
	if (err)
		goto err_switchdev_nb;

	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	if (err)
		goto err_switchdev_blocking_nb;

	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
	if (!sparx5_owq) {
		/* The blocking notifier is already registered at this
		 * point, so it must be unregistered as well.
		 */
		err = -ENOMEM;
		goto err_owq;
	}

	return 0;

err_owq:
	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&s5->switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&s5->netdevice_nb);

	return err;
}

void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}