// SPDX-License-Identifier: GPL-2.0+

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "lan966x_main.h"

/* Forward declarations: these notifier blocks are referenced by
 * switchdev_bridge_port_offload()/unoffload() in the bridge join/leave
 * paths before their definitions at the bottom of the file.
 */
static struct notifier_block lan966x_netdevice_nb __read_mostly;
static struct notifier_block lan966x_switchdev_nb __read_mostly;
static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;

/* Recompute the per-port flood bit in one of the IP multicast PGID
 * entries (pgid_ip is PGID_MCIPV4 or PGID_MCIPV6 at both call sites).
 *
 * When multicast snooping is disabled on the port (!port->mcast_ena),
 * the IP multicast flood bit mirrors the generic PGID_MC flood mask;
 * when snooping is enabled, IP multicast flooding is turned off for
 * the port so forwarding is driven by the MDB entries instead.
 */
static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
					    u32 pgid_ip)
{
	struct lan966x *lan966x = port->lan966x;
	u32 flood_mask_ip;

	flood_mask_ip = lan_rd(lan966x, ANA_PGID(pgid_ip));
	flood_mask_ip = ANA_PGID_PGID_GET(flood_mask_ip);

	/* If mcast snooping is not enabled then use mcast flood mask
	 * to decide to enable multicast flooding or not.
	 */
	if (!port->mcast_ena) {
		u32 flood_mask;

		flood_mask = lan_rd(lan966x, ANA_PGID(PGID_MC));
		flood_mask = ANA_PGID_PGID_GET(flood_mask);

		/* Copy this port's bit from the generic mcast flood mask */
		if (flood_mask & BIT(port->chip_port))
			flood_mask_ip |= BIT(port->chip_port);
		else
			flood_mask_ip &= ~BIT(port->chip_port);
	} else {
		flood_mask_ip &= ~BIT(port->chip_port);
	}

	lan_rmw(ANA_PGID_PGID_SET(flood_mask_ip),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(pgid_ip));
}

/* Enable/disable unknown-multicast flooding for the port by toggling its
 * bit in the PGID_MC port mask, then propagate the change to the IPv4/IPv6
 * multicast PGIDs when snooping is off (those mirror PGID_MC in that case).
 */
static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_MC));

	/* With snooping disabled the IP mcast flood masks follow PGID_MC,
	 * so they must be recomputed whenever PGID_MC changes.
	 */
	if (!port->mcast_ena) {
		lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
		lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
	}
}

/* Enable/disable unknown-unicast flooding for the port (PGID_UC bit). */
static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_UC));
}

/* Enable/disable broadcast flooding for the port (PGID_BC bit). */
static void lan966x_port_set_bcast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_BC));
}

/* Enable/disable source MAC learning on the port and cache the requested
 * state in port->learn_ena so lan966x_port_stp_state_set() can restore it
 * when the STP state permits learning again.
 */
static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled)
{
	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled),
		ANA_PORT_CFG_LEARN_ENA,
		port->lan966x, ANA_PORT_CFG(port->chip_port));

	port->learn_ena = enabled;
}

/* Apply the bridge port flags present in flags.mask; flags.val carries the
 * on/off value for each flag. Flags not in the mask are left untouched.
 */
static void lan966x_port_bridge_flags(struct lan966x_port *port,
				      struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_MCAST_FLOOD)
		lan966x_port_set_mcast_flood(port,
					     !!(flags.val & BR_MCAST_FLOOD));

	if (flags.mask & BR_FLOOD)
		lan966x_port_set_ucast_flood(port,
					     !!(flags.val & BR_FLOOD));

	if (flags.mask & BR_BCAST_FLOOD)
		lan966x_port_set_bcast_flood(port,
					     !!(flags.val & BR_BCAST_FLOOD));

	if (flags.mask & BR_LEARNING)
		lan966x_port_set_learning(port,
					  !!(flags.val & BR_LEARNING));
}

/* Pre-check for SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: reject any bridge
 * port flag this driver cannot offload.
 *
 * Return: 0 if all requested flags are supported, -EINVAL otherwise.
 */
static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
					 struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD |
			   BR_LEARNING))
		return -EINVAL;

	return 0;
}

/* Rewrite the per-source-port forwarding masks (PGID_SRC + i) from
 * lan966x->bridge_fwd_mask.
 *
 * A port that is bridged and forwarding may reach every other forwarding
 * bridge port except itself; a port outside the bridge (or not forwarding)
 * may reach nothing but the CPU. The CPU port bit is always set so frames
 * can still be trapped/copied to the host.
 */
static void lan966x_update_fwd_mask(struct lan966x *lan966x)
{
	int i;

	for (i = 0; i < lan966x->num_phys_ports; i++) {
		struct lan966x_port *port = lan966x->ports[i];
		unsigned long mask = 0;

		if (port && lan966x->bridge_fwd_mask & BIT(i))
			mask = lan966x->bridge_fwd_mask & ~BIT(i);

		mask |= BIT(CPU_PORT);

		lan_wr(ANA_PGID_PGID_SET(mask),
		       lan966x, ANA_PGID(PGID_SRC + i));
	}
}
/* React to an STP state change on the port.
 *
 * Learning stays enabled only in FORWARDING/LEARNING states, and only if
 * the user asked for it via BR_LEARNING (cached in port->learn_ena by
 * lan966x_port_set_learning()). The port participates in forwarding
 * (bridge_fwd_mask) only in the FORWARDING state; the hardware source
 * masks are then recomputed for the whole switch.
 */
static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
{
	struct lan966x *lan966x = port->lan966x;
	bool learn_ena = false;

	if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) &&
	    port->learn_ena)
		learn_ena = true;

	if (state == BR_STATE_FORWARDING)
		lan966x->bridge_fwd_mask |= BIT(port->chip_port);
	else
		lan966x->bridge_fwd_mask &= ~BIT(port->chip_port);

	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena),
		ANA_PORT_CFG_LEARN_ENA,
		lan966x, ANA_PORT_CFG(port->chip_port));

	lan966x_update_fwd_mask(lan966x);
}

/* Program the FDB ageing time. The bridge passes the value in clock_t
 * units; convert to whole seconds before handing it to the MAC table code.
 */
static void lan966x_port_ageing_set(struct lan966x_port *port,
				    unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	lan966x_mac_set_ageing(port->lan966x, ageing_time);
}

/* Enable/disable multicast snooping on the port.
 *
 * When enabling, previously cached MDB entries are restored to hardware;
 * when disabling, they are cleared. IGMP/MLD redirection to the CPU is
 * toggled accordingly, and the IP multicast flood masks are recomputed
 * (their behaviour depends on port->mcast_ena, see
 * lan966x_port_set_mcast_ip_flood()).
 */
static void lan966x_port_mc_set(struct lan966x_port *port, bool mcast_ena)
{
	struct lan966x *lan966x = port->lan966x;

	port->mcast_ena = mcast_ena;
	if (mcast_ena)
		lan966x_mdb_restore_entries(lan966x);
	else
		lan966x_mdb_clear_entries(lan966x);

	lan_rmw(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(mcast_ena) |
		ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(mcast_ena) |
		ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(mcast_ena),
		ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
		ANA_CPU_FWD_CFG_MLD_REDIR_ENA |
		ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA,
		lan966x, ANA_CPU_FWD_CFG(port->chip_port));

	lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
	lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
}

/* switchdev SWITCHDEV_PORT_ATTR_SET dispatcher for a lan966x port.
 *
 * ctx, when non-NULL, restricts the operation to a specific port; events
 * for other ports are ignored (return 0, as expected by the switchdev
 * replay mechanism).
 *
 * Return: 0 on success or when skipped, negative errno otherwise;
 * -EOPNOTSUPP for attributes this driver does not handle.
 */
static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
				 const struct switchdev_attr *attr,
				 struct netlink_ext_ack *extack)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err = 0;

	if (ctx && ctx != port)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		lan966x_port_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		lan966x_port_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		lan966x_port_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
		lan966x_vlan_port_apply(port);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		/* The attribute carries "disabled"; the driver tracks the
		 * inverse ("enabled").
		 */
		lan966x_port_mc_set(port, !attr->u.mc_disabled);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

/* Offload the port as a member of @bridge.
 *
 * Only a single bridge per switch instance is supported: the first joined
 * bridge is remembered in lan966x->bridge and later joins must target the
 * same one. On success the port is marked in bridge_mask and its flood/
 * learning flags are set to the bridge-port defaults (all enabled).
 *
 * Return: 0 on success, -ENODEV for a second bridge, or the error from
 * switchdev_bridge_port_offload().
 */
static int lan966x_port_bridge_join(struct lan966x_port *port,
				    struct net_device *bridge,
				    struct netlink_ext_ack *extack)
{
	struct switchdev_brport_flags flags = {0};
	struct lan966x *lan966x = port->lan966x;
	struct net_device *dev = port->dev;
	int err;

	if (!lan966x->bridge_mask) {
		lan966x->bridge = bridge;
	} else {
		if (lan966x->bridge != bridge) {
			NL_SET_ERR_MSG_MOD(extack, "Not allow to add port to different bridge");
			return -ENODEV;
		}
	}

	err = switchdev_bridge_port_offload(dev, dev, port,
					    &lan966x_switchdev_nb,
					    &lan966x_switchdev_blocking_nb,
					    false, extack);
	if (err)
		return err;

	lan966x->bridge_mask |= BIT(port->chip_port);

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask;
	lan966x_port_bridge_flags(port, flags);

	return 0;
}

/* Undo lan966x_port_bridge_join(): restore standalone-port behaviour
 * (flooding on, learning off), drop the port from bridge_mask, forget the
 * bridge when the last port leaves, and put the port back into host mode
 * (VLAN-unaware, host PVID).
 */
static void lan966x_port_bridge_leave(struct lan966x_port *port,
				      struct net_device *bridge)
{
	struct switchdev_brport_flags flags = {0};
	struct lan966x *lan966x = port->lan966x;

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask & ~BR_LEARNING;
	lan966x_port_bridge_flags(port, flags);

	lan966x->bridge_mask &= ~BIT(port->chip_port);

	if (!lan966x->bridge_mask)
		lan966x->bridge = NULL;

	/* Set the port back to host mode */
	lan966x_vlan_port_set_vlan_aware(port, false);
	lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
	lan966x_vlan_port_apply(port);
}

/* NETDEV_CHANGEUPPER handler for lan966x ports: join/leave the bridge
 * depending on info->linking. Non-bridge uppers are ignored.
 *
 * Return: 0 or the error from the join path.
 */
static int lan966x_port_changeupper(struct net_device *dev,
				    struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = lan966x_port_bridge_join(port, info->upper_dev,
						       extack);
		else
			lan966x_port_bridge_leave(port, info->upper_dev);
	}

	return err;
}

/* NETDEV_PRECHANGEUPPER handler: on an upcoming unlink from a bridge,
 * unoffload the bridge port before the leave actually happens (required
 * ordering for switchdev_bridge_port_unoffload()).
 */
static int lan966x_port_prechangeupper(struct net_device *dev,
				       struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
		switchdev_bridge_port_unoffload(port->dev, port,
						&lan966x_switchdev_nb,
						&lan966x_switchdev_blocking_nb);

	return NOTIFY_DONE;
}

/* Validate the membership of @bridge when any device joins it.
 *
 * Disallowed configurations:
 *  - ports from two different lan966x switch instances in one bridge;
 *  - a lan966x port bridged together with a foreign (non-lan966x)
 *    interface.
 *
 * Return: 0 if the bridge membership is acceptable, -EINVAL otherwise
 * (with an extack message).
 */
static int lan966x_foreign_bridging_check(struct net_device *bridge,
					  struct netlink_ext_ack *extack)
{
	struct lan966x *lan966x = NULL;
	bool has_foreign = false;
	struct net_device *dev;
	struct list_head *iter;

	if (!netif_is_bridge_master(bridge))
		return 0;

	netdev_for_each_lower_dev(bridge, dev, iter) {
		if (lan966x_netdevice_check(dev)) {
			struct lan966x_port *port = netdev_priv(dev);

			if (lan966x) {
				/* Bridge already has at least one port of a
				 * lan966x switch inside it, check that it's
				 * the same instance of the driver.
				 */
				if (port->lan966x != lan966x) {
					NL_SET_ERR_MSG_MOD(extack,
							   "Bridging between multiple lan966x switches disallowed");
					return -EINVAL;
				}
			} else {
				/* This is the first lan966x port inside this
				 * bridge
				 */
				lan966x = port->lan966x;
			}
		} else {
			has_foreign = true;
		}

		if (lan966x && has_foreign) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Bridging lan966x ports with foreign interfaces disallowed");
			return -EINVAL;
		}
	}

	return 0;
}

/* Thin wrapper to run the foreign-bridging check from a CHANGEUPPER
 * notification (dev itself may be any device; the check inspects the
 * upper bridge).
 */
static int lan966x_bridge_check(struct net_device *dev,
				struct netdev_notifier_changeupper_info *info)
{
	return lan966x_foreign_bridging_check(info->upper_dev,
					      info->info.extack);
}

/* Per-event dispatcher for netdevice notifications.
 *
 * Non-lan966x devices are still checked on CHANGEUPPER so that a foreign
 * interface joining a bridge that contains lan966x ports is vetoed.
 *
 * Return: 0 or negative errno (converted by the caller via
 * notifier_from_errno()).
 */
static int lan966x_netdevice_port_event(struct net_device *dev,
					struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	int err = 0;

	if (!lan966x_netdevice_check(dev)) {
		if (event == NETDEV_CHANGEUPPER)
			return lan966x_bridge_check(dev, ptr);
		return 0;
	}

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		err = lan966x_port_prechangeupper(dev, ptr);
		break;
	case NETDEV_CHANGEUPPER:
		err = lan966x_bridge_check(dev, ptr);
		if (err)
			return err;

		err = lan966x_port_changeupper(dev, ptr);
		break;
	}

	return err;
}

/* Top-level netdevice notifier callback. */
static int lan966x_netdevice_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret;

	ret = lan966x_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

/* We don't offload uppers such as LAG as bridge ports, so every device except
 * the bridge itself is foreign.
 */
static bool lan966x_foreign_dev_check(const struct net_device *dev,
				      const struct net_device *foreign_dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;

	if (netif_is_bridge_master(foreign_dev))
		if (lan966x->bridge == foreign_dev)
			return false;

	return true;
}

/* Atomic switchdev notifier: port attribute sets and FDB add/del events,
 * routed through the switchdev helpers which filter on
 * lan966x_netdevice_check().
 */
static int lan966x_switchdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     lan966x_netdevice_check,
						     lan966x_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
							   lan966x_netdevice_check,
							   lan966x_foreign_dev_check,
							   lan966x_handle_fdb);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

/* Add a VLAN object: a port VLAN when orig_dev is the port itself, or a
 * CPU/host VLAN when the object was set on the bridge device.
 *
 * Return: always 0 (the VLAN helpers report no error here).
 */
static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
					const struct switchdev_obj *obj)
{
	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct lan966x *lan966x = port->lan966x;

	if (!netif_is_bridge_master(obj->orig_dev))
		lan966x_vlan_port_add_vlan(port, v->vid,
					   v->flags & BRIDGE_VLAN_INFO_PVID,
					   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
	else
		lan966x_vlan_cpu_add_vlan(lan966x, v->vid);

	return 0;
}

/* SWITCHDEV_PORT_OBJ_ADD dispatcher (VLANs and port/host MDB entries).
 *
 * Return: 0 on success, -EOPNOTSUPP for unhandled object types.
 */
static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx,
				       const struct switchdev_obj *obj,
				       struct netlink_ext_ack *extack)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err;

	if (ctx && ctx != port)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = lan966x_handle_port_vlan_add(port, obj);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = lan966x_handle_port_mdb_add(port, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

/* Delete a VLAN object; mirrors lan966x_handle_port_vlan_add(). */
static int lan966x_handle_port_vlan_del(struct lan966x_port *port,
					const struct switchdev_obj *obj)
{
	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct lan966x *lan966x = port->lan966x;

	if (!netif_is_bridge_master(obj->orig_dev))
		lan966x_vlan_port_del_vlan(port, v->vid);
	else
		lan966x_vlan_cpu_del_vlan(lan966x, v->vid);

	return 0;
}

/* SWITCHDEV_PORT_OBJ_DEL dispatcher; mirrors lan966x_handle_port_obj_add().
 *
 * Return: 0 on success, -EOPNOTSUPP for unhandled object types.
 */
static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx,
				       const struct switchdev_obj *obj)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err;

	if (ctx && ctx != port)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = lan966x_handle_port_vlan_del(port, obj);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = lan966x_handle_port_mdb_del(port, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

/* Blocking switchdev notifier: object add/del and attribute sets that may
 * sleep.
 */
static int lan966x_switchdev_blocking_event(struct notifier_block *nb,
					    unsigned long event,
					    void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    lan966x_netdevice_check,
						    lan966x_handle_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    lan966x_netdevice_check,
						    lan966x_handle_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     lan966x_netdevice_check,
						     lan966x_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static struct notifier_block lan966x_netdevice_nb __read_mostly = {
	.notifier_call = lan966x_netdevice_event,
};

static struct notifier_block lan966x_switchdev_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_event,
};

static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_blocking_event,
};

/* Register the netdevice and switchdev (atomic + blocking) notifiers. */
void lan966x_register_notifier_blocks(void)
{
	register_netdevice_notifier(&lan966x_netdevice_nb);
	register_switchdev_notifier(&lan966x_switchdev_nb);
	register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
}

/* Unregister the notifiers in reverse order of registration. */
void lan966x_unregister_notifier_blocks(void)
{
	unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
	unregister_switchdev_notifier(&lan966x_switchdev_nb);
	unregister_netdevice_notifier(&lan966x_netdevice_nb);
}