// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <linux/netlink.h>
#include <net/switchdev.h>
#include <net/vxlan.h>

#include "spectrum_span.h"
#include "spectrum_switchdev.h"
#include "spectrum.h"
#include "core.h"
#include "reg.h"

struct mlxsw_sp_bridge_ops;

/* Per-ASIC bridge state. All offloaded bridge devices are linked on
 * bridges_list; fdb_notify drives the periodic FDB notification work.
 */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	/* Only a single VLAN-aware bridge is supported by the device;
	 * tracks whether one already exists.
	 */
	bool vlan_enabled_exists;
	struct list_head bridges_list;
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
};

/* One instance per offloaded Linux bridge device. */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;
	struct list_head list;		/* member of bridge->bridges_list */
	struct list_head ports_list;	/* bridge ports enslaved to this bridge */
	struct list_head mids_list;	/* multicast groups (MDB entries) */
	u8 vlan_enabled:1,
	   multicast_enabled:1,
	   mrouter:1;
	const struct mlxsw_sp_bridge_ops *ops;	/* 802.1Q or 802.1D flavor */
};

/* One instance per bridge port (front-panel port or LAG) of a bridge
 * device. Reference counted via ref_count; see
 * mlxsw_sp_bridge_port_{get,put}().
 */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;		/* member of bridge_device->ports_list */
	struct list_head vlans_list;	/* VLANs configured on this port */
	unsigned int ref_count;
	u8 stp_state;
	unsigned long flags;		/* BR_* bridge port flags */
	bool mrouter;
	bool lagged;
	union {
		u16 lag_id;		/* valid when lagged */
		u16 system_port;	/* valid when !lagged */
	};
};

/* A VLAN configured on a bridge port; links the port-VLANs of all member
 * ports using this VID.
 */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* member of bridge_port->vlans_list */
	struct list_head port_vlan_list;
	u16 vid;
};

/* Operations that differ between VLAN-aware (802.1Q) and VLAN-unaware
 * (802.1D) bridges, mainly around FID management.
 */
struct mlxsw_sp_bridge_ops {
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
			  const struct net_device *vxlan_dev, u16 vid,
			  struct netlink_ext_ack *extack);
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid, struct netlink_ext_ack *extack);
	struct mlxsw_sp_fid *
		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid);
	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
		       const struct mlxsw_sp_fid *fid);
};

static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port);

static void
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct mlxsw_sp_bridge_device
				   *bridge_device);

static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add);

/* Look up the offload state for a bridge netdev. Returns NULL when the
 * bridge is not offloaded.
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
			    const struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
		if (bridge_device->dev == br_dev)
			return bridge_device;

	return NULL;
}

bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
					 const struct net_device *br_dev)
{
	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
}

/* netdev_walk_all_upper_dev_rcu() callback: destroy the RIF (if any)
 * associated with an upper device of the bridge.
 */
static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
						    void *data)
{
	struct mlxsw_sp *mlxsw_sp = data;

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	return 0;
}

/* Destroy the RIF of the bridge itself and of all its uppers (e.g. VLAN
 * devices stacked on top of it).
 */
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      mlxsw_sp);
}

/* Allocate offload state for a bridge netdev and pick the 802.1Q or
 * 802.1D ops based on its VLAN filtering setting. At most one VLAN-aware
 * bridge may exist at a time.
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
			      struct net_device *br_dev)
{
	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	bool vlan_enabled = br_vlan_enabled(br_dev);

	if (vlan_enabled && bridge->vlan_enabled_exists) {
		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
		return ERR_PTR(-EINVAL);
	}

	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
	if (!bridge_device)
		return ERR_PTR(-ENOMEM);

	bridge_device->dev = br_dev;
	bridge_device->vlan_enabled = vlan_enabled;
	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
	bridge_device->mrouter = br_multicast_router(br_dev);
	INIT_LIST_HEAD(&bridge_device->ports_list);
	if (vlan_enabled) {
		bridge->vlan_enabled_exists = true;
		bridge_device->ops = bridge->bridge_8021q_ops;
	} else {
		bridge_device->ops = bridge->bridge_8021d_ops;
	}
	INIT_LIST_HEAD(&bridge_device->mids_list);
	list_add(&bridge_device->list, &bridge->bridges_list);

	return bridge_device;
}

static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	/* Tear down any router interfaces before the bridge state goes
	 * away.
	 */
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	/* Ports and MDB entries must already be gone by now. */
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mids_list));
	kfree(bridge_device);
}

/* Find-or-create. Returns an ERR_PTR on creation failure. */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (bridge_device)
		return bridge_device;

	return mlxsw_sp_bridge_device_create(bridge, br_dev);
}

/* The bridge device lives as long as it has ports; destroy it when the
 * last port is gone.
 */
static void
mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
			   struct mlxsw_sp_bridge_device *bridge_device)
{
	if (list_empty(&bridge_device->ports_list))
		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
}

static struct mlxsw_sp_bridge_port *
__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
			    const struct net_device *brport_dev)
{
	struct mlxsw_sp_bridge_port *bridge_port;

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		if (bridge_port->dev == brport_dev)
			return bridge_port;
	}

	return NULL;
}

/* Resolve a bridge port netdev to its offload state via its master
 * bridge. Returns NULL if the device has no master or the bridge is not
 * offloaded.
 */
struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
			  struct net_device *brport_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;

	if (!br_dev)
		return NULL;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (!bridge_device)
		return NULL;

	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
}

static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
			    struct net_device *brport_dev)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;

	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
	if (!bridge_port)
		return NULL;

	/* NOTE(review): no NULL check on the lower lookup — presumably a
	 * mlxsw lower is guaranteed to exist on this path; confirm against
	 * callers.
	 */
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
	bridge_port->lagged = mlxsw_sp_port->lagged;
	if (bridge_port->lagged)
		bridge_port->lag_id = mlxsw_sp_port->lag_id;
	else
		bridge_port->system_port = mlxsw_sp_port->local_port;
	bridge_port->dev = brport_dev;
	bridge_port->bridge_device = bridge_device;
	bridge_port->stp_state = BR_STATE_DISABLED;
	/* Defaults mirror the bridge driver's default port flags. */
	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
			     BR_MCAST_FLOOD;
	INIT_LIST_HEAD(&bridge_port->vlans_list);
	list_add(&bridge_port->list, &bridge_device->ports_list);
	bridge_port->ref_count = 1;

	return bridge_port;
}

static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
	list_del(&bridge_port->list);
	WARN_ON(!list_empty(&bridge_port->vlans_list));
	kfree(bridge_port);
}

/* Find-or-create with reference counting; also takes a reference on the
 * bridge device for a newly created port.
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
			 struct net_device *brport_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
	if (bridge_port) {
		bridge_port->ref_count++;
		return bridge_port;
	}

	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
	if (IS_ERR(bridge_device))
		return ERR_CAST(bridge_device);

	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
	if (!bridge_port) {
		err = -ENOMEM;
		goto err_bridge_port_create;
	}

	return bridge_port;

err_bridge_port_create:
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
	return ERR_PTR(err);
}

static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
				     struct mlxsw_sp_bridge_port
				     *bridge_port)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	if (--bridge_port->ref_count != 0)
		return;
	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_bridge_port_destroy(bridge_port);
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
}

/* Find the port's VLAN that is a member of the given bridge device. For
 * a VLAN-aware bridge the VID must also match; for a VLAN-unaware bridge
 * any VID on the right bridge matches.
 */
static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct mlxsw_sp_bridge_device *
				  bridge_device,
				  u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
			    list) {
		if (!mlxsw_sp_port_vlan->bridge_port)
			continue;
		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
		    bridge_device)
			continue;
		if (bridge_device->vlan_enabled &&
		    mlxsw_sp_port_vlan->vid != vid)
			continue;
		return mlxsw_sp_port_vlan;
	}

	return NULL;
}

static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
			       u16 fid_index)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
			    list) {
		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
			return mlxsw_sp_port_vlan;
	}

	return NULL;
}

static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
			  u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		if (bridge_vlan->vid == vid)
			return bridge_vlan;
	}

	return NULL;
}

static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
	if (!bridge_vlan)
		return NULL;

	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
	bridge_vlan->vid = vid;
	list_add(&bridge_vlan->list, &bridge_port->vlans_list);

	return bridge_vlan;
}

static void
mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	list_del(&bridge_vlan->list);
	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
	kfree(bridge_vlan);
}

/* Find-or-create; may return NULL on allocation failure. */
static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	if (bridge_vlan)
		return bridge_vlan;

	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
}

/* Lifetime is tied to the port-VLANs using it. */
static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	if (list_empty(&bridge_vlan->port_vlan_list))
		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
}

/* Report the cached bridge port flags for a switchdev attr get. */
static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
					   struct net_device *dev,
					   unsigned long *brport_flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;

	bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
	if (WARN_ON(!bridge_port))
		return;

	memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
}

/* switchdev attr get handler: parent ID, bridge port flags and the set
 * of flags the device can offload.
 */
static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
					       &attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
		attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
					       BR_MCAST_FLOOD;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Apply an STP state to the port-VLAN of this port within the given
 * bridge VLAN, if one exists.
 */
static int
mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
				  u8 state)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
						 bridge_vlan->vid, state);
	}

	return 0;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Roll back the VLANs already switched to the new state. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}

/* Set flood table membership for this port within the given bridge VLAN,
 * if a matching port-VLAN exists.
 */
static int
mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
				    enum mlxsw_sp_flood_type packet_type,
				    bool member)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
					      packet_type,
					      mlxsw_sp_port->local_port,
					      member);
	}

	return 0;
}

static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	/* Undo the VLANs already updated. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}

static int
mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
				       bool set)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 vid = bridge_vlan->vid;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
	}

	return 0;
}

static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	/* Undo the VLANs already updated. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   struct net_device *orig_dev,
					   unsigned long brport_flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_UC,
						   brport_flags & BR_FLOOD);
	if (err)
		return err;

	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
						brport_flags & BR_LEARNING);
	if (err)
		return err;

	/* With multicast snooping enabled, MC flooding is governed by the
	 * mrouter state and MDB, not by BR_MCAST_FLOOD; skip the flood
	 * table update but still record the flags.
	 */
	if (bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   brport_flags &
						   BR_MCAST_FLOOD);
	if (err)
		return err;

out:
	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
	return 0;
}

/* Program the FDB ageing time (seconds) into the SFDAT register and cache
 * it.
 */
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->bridge->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	/* Validate the range in the prepare phase; commit only writes. */
	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		else
			return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

/* VLAN filtering cannot be toggled on an offloaded bridge; only accept
 * a no-op change.
 */
static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	if (!switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	if (bridge_device->vlan_enabled == vlan_enabled)
		return 0;

	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
	return -EINVAL;
}
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool is_port_mrouter)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	/* With snooping disabled, MC flooding follows BR_MCAST_FLOOD, not
	 * the mrouter state; just record the new value.
	 */
	if (!bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   is_port_mrouter);
	if (err)
		return err;

	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
					 is_port_mrouter);
out:
	bridge_port->mrouter = is_port_mrouter;
	return 0;
}

/* Effective MC flood decision for a bridge port: mrouter state when
 * snooping is on, BR_MCAST_FLOOD otherwise.
 */
static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
{
	const struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = bridge_port->bridge_device;
	return bridge_device->multicast_enabled ? bridge_port->mrouter :
					bridge_port->flags & BR_MCAST_FLOOD;
}

static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct switchdev_trans *trans,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	/* Update the cached state first so the MDB sync below observes the
	 * new snooping mode.
	 */
	if (bridge_device->multicast_enabled != !mc_disabled) {
		bridge_device->multicast_enabled = !mc_disabled;
		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
						   bridge_device);
	}

	/* Recompute MC flood membership of every port under the new mode. */
	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   packet_type, member);
		if (err)
			return err;
	}

	bridge_device->multicast_enabled = !mc_disabled;

	return 0;
}

/* Add or remove the router port to/from the MID's egress list via the
 * SMID register.
 */
static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
					 u16 mid_idx, bool add)
{
	char *smid_pl;
	int err;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid_idx,
			    mlxsw_sp_router_port(mlxsw_sp), add);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

/* Apply the bridge's mrouter state to every MDB entry of the bridge. */
static void
mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device *bridge_device,
				   bool add)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &bridge_device->mids_list, list)
		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
}

static int
mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_trans *trans,
				  struct net_device *orig_dev,
				  bool is_mrouter)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	if (bridge_device->mrouter != is_mrouter)
		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
						   is_mrouter);
	bridge_device->mrouter = is_mrouter;
	return 0;
}

/* switchdev attr set dispatcher. On commit, SPAN mirroring sessions are
 * re-resolved since bridge topology changes can affect them.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (switchdev_trans_ph_commit(trans))
		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}

/* Bind a port-VLAN to the bridge's FID: get the FID, set UC/MC/BC flood
 * membership and map {port, vid} to the FID. Unwinds in reverse on
 * failure.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast is always flooded. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}

/* Reverse of mlxsw_sp_port_vlan_fid_join(). */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}

/* Compute the resulting PVID when VID is added with / without the PVID
 * flag. Returning 0 disallows untagged packets.
 */
static u16
mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
			     u16 vid, bool is_pvid)
{
	if (is_pvid)
		return vid;
	else if (mlxsw_sp_port->pvid == vid)
		return 0;	/* Dis-allow untagged packets */
	else
		return mlxsw_sp_port->pvid;
}

static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port)
		return 0;

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
					  extack);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Hold a reference on the bridge port for the lifetime of the
	 * port-VLAN membership.
	 */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}

void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port, last_vlan;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	/* last_vlan: this is the port's only VLAN on the bridge;
	 * last_port: this port is the VLAN's only member.
	 */
	last_vlan = list_is_singular(&bridge_port->vlans_list);
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	if (last_vlan)
		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}

static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid,
			      struct netlink_ext_ack *extack,
			      struct switchdev_trans *trans)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;
	int err;

	/* The only valid scenario in which a port-vlan already exists, is if
	 * the VLAN flags were changed and the port-vlan is associated with the
	 * correct bridge port
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan &&
	    mlxsw_sp_port_vlan->bridge_port != bridge_port)
		return -EEXIST;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (!mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
							       vid);
		if (IS_ERR(mlxsw_sp_port_vlan))
			return PTR_ERR(mlxsw_sp_port_vlan);
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					     extack);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	return err;
}

/* Reject VLAN changes on the bridge device that would add or remove the
 * PVID used by an existing router interface.
 */
static int
mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
				const struct net_device *br_dev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	u16 pvid;
	u16 vid;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
	if (!rif)
		return 0;
	fid = mlxsw_sp_rif_fid(rif);
	pvid = mlxsw_sp_fid_8021q_vid(fid);

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
			if (vid != pvid) {
				netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
				return -EBUSY;
			}
		} else {
			if (vid == pvid) {
				netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
				return -EBUSY;
			}
		}
	}

	return 0;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans,
				   struct netlink_ext_ack *extack)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	/* VLANs on the bridge device itself are not offloaded; only
	 * validate that the change does not break a router interface's
	 * PVID, then report -EOPNOTSUPP.
	 */
	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
		    br_vlan_enabled(orig_dev) &&
		    switchdev_trans_ph_prepare(trans))
			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
							      orig_dev, vlan);
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		int err;

		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
						    vid, flag_untagged,
						    flag_pvid, extack, trans);
		if (err)
			return err;
	}

	return 0;
}

static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
{
	return lagged ?
	       MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
	       MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
}

/* Flush all FDB records learned on this bridge port for the given FID,
 * using the SFDF register. For LAGs the LAG ID is used in place of the
 * system port.
 */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index)
{
	bool lagged = bridge_port->lagged;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];
	u16 system_port;

	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

/* Map "dynamic" to the SFD record ageing policy */
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

/* Map add/remove to the SFD write operation */
static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ?
	       MLXSW_REG_SFD_OP_WRITE_EDIT :
	       MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

/* Add or remove a unicast FDB record pointing at a NVE (VxLAN) tunnel.
 * Only IPv4 underlays are supported; IPv6 is rejected with -EOPNOTSUPP.
 */
static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
					  const char *mac, u16 fid,
					  enum mlxsw_sp_l3proto proto,
					  const union mlxsw_sp_l3addr *addr,
					  bool adding, bool dynamic)
{
	enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
	char *sfd_pl;
	u8 num_rec;
	u32 uip;
	int err;

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		uip = be32_to_cpu(addr->addr4);
		sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
		break;
	case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
				     mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
				     MLXSW_REG_SFD_REC_ACTION_NOP, uip,
				     sfd_proto);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* The device rewrites num_rec to the number of records it actually
	 * processed; a mismatch means the record was not committed
	 */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}

/* Add or remove a unicast FDB record for a single local port via SFD */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* See __mlxsw_sp_port_fdb_tunnel_uc_op(): device-updated num_rec
	 * must match what we asked for
	 */
	if (num_rec !=
	    mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}

/* Convenience wrapper: plain (NOP-action) unicast FDB add/remove */
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}

/* Install/remove the RIF MAC as a static FDB record that traps packets to
 * the router (local port 0, FORWARD_IP_ROUTER action).
 */
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}

/* Add or remove a unicast FDB record pointing at a LAG via SFD */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}

/* Reflect a bridge FDB notification (add/del) into the device, resolving
 * the bridge port and FID from the notification's device and VID.
 */
static int
mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = fdb_info->info.dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 fid_index, vid;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if
	    (!bridge_port)
		return -EINVAL;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       fdb_info->vid);
	/* VID not offloaded on this port: silently ignore the notification */
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
	vid = mlxsw_sp_port_vlan->vid;

	if (!bridge_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
					       bridge_port->system_port,
					       fdb_info->addr, fid_index,
					       adding, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
						   bridge_port->lag_id,
						   fdb_info->addr, fid_index,
						   vid, adding, false);
}

/* Add or remove a multicast FDB record that forwards to MID table entry
 * mid_idx, via the SFD register.
 */
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid_idx, bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* Device updates num_rec to the count actually processed */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}

/* Rewrite a whole MID (multicast group) entry in the SMID register: the
 * write mask covers every existing local port plus the router port, and the
 * member set is taken from ports_bitmap plus (optionally) the router port.
 */
static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
					 long *ports_bitmap,
					 bool set_router_port)
{
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
	/* Mask in every valid local port so their membership bits are
	 * rewritten (loop starts at 1; presumably port 0 is the CPU port —
	 * TODO confirm)
	 */
	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
		if (mlxsw_sp->ports[i])
			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}

	mlxsw_reg_smid_port_mask_set(smid_pl,
				     mlxsw_sp_router_port(mlxsw_sp), 1);

	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
		mlxsw_reg_smid_port_set(smid_pl, i, 1);

	mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
				set_router_port);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

/* Add or remove a single local port in an existing MID entry */
static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 mid_idx, bool add)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

/* Linear lookup of a multicast group by {MAC, FID} on the bridge device */
static struct
mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
				const unsigned char *addr,
				u16 fid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &bridge_device->mids_list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
			return mid;
	}
	return NULL;
}

/* Set the local-port bit(s) a bridge port maps to: the single system port,
 * or every current member of the LAG.
 */
static void
mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_bridge_port *bridge_port,
				      unsigned long *ports_bitmap)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u64 max_lag_members, i;
	int lag_id;

	if (!bridge_port->lagged) {
		set_bit(bridge_port->system_port, ports_bitmap);
	} else {
		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						     MAX_LAG_MEMBERS);
		lag_id = bridge_port->lag_id;
		for (i = 0; i < max_lag_members; i++) {
			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
								 lag_id, i);
			if (mlxsw_sp_port)
				set_bit(mlxsw_sp_port->local_port,
					ports_bitmap);
		}
	}
}

/* OR the local ports of every mrouter bridge port into flood_bitmap */
static void
mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
				struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_bridge_port *bridge_port;

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		if (bridge_port->mrouter) {
			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
							      bridge_port,
							      flood_bitmap);
		}
	}
}

/* Commit a multicast group to hardware: allocate a free MID index, program
 * the SMID entry (group members plus all mrouter ports, and the router port
 * when the bridge itself is an mrouter) and the matching SFD MC record.
 * Returns true on success; the MID index is only reserved in mids_bitmap
 * once everything succeeded.
 */
static bool
mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mid *mid,
			    struct mlxsw_sp_bridge_device *bridge_device)
{
	long *flood_bitmap;
	int num_of_ports;
	int alloc_size;
	u16 mid_idx;
	int err;

	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return false;

	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
	if (!flood_bitmap)
		return false;

	bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);

	mid->mid = mid_idx;
	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
					    bridge_device->mrouter);
	kfree(flood_bitmap);
	if (err)
		return false;

	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
				   true);
	if (err)
		return false;

	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = true;
	return true;
}

/* Remove a multicast group from hardware and release its MID index.
 * No-op when the group was never committed (in_hw == false).
 */
static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_mid *mid)
{
	if (!mid->in_hw)
		return 0;

	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = false;
	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
				    false);
}

/* Allocate and (when multicast is enabled on the bridge) commit a new
 * multicast group for {addr, fid}, linking it into the bridge's MID list.
 */
static struct
mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct
				  mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const unsigned char *addr,
				  u16 fid)
{
	struct mlxsw_sp_mid *mid;
	size_t alloc_size;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	alloc_size = sizeof(unsigned long) *
		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));

	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
	if (!mid->ports_in_mid)
		goto err_ports_in_mid_alloc;

	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->in_hw = false;

	/* With multicast snooping disabled the group is tracked in software
	 * only; it is written to hardware when snooping is re-enabled
	 */
	if (!bridge_device->multicast_enabled)
		goto out;

	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
		goto err_write_mdb_entry;

out:
	list_add_tail(&mid->list, &bridge_device->mids_list);
	return mid;

err_write_mdb_entry:
	kfree(mid->ports_in_mid);
err_ports_in_mid_alloc:
	kfree(mid);
	return NULL;
}

/* Drop a port from a multicast group; when it was the last member, tear the
 * group down in hardware and free it.
 */
static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_mid *mid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err = 0;

	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
	if (bitmap_empty(mid->ports_in_mid,
			 mlxsw_core_max_ports(mlxsw_sp->core))) {
		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
		list_del(&mid->list);
		kfree(mid->ports_in_mid);
		kfree(mid);
	}
	return err;
}

/* SWITCHDEV_OBJ_ID_PORT_MDB add handler: find or create the multicast
 * group for {MAC, FID} and add this port to it. mrouter ports are already
 * members of every group's flood set, so they are skipped here.
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
					  fid_index);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);

	if (!bridge_device->multicast_enabled)
		return 0;

	/* mrouter ports receive all multicast already; no per-group SMID
	 * update is needed for them
	 */
	if (bridge_port->mrouter)
		return 0;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	return 0;

err_out:
	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	return err;
}

/* Sync all of a bridge's multicast groups with its multicast_enabled state:
 * write them to hardware when snooping is on, remove them when it is off.
 */
static void
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct mlxsw_sp_bridge_device
				   *bridge_device)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_mid *mid;
	bool mc_enabled;

	mc_enabled = bridge_device->multicast_enabled;

	list_for_each_entry(mid, &bridge_device->mids_list, list) {
		if (mc_enabled)
			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
						    bridge_device);
		else
			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
	}
}

/* When a port's mrouter state changes, add it to (or remove it from) every
 * group it is not an explicit member of — explicit members keep their bit
 * regardless of mrouter state.
 */
static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mid *mid;

	bridge_device = bridge_port->bridge_device;

	list_for_each_entry(mid, &bridge_device->mids_list, list) {
		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
	}
}

/* Deferred-work context for re-evaluating SPAN (mirroring) entries */
struct mlxsw_sp_span_respin_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
};

/* Worker: re-resolve SPAN entries under RTNL, then free the work item */
static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span_respin_work *respin_work =
		container_of(work, struct mlxsw_sp_span_respin_work, work);

	rtnl_lock();
	mlxsw_sp_span_respin(respin_work->mlxsw_sp);
	rtnl_unlock();
	kfree(respin_work);
}

/* Schedule a SPAN respin. GFP_ATOMIC because callers may hold spinlocks /
 * run in atomic context; allocation failure silently drops the respin
 * (best-effort by design).
 */
static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_span_respin_work *respin_work;

	respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
	if (!respin_work)
		return;

	INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
	respin_work->mlxsw_sp = mlxsw_sp;

	mlxsw_core_schedule_work(&respin_work->work);
}

/* switchdev object add entry point: dispatch VLAN and MDB additions */
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	const struct switchdev_obj_port_vlan *vlan;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans,
					      extack);

		if (switchdev_trans_ph_prepare(trans)) {
			/* The event is emitted before the changes are actually
			 * applied to the bridge. Therefore schedule the respin
			 * call for later, so that the respin logic sees the
			 * updated bridge state.
			 */
			mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
		}
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

/* Remove one VID from a bridge port and undo its VLAN/PVID configuration.
 * If the VID being deleted is the current PVID, the PVID is cleared (0).
 */
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}

/* SWITCHDEV_OBJ_ID_PORT_VLAN delete handler; bridge-device VLAN objects are
 * not offloaded and per-VID deletion failures are not propagated.
 */
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (netif_is_bridge_master(orig_dev))
		return -EOPNOTSUPP;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
		mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);

	return 0;
}

/* Remove a port from a multicast group: clear its SMID bit (unless it is an
 * mrouter with snooping enabled, in which case the bit stays for flooding)
 * and drop it from the group's membership. Errors are logged, and the
 * remove-from-mid result is returned.
 */
static int
__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
			struct mlxsw_sp_bridge_port *bridge_port,
			struct mlxsw_sp_mid *mid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if
	   (bridge_port->bridge_device->multicast_enabled &&
	    !bridge_port->mrouter) {
		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
		if (err)
			netdev_err(dev, "Unable to remove port from SMID\n");
	}

	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	if (err)
		netdev_err(dev, "Unable to remove MC SFD\n");

	return err;
}

/* SWITCHDEV_OBJ_ID_PORT_MDB delete handler: resolve the group by
 * {MAC, FID} and remove this port from it.
 */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
}

/* Remove a departing port from all of its bridge's multicast groups; an
 * mrouter port additionally has its flood bit cleared from groups it was
 * not an explicit member of (the mrouter-added bit).
 */
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mid *mid, *tmp;

	bridge_device = bridge_port->bridge_device;

	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
						mid);
		} else if (bridge_device->multicast_enabled &&
			   bridge_port->mrouter) {
			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
		}
	}
}

/* switchdev object delete entry point; a SPAN respin is scheduled
 * unconditionally since the deletion may affect mirroring state.
 */
static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);

	return err;
}

/* Return any current member port of the LAG, or NULL if it has none */
static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

/* Per-port switchdev attribute callbacks */
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
};

/* 802.1Q (VLAN-aware) bridge: port join callback. VLAN uppers may not be
 * enslaved, and the port's default VLAN stops acting as a router interface.
 */
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	if (is_vlan_dev(bridge_port->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;
}

/* 802.1Q bridge: port leave callback */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}

/* 802.1Q bridge: attach a VxLAN device by enabling NVE on the 802.1Q FID
 * that corresponds to the VxLAN device's mapped VLAN.
 */
static int
mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev, u16 vid,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_nve_params params = {
		.type = MLXSW_SP_NVE_TYPE_VXLAN,
		.vni = vxlan->cfg.vni,
		.dev = vxlan_dev,
	};
	struct mlxsw_sp_fid *fid;
	int err;

	/* If the VLAN is 0, we need to find the VLAN that is configured as
	 * PVID and egress untagged on the bridge port of the VxLAN device.
	 * It is possible no such VLAN exists
	 */
	if (!vid) {
		err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
		if (err || !vid)
			return err;
	}

	/* If no other port is member in the VLAN, then the FID does not exist.
	 * NVE will be enabled on the FID once a port joins the VLAN
	 */
	fid = mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
	if (!fid)
		return 0;

	/* Only one VNI may be mapped to a FID at a time */
	if (mlxsw_sp_fid_vni_is_set(fid)) {
		err = -EINVAL;
		goto err_vni_exists;
	}

	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
	if (err)
		goto err_nve_fid_enable;

	/* The tunnel port does not hold a reference on the FID.
	 * Only local ports and the router port
	 */
	mlxsw_sp_fid_put(fid);

	return 0;

err_nve_fid_enable:
err_vni_exists:
	mlxsw_sp_fid_put(fid);
	return err;
}

/* Find the VxLAN lower device of br_dev whose mapped VLAN equals vid,
 * or NULL when there is none.
 */
static struct net_device *
mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || pvid != vid)
			continue;

		return dev;
	}

	return NULL;
}

/* 802.1Q bridge: get (or create) the FID for a VID, and if a running VxLAN
 * device maps that VID, join it to the FID as well.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct net_device *vxlan_dev;
	struct mlxsw_sp_fid *fid;
	int err;

	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
	if (IS_ERR(fid))
		return fid;

	/* VNI already mapped — nothing more to do */
	if (mlxsw_sp_fid_vni_is_set(fid))
		return fid;

	/* Find the VxLAN device that has the specified VLAN configured as
	 * PVID and egress untagged.
	 * There can be at most one such device
	 */
	vxlan_dev = mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev,
							 vid);
	if (!vxlan_dev)
		return fid;

	if (!netif_running(vxlan_dev))
		return fid;

	err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
					       extack);
	if (err)
		goto err_vxlan_join;

	return fid;

err_vxlan_join:
	mlxsw_sp_fid_put(fid);
	return ERR_PTR(err);
}

/* 802.1Q bridge: look up the FID for a VID without creating it */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
}

/* 802.1Q bridge: map a FID back to its VID */
static u16
mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return mlxsw_sp_fid_8021q_vid(fid);
}

/* Callback set for VLAN-aware (802.1Q) bridges */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};

/* True if any of the port's VLANs is already enslaved to br_dev */
static bool
mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
			   const struct net_device *br_dev)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
			    list) {
		if (mlxsw_sp_port_vlan->bridge_port &&
		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
		    br_dev)
			return true;
	}

	return false;
}

/* 802.1D (VLAN-unaware) bridge: port join callback. The enslaved device is
 * either a VLAN upper (use its VID) or the port itself (default VID); two
 * VLAN uppers of the same port may not be members of the same bridge.
 */
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;

	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					      extack);
}

/* 802.1D bridge: port leave callback — mirror of the join above */
static void
mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;

	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
}

/* 802.1D bridge: attach a VxLAN device by enabling NVE on the bridge's
 * 802.1D FID (looked up by the bridge netdev's ifindex).
 */
static int
mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev, u16 vid,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_nve_params params = {
		.type = MLXSW_SP_NVE_TYPE_VXLAN,
		.vni = vxlan->cfg.vni,
		.dev = vxlan_dev,
	};
	struct mlxsw_sp_fid *fid;
	int err;

	fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
	if (!fid)
		return -EINVAL;

	/* Only one VNI may be mapped to a FID at a time */
	if (mlxsw_sp_fid_vni_is_set(fid)) {
		err = -EINVAL;
		goto err_vni_exists;
	}

	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
	if (err)
		goto err_nve_fid_enable;

	/* The tunnel port does not hold a reference on the FID.
Only 2230 * local ports and the router port 2231 */ 2232 mlxsw_sp_fid_put(fid); 2233 2234 return 0; 2235 2236 err_nve_fid_enable: 2237 err_vni_exists: 2238 mlxsw_sp_fid_put(fid); 2239 return err; 2240 } 2241 2242 static struct mlxsw_sp_fid * 2243 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device, 2244 u16 vid, struct netlink_ext_ack *extack) 2245 { 2246 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); 2247 struct net_device *vxlan_dev; 2248 struct mlxsw_sp_fid *fid; 2249 int err; 2250 2251 fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex); 2252 if (IS_ERR(fid)) 2253 return fid; 2254 2255 if (mlxsw_sp_fid_vni_is_set(fid)) 2256 return fid; 2257 2258 vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev); 2259 if (!vxlan_dev) 2260 return fid; 2261 2262 if (!netif_running(vxlan_dev)) 2263 return fid; 2264 2265 err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, 0, 2266 extack); 2267 if (err) 2268 goto err_vxlan_join; 2269 2270 return fid; 2271 2272 err_vxlan_join: 2273 mlxsw_sp_fid_put(fid); 2274 return ERR_PTR(err); 2275 } 2276 2277 static struct mlxsw_sp_fid * 2278 mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device, 2279 u16 vid) 2280 { 2281 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); 2282 2283 /* The only valid VLAN for a VLAN-unaware bridge is 0 */ 2284 if (vid) 2285 return NULL; 2286 2287 return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex); 2288 } 2289 2290 static u16 2291 mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device, 2292 const struct mlxsw_sp_fid *fid) 2293 { 2294 return 0; 2295 } 2296 2297 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = { 2298 .port_join = mlxsw_sp_bridge_8021d_port_join, 2299 .port_leave = mlxsw_sp_bridge_8021d_port_leave, 2300 .vxlan_join = mlxsw_sp_bridge_8021d_vxlan_join, 2301 .fid_get = mlxsw_sp_bridge_8021d_fid_get, 2302 .fid_lookup = 
		      mlxsw_sp_bridge_8021d_fid_lookup,
	.fid_vid = mlxsw_sp_bridge_8021d_fid_vid,
};

/* Join 'brport_dev' (a port, or its LAG/VLAN upper) to the bridge 'br_dev',
 * instantiating the bridge port if this is its first user.
 */
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct net_device *brport_dev,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
	if (IS_ERR(bridge_port))
		return PTR_ERR(bridge_port);
	bridge_device = bridge_port->bridge_device;

	err = bridge_device->ops->port_join(bridge_device, bridge_port,
					    mlxsw_sp_port, extack);
	if (err)
		goto err_port_join;

	return 0;

err_port_join:
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
	return err;
}

/* Undo mlxsw_sp_port_bridge_join() and drop the bridge port reference. */
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				struct net_device *brport_dev,
				struct net_device *br_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;
	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
	if (!bridge_port)
		return;

	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}

/* Map 'vid' (802.1Q) or the bridge FID (802.1D) to the VNI of 'vxlan_dev'. */
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *br_dev,
			       const struct net_device *vxlan_dev, u16 vid,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
					      extack);
}

/* Disable NVE on the FID currently mapped to the VNI of 'vxlan_dev'. */
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *vxlan_dev)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_fid *fid;

	/* If the VxLAN device is down, then the FID does not have a VNI */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
	if (!fid)
		return;

	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
	mlxsw_sp_fid_put(fid);
}

/* Get a referenced FID for 'vid' on 'br_dev' via the bridge's fid_get op. */
struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
					     const struct net_device *br_dev,
					     u16 vid,
					     struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return ERR_PTR(-EINVAL);

	return bridge_device->ops->fid_get(bridge_device, vid, extack);
}

/* Convert a VxLAN address to the driver's L3 protocol / address pair. */
static void
mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
				      enum mlxsw_sp_l3proto *proto,
				      union mlxsw_sp_l3addr *addr)
{
	if (vxlan_addr->sa.sa_family == AF_INET) {
		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
		*proto = MLXSW_SP_L3_PROTO_IPV4;
	} else {
		addr->addr6 = vxlan_addr->sin6.sin6_addr;
		*proto = MLXSW_SP_L3_PROTO_IPV6;
	}
}

/* Inverse of mlxsw_sp_switchdev_vxlan_addr_convert(). */
static void
mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
				      const union mlxsw_sp_l3addr *addr,
				      union vxlan_addr *vxlan_addr)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		vxlan_addr->sa.sa_family = AF_INET;
		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		vxlan_addr->sa.sa_family = AF_INET6;
		vxlan_addr->sin6.sin6_addr = addr->addr6;
		break;
	}
}

/* Notify the VxLAN driver about an FDB entry learned / aged-out over the
 * tunnel so its software FDB stays in sync with the device.
 */
static void mlxsw_sp_fdb_vxlan_call_notifiers(struct
					      net_device *dev,
					      const char *mac,
					      enum mlxsw_sp_l3proto proto,
					      union mlxsw_sp_l3addr *addr,
					      __be32 vni, bool adding)
{
	struct switchdev_notifier_vxlan_fdb_info info;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	enum switchdev_notifier_type type;

	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
	info.remote_port = vxlan->cfg.dst_port;
	info.remote_vni = vni;
	info.remote_ifindex = 0;
	ether_addr_copy(info.eth_addr, mac);
	info.vni = vni;
	info.offloaded = adding;
	call_switchdev_notifiers(type, dev, &info.info);
}

/* Dispatch a tunnel FDB notification to the matching NVE device type.
 * Currently only VxLAN devices are handled.
 */
static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
					    const char *mac,
					    enum mlxsw_sp_l3proto proto,
					    union mlxsw_sp_l3addr *addr,
					    __be32 vni,
					    bool adding)
{
	if (netif_is_vxlan(dev))
		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
						  adding);
}

/* Emit a switchdev FDB notification on behalf of the device. */
static void
mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
			    const char *mac, u16 vid,
			    struct net_device *dev, bool offloaded)
{
	struct switchdev_notifier_fdb_info info;

	info.addr = mac;
	info.vid = vid;
	info.offloaded = offloaded;
	call_switchdev_notifiers(type, dev, &info.info);
}

/* Process one learned / aged-out MAC record from the SFN register and
 * reflect it to the bridge. When the record cannot be matched to a bridge
 * port, the entry is removed from the device without notification
 * ('just_remove' path), so the device does not keep re-sending it.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	/* VLAN-unaware bridges notify with VID 0 */
	bridge_device = bridge_port->bridge_device;
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

/* Same as mlxsw_sp_fdb_notify_mac_process(), but for records learned on a
 * LAG; the record carries a LAG ID instead of a local port.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
		  mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

/* Resolve the NVE device, bridge VID and VNI for a tunnel FDB record.
 * Returns 0 only when the record should be reflected to the bridge.
 */
static int
__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
					    const struct mlxsw_sp_fid *fid,
					    bool adding,
					    struct net_device **nve_dev,
					    u16 *p_vid, __be32 *p_vni)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *br_dev, *dev;
	int nve_ifindex;
	int err;

	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
	if (err)
		return err;

	err = mlxsw_sp_fid_vni(fid, p_vni);
	if (err)
		return err;

	dev = __dev_get_by_index(&init_net, nve_ifindex);
	if (!dev)
		return -EINVAL;
	*nve_dev = dev;

	if (!netif_running(dev))
		return -EINVAL;

	/* Only reflect newly learned entries if learning is enabled on both
	 * the bridge port and the VxLAN device.
	 */
	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
		return -EINVAL;

	if (adding && netif_is_vxlan(dev)) {
		struct vxlan_dev *vxlan = netdev_priv(dev);

		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
			return -EINVAL;
	}

	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return -EINVAL;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return -EINVAL;

	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);

	return 0;
}

/* Process one learned / aged-out unicast tunnel record from the SFN
 * register and reflect it to the NVE driver and the bridge.
 */
static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
						      char *sfn_pl,
						      int rec_index,
						      bool
						      adding)
{
	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
	enum switchdev_notifier_type type;
	struct net_device *nve_dev;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	char mac[ETH_ALEN];
	u16 fid_index, vid;
	__be32 vni;
	u32 uip;
	int err;

	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
				       &uip, &sfn_proto);

	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
	if (!fid)
		goto err_fid_lookup;

	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
					      (enum mlxsw_sp_l3proto) sfn_proto,
					      &addr);
	if (err)
		goto err_ip_resolve;

	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
							  &nve_dev, &vid, &vni);
	if (err)
		goto err_fdb_process;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
					     (enum mlxsw_sp_l3proto) sfn_proto,
					     &addr, adding, true);
	if (err)
		goto err_fdb_op;

	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
					(enum mlxsw_sp_l3proto) sfn_proto,
					&addr, vni, adding);

	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_op:
err_fdb_process:
err_ip_resolve:
	mlxsw_sp_fid_put(fid);
err_fid_lookup:
	/* Remove an FDB entry in case we cannot process it. Otherwise the
	 * device will keep sending the same notification over and over again.
	 * NOTE(review): on the err_fid_lookup / err_ip_resolve paths 'addr'
	 * was never resolved and is passed uninitialized - confirm the
	 * removal operation ignores it for deletions.
	 */
	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
				       false, true);
}

/* Demultiplex one SFN record by its type. */
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
							  rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
							  rec_index, false);
		break;
	}
}

/* Re-arm the delayed work that polls the device for FDB notifications. */
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;

	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
			       msecs_to_jiffies(bridge->fdb_notify.interval));
}

/* Delayed work: query the SFN register and process all pending FDB
 * notification records under RTNL, then re-schedule itself.
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

/* Deferred-work context for an FDB event taken off the atomic notifier
 * chain. The union mirrors the two notifier info types that can be queued.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	union {
		struct switchdev_notifier_fdb_info fdb_info;
		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	};
	struct net_device *dev;
	unsigned long event;
};

/* Program (or remove) a bridge FDB entry that points at a VxLAN device and
 * notify the VxLAN driver and the bridge about the offload state.
 */
static void
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_switchdev_event_work *
					  switchdev_work,
					  struct mlxsw_sp_fid *fid, __be32 vni)
{
	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct net_device *dev = switchdev_work->dev;
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	int err;

	fdb_info = &switchdev_work->fdb_info;
	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
	if (err)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
					      &proto, &addr);

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, true, false);
		if (err)
			return;
		vxlan_fdb_info.offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info);
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    vxlan_fdb_info.eth_addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* NOTE(review): the removal result is intentionally not
		 * checked; the un-offload notification is sent regardless.
		 */
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, false,
						     false);
		vxlan_fdb_info.offloaded = false;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info);
		break;
	}
}

/* Handle a bridge FDB add/del event whose target device is an NVE (VxLAN)
 * device. Bails out silently unless the event is user-originated (for adds)
 * and the device is a running member of an offloaded bridge whose FID has a
 * VNI.
 */
static void
mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
					switchdev_work)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_fid *fid;
	__be32 vni;
	int err;

	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
		return;

	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    !switchdev_work->fdb_info.added_by_user)
		return;

	if (!netif_running(dev))
		return;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return;
	if (!netif_is_bridge_master(br_dev))
		return;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return;
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = bridge_device->ops->fid_lookup(bridge_device,
					     switchdev_work->fdb_info.vid);
	if (!fid)
		return;

	err = mlxsw_sp_fid_vni(fid, &vni);
	if (err)
		goto out;

	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
						  vni);

out:
	mlxsw_sp_fid_put(fid);
}

/* Work item: apply a deferred bridge FDB event to the device under RTNL. */
static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	if (netif_is_vxlan(dev)) {
		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
		goto out;
	}

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user)
			break;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* These events are only used to potentially update an existing
		 * SPAN mirror.
		 */
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

/* Offload an FDB entry added to a VxLAN device. The all-zeros MAC denotes a
 * flood (default destination) entry; otherwise the entry is only programmed
 * when the bridge's FDB also points the MAC at the VxLAN device.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	u8 all_zeros_mac[ETH_ALEN] = { 0 };
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct net_device *br_dev;
	struct mlxsw_sp_fid *fid;
	u16 vid;
	int err;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	br_dev = netdev_master_upper_dev_get(dev);

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
		if (err) {
			mlxsw_sp_fid_put(fid);
			return;
		}
		vxlan_fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info->info);
		mlxsw_sp_fid_put(fid);
		return;
	}

	/* The device has a single FDB table, whereas Linux has two - one
	 * in the bridge driver and another in the VxLAN driver. We only
	 * program an entry to the device if the MAC points to the VxLAN
	 * device in the bridge's FDB table
	 */
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
		goto err_br_fdb_find;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
					     mlxsw_sp_fid_index(fid), proto,
					     &addr, true, false);
	if (err)
		goto err_fdb_tunnel_uc_op;
	vxlan_fdb_info->offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
				 &vxlan_fdb_info->info);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, true);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_tunnel_uc_op:
err_br_fdb_find:
	mlxsw_sp_fid_put(fid);
}

/* Remove a VxLAN FDB entry (or a flood address, for the all-zeros MAC) from
 * the device and notify about the lost offload.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
	u8 all_zeros_mac[ETH_ALEN] = { 0 };
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	u16 vid;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
		mlxsw_sp_fid_put(fid);
		return;
	}

	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
				       mlxsw_sp_fid_index(fid), proto, &addr,
				       false, false);
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, false);

	mlxsw_sp_fid_put(fid);
}

/* Work item: apply a deferred VxLAN FDB event to the device under RTNL. */
static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;

	rtnl_lock();

	if (!netif_running(dev))
		goto out;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		goto out;
	if (!netif_is_bridge_master(br_dev))
		goto out;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
		break;
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
		break;
	}

out:
	rtnl_unlock();
	kfree(switchdev_work);
	dev_put(dev);
}

/* Validate a VxLAN FDB notification and snapshot it into the work item.
 * Entries that do not match the device's configuration (UDP port, VNI) or
 * that cannot be offloaded (multicast MAC/IP, explicit egress ifindex) are
 * rejected with -EOPNOTSUPP.
 */
static int
mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
				      switchdev_work,
				      struct switchdev_notifier_info *info)
{
	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct vxlan_config *cfg = &vxlan->cfg;

	vxlan_fdb_info = container_of(info,
				      struct switchdev_notifier_vxlan_fdb_info,
				      info);

	if (vxlan_fdb_info->remote_port != cfg->dst_port)
		return -EOPNOTSUPP;
	if (vxlan_fdb_info->remote_vni != cfg->vni)
		return -EOPNOTSUPP;
	if (vxlan_fdb_info->vni != cfg->vni)
		return -EOPNOTSUPP;
	if (vxlan_fdb_info->remote_ifindex)
		return -EOPNOTSUPP;
	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr))
		return -EOPNOTSUPP;
	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip))
		return -EOPNOTSUPP;

	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;

	return 0;
}

/* Called under rcu_read_lock() */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct net_device *br_dev;
	int err;

	/* Tunnel devices are not our uppers, so check their master instead */
	br_dev = netdev_master_upper_dev_get_rcu(dev);
	if (!br_dev)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(br_dev))
		return NOTIFY_DONE;
	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* Deep-copy the MAC; the notifier's buffer is not ours to
		 * keep past this atomic context.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containing mlxsw_sp_port or just a
		 * mlxsw_sp_port
		 */
		dev_hold(dev);
		break;
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
							    info);
		if (err)
			goto err_vxlan_work_prepare;
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_vxlan_work_prepare:
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};

/* Handle a VLAN being added on a VxLAN device lowered to an offloaded
 * 802.1Q bridge: map / unmap the VLAN's FID to the device's VNI according
 * to the new PVID / egress-untagged flags.
 */
static int
mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid,
				  bool flag_untagged, bool flag_pvid,
				  struct switchdev_trans *trans,
				  struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	__be32 vni = vxlan->cfg.vni;
	struct mlxsw_sp_fid *fid;
	u16 old_vid;
	int err;

	/* We cannot have the same VLAN as PVID and egress untagged on multiple
	 * VxLAN devices. Note that we get this notification before the VLAN is
	 * actually added to the bridge's database, so it is not possible for
	 * the lookup function to return 'vxlan_dev'
	 */
	if (flag_untagged && flag_pvid &&
	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid))
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (!netif_running(vxlan_dev))
		return 0;

	/* First case: FID is not associated with this VNI, but the new VLAN
	 * is both PVID and egress untagged. Need to enable NVE on the FID, if
	 * it exists
	 */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
	if (!fid) {
		if (!flag_untagged || !flag_pvid)
			return 0;
		return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device,
							vxlan_dev, vid, extack);
	}

	/* Second case: FID is associated with the VNI and the VLAN associated
	 * with the FID is the same as the notified VLAN. This means the flags
	 * (PVID / egress untagged) were toggled and that NVE should be
	 * disabled on the FID
	 */
	old_vid = mlxsw_sp_fid_8021q_vid(fid);
	if (vid == old_vid) {
		if (WARN_ON(flag_untagged && flag_pvid)) {
			mlxsw_sp_fid_put(fid);
			return -EINVAL;
		}
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Third case: A new VLAN was configured on the VxLAN device, but this
	 * VLAN is not PVID, so there is nothing to do.
	 */
	if (!flag_pvid) {
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Fourth case: The new VLAN is PVID, which means the VLAN currently
	 * mapped to the VNI should be unmapped
	 */
	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
	mlxsw_sp_fid_put(fid);

	/* Fifth case: The new VLAN is also egress untagged, which means the
	 * VLAN needs to be mapped to the VNI
	 */
	if (!flag_untagged)
		return 0;

	err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
					       extack);
	if (err)
		goto err_vxlan_join;

	return 0;

err_vxlan_join:
	/* Best-effort rollback to the previous VLAN <-> VNI mapping */
	mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid,
					 NULL);
	return err;
}

/* Handle a VLAN being deleted from a VxLAN device: unmap the VNI if the
 * deleted VLAN is the one currently mapped to it.
 */
static void
mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	__be32 vni = vxlan->cfg.vni;
	struct mlxsw_sp_fid *fid;

	if (!netif_running(vxlan_dev))
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
	if (!fid)
		return;

	/* A different VLAN than the one mapped to the VNI is deleted */
	if (mlxsw_sp_fid_8021q_vid(fid) != vid)
		goto out;

	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);

out:
	mlxsw_sp_fid_put(fid);
}

/* Handle a port_obj VLAN add on a VxLAN device (continues past this chunk). */
static int
mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
				   struct switchdev_notifier_port_obj_info *
				   port_obj_info)
{
	struct switchdev_obj_port_vlan *vlan =
		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct switchdev_trans *trans = port_obj_info->trans;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct netlink_ext_ack *extack;
	struct
mlxsw_sp *mlxsw_sp; 3334 struct net_device *br_dev; 3335 u16 vid; 3336 3337 extack = switchdev_notifier_info_to_extack(&port_obj_info->info); 3338 br_dev = netdev_master_upper_dev_get(vxlan_dev); 3339 if (!br_dev) 3340 return 0; 3341 3342 mlxsw_sp = mlxsw_sp_lower_get(br_dev); 3343 if (!mlxsw_sp) 3344 return 0; 3345 3346 port_obj_info->handled = true; 3347 3348 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); 3349 if (!bridge_device) 3350 return -EINVAL; 3351 3352 if (!bridge_device->vlan_enabled) 3353 return 0; 3354 3355 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { 3356 int err; 3357 3358 err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device, 3359 vxlan_dev, vid, 3360 flag_untagged, 3361 flag_pvid, trans, 3362 extack); 3363 if (err) 3364 return err; 3365 } 3366 3367 return 0; 3368 } 3369 3370 static void 3371 mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev, 3372 struct switchdev_notifier_port_obj_info * 3373 port_obj_info) 3374 { 3375 struct switchdev_obj_port_vlan *vlan = 3376 SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj); 3377 struct mlxsw_sp_bridge_device *bridge_device; 3378 struct mlxsw_sp *mlxsw_sp; 3379 struct net_device *br_dev; 3380 u16 vid; 3381 3382 br_dev = netdev_master_upper_dev_get(vxlan_dev); 3383 if (!br_dev) 3384 return; 3385 3386 mlxsw_sp = mlxsw_sp_lower_get(br_dev); 3387 if (!mlxsw_sp) 3388 return; 3389 3390 port_obj_info->handled = true; 3391 3392 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); 3393 if (!bridge_device) 3394 return; 3395 3396 if (!bridge_device->vlan_enabled) 3397 return; 3398 3399 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) 3400 mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, 3401 vxlan_dev, vid); 3402 } 3403 3404 static int 3405 mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev, 3406 struct switchdev_notifier_port_obj_info * 3407 port_obj_info) 3408 { 3409 int err = 0; 3410 3411 switch 
(port_obj_info->obj->id) { 3412 case SWITCHDEV_OBJ_ID_PORT_VLAN: 3413 err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev, 3414 port_obj_info); 3415 break; 3416 default: 3417 break; 3418 } 3419 3420 return err; 3421 } 3422 3423 static void 3424 mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev, 3425 struct switchdev_notifier_port_obj_info * 3426 port_obj_info) 3427 { 3428 switch (port_obj_info->obj->id) { 3429 case SWITCHDEV_OBJ_ID_PORT_VLAN: 3430 mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info); 3431 break; 3432 default: 3433 break; 3434 } 3435 } 3436 3437 static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused, 3438 unsigned long event, void *ptr) 3439 { 3440 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 3441 int err = 0; 3442 3443 switch (event) { 3444 case SWITCHDEV_PORT_OBJ_ADD: 3445 if (netif_is_vxlan(dev)) 3446 err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr); 3447 else 3448 err = switchdev_handle_port_obj_add(dev, ptr, 3449 mlxsw_sp_port_dev_check, 3450 mlxsw_sp_port_obj_add); 3451 return notifier_from_errno(err); 3452 case SWITCHDEV_PORT_OBJ_DEL: 3453 if (netif_is_vxlan(dev)) 3454 mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr); 3455 else 3456 err = switchdev_handle_port_obj_del(dev, ptr, 3457 mlxsw_sp_port_dev_check, 3458 mlxsw_sp_port_obj_del); 3459 return notifier_from_errno(err); 3460 } 3461 3462 return NOTIFY_DONE; 3463 } 3464 3465 static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = { 3466 .notifier_call = mlxsw_sp_switchdev_blocking_event, 3467 }; 3468 3469 u8 3470 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port) 3471 { 3472 return bridge_port->stp_state; 3473 } 3474 3475 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) 3476 { 3477 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge; 3478 struct notifier_block *nb; 3479 int err; 3480 3481 err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME); 3482 if (err) { 3483 
dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n"); 3484 return err; 3485 } 3486 3487 err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier); 3488 if (err) { 3489 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n"); 3490 return err; 3491 } 3492 3493 nb = &mlxsw_sp_switchdev_blocking_notifier; 3494 err = register_switchdev_blocking_notifier(nb); 3495 if (err) { 3496 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n"); 3497 goto err_register_switchdev_blocking_notifier; 3498 } 3499 3500 INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work); 3501 bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; 3502 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); 3503 return 0; 3504 3505 err_register_switchdev_blocking_notifier: 3506 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier); 3507 return err; 3508 } 3509 3510 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) 3511 { 3512 struct notifier_block *nb; 3513 3514 cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw); 3515 3516 nb = &mlxsw_sp_switchdev_blocking_notifier; 3517 unregister_switchdev_blocking_notifier(nb); 3518 3519 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier); 3520 } 3521 3522 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) 3523 { 3524 struct mlxsw_sp_bridge *bridge; 3525 3526 bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL); 3527 if (!bridge) 3528 return -ENOMEM; 3529 mlxsw_sp->bridge = bridge; 3530 bridge->mlxsw_sp = mlxsw_sp; 3531 3532 INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list); 3533 3534 bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops; 3535 bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops; 3536 3537 return mlxsw_sp_fdb_init(mlxsw_sp); 3538 } 3539 3540 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) 3541 { 3542 mlxsw_sp_fdb_fini(mlxsw_sp); 3543 WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); 3544 kfree(mlxsw_sp->bridge); 
3545 } 3546 3547 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) 3548 { 3549 mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; 3550 } 3551 3552 void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port) 3553 { 3554 } 3555