1 /* 2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c 3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> 5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> 6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 #include <linux/kernel.h> 38 #include <linux/types.h> 39 #include <linux/netdevice.h> 40 #include <linux/etherdevice.h> 41 #include <linux/slab.h> 42 #include <linux/device.h> 43 #include <linux/skbuff.h> 44 #include <linux/if_vlan.h> 45 #include <linux/if_bridge.h> 46 #include <linux/workqueue.h> 47 #include <linux/jiffies.h> 48 #include <linux/rtnetlink.h> 49 #include <linux/netlink.h> 50 #include <net/switchdev.h> 51 52 #include "spectrum_router.h" 53 #include "spectrum.h" 54 #include "core.h" 55 #include "reg.h" 56 57 struct mlxsw_sp_bridge_ops; 58 59 struct mlxsw_sp_bridge { 60 struct mlxsw_sp *mlxsw_sp; 61 struct { 62 struct delayed_work dw; 63 #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100 64 unsigned int interval; /* ms */ 65 } fdb_notify; 66 #define MLXSW_SP_MIN_AGEING_TIME 10 67 #define MLXSW_SP_MAX_AGEING_TIME 1000000 68 #define MLXSW_SP_DEFAULT_AGEING_TIME 300 69 u32 ageing_time; 70 bool vlan_enabled_exists; 71 struct list_head bridges_list; 72 DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX); 73 const struct mlxsw_sp_bridge_ops *bridge_8021q_ops; 74 const struct mlxsw_sp_bridge_ops *bridge_8021d_ops; 75 }; 76 77 struct mlxsw_sp_bridge_device { 78 struct net_device *dev; 79 struct list_head list; 80 struct list_head ports_list; 81 struct list_head mids_list; 82 u8 vlan_enabled:1, 83 multicast_enabled:1, 84 mrouter:1; 85 const struct mlxsw_sp_bridge_ops *ops; 86 }; 87 88 struct mlxsw_sp_bridge_port { 89 
	struct net_device *dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;		/* Member of bridge_device->ports_list */
	struct list_head vlans_list;	/* List of struct mlxsw_sp_bridge_vlan */
	unsigned int ref_count;
	u8 stp_state;
	unsigned long flags;		/* BR_* bridge port flags */
	bool mrouter;
	bool lagged;
	union {
		u16 lag_id;		/* Used when 'lagged' is true */
		u16 system_port;	/* Used when 'lagged' is false */
	};
};

/* Per-VLAN state of a bridge port. port_vlan_list links the
 * mlxsw_sp_port_vlan instances (one per member port) for this VID.
 */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* Member of bridge_port->vlans_list */
	struct list_head port_vlan_list;
	u16 vid;
};

/* Per-bridge-model (802.1Q / 802.1D) operations. */
struct mlxsw_sp_bridge_ops {
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid);
};

static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port);

static void
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct mlxsw_sp_bridge_device
				   *bridge_device);

static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add);

/* Look up the bookkeeping entry for bridge netdev 'br_dev'.
 * Returns NULL when the bridge is not tracked by this driver instance.
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
			    const struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
		if (bridge_device->dev == br_dev)
			return bridge_device;

	return NULL;
}

/* Allocate and register a bridge device entry for 'br_dev'. Only a single
 * VLAN-aware (802.1Q) bridge is supported per device; a second one is
 * rejected with -EINVAL. Returns ERR_PTR() on failure.
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
			      struct net_device *br_dev)
{
	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	bool vlan_enabled = br_vlan_enabled(br_dev);

	if (vlan_enabled && bridge->vlan_enabled_exists) {
		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
		return ERR_PTR(-EINVAL);
	}

	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
	if (!bridge_device)
		return ERR_PTR(-ENOMEM);

	bridge_device->dev = br_dev;
	bridge_device->vlan_enabled = vlan_enabled;
	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
	bridge_device->mrouter = br_multicast_router(br_dev);
	INIT_LIST_HEAD(&bridge_device->ports_list);
	if (vlan_enabled) {
		bridge->vlan_enabled_exists = true;
		bridge_device->ops = bridge->bridge_8021q_ops;
	} else {
		bridge_device->ops = bridge->bridge_8021d_ops;
	}
	INIT_LIST_HEAD(&bridge_device->mids_list);
	list_add(&bridge_device->list, &bridge->bridges_list);

	return bridge_device;
}

static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	list_del(&bridge_device->list);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	/* Ports and MDB entries must have been torn down by now. */
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mids_list));
	kfree(bridge_device);
}

/* Find-or-create wrapper around the two functions above. */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (bridge_device)
		return bridge_device;

	return mlxsw_sp_bridge_device_create(bridge, br_dev);
}

/* The bridge device is implicitly reference counted by the ports
 * enslaved to it: destroy it once the last port is gone.
 */
static void
mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
			   struct mlxsw_sp_bridge_device *bridge_device)
{
	if (list_empty(&bridge_device->ports_list))
		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
}

static struct mlxsw_sp_bridge_port *
__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
			    const struct net_device *brport_dev)
{
	struct mlxsw_sp_bridge_port *bridge_port;

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		if (bridge_port->dev == brport_dev)
			return bridge_port;
	}

	return NULL;
}

/* Resolve 'brport_dev' to its bridge port entry via its bridge master.
 * Returns NULL if the netdev has no master or the bridge is not tracked.
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
			  struct net_device *brport_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;

	if (!br_dev)
		return NULL;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (!bridge_device)
		return NULL;

	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
}

/* Allocate a bridge port entry for 'brport_dev' with ref_count of one.
 * Snapshots the underlying port's LAG membership so FDB flushing can
 * later address the correct system port / LAG ID.
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
			    struct net_device *brport_dev)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;

	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
	if (!bridge_port)
		return NULL;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
	bridge_port->lagged = mlxsw_sp_port->lagged;
	if (bridge_port->lagged)
		bridge_port->lag_id = mlxsw_sp_port->lag_id;
	else
		bridge_port->system_port = mlxsw_sp_port->local_port;
	bridge_port->dev = brport_dev;
	bridge_port->bridge_device = bridge_device;
	bridge_port->stp_state = BR_STATE_DISABLED;
	/* Defaults mirror the bridge's default brport flags. */
	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
			     BR_MCAST_FLOOD;
	INIT_LIST_HEAD(&bridge_port->vlans_list);
	list_add(&bridge_port->list, &bridge_device->ports_list);
	bridge_port->ref_count = 1;

	return bridge_port;
}

static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
	list_del(&bridge_port->list);
	WARN_ON(!list_empty(&bridge_port->vlans_list));
	kfree(bridge_port);
}

static bool
mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
				    bridge_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);

	/* In case ports were pulled from out of a bridged LAG, then
	 * it's possible the reference count isn't zero, yet the bridge
	 * port should be destroyed, as it's no longer an upper of ours.
	 */
	if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
		return true;
	else if (bridge_port->ref_count == 0)
		return true;
	else
		return false;
}

/* Find-or-create a bridge port entry; also takes a reference on the
 * owning bridge device when the port is newly created. Returns
 * ERR_PTR() on failure.
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
			 struct net_device *brport_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
	if (bridge_port) {
		bridge_port->ref_count++;
		return bridge_port;
	}

	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
	if (IS_ERR(bridge_device))
		return ERR_CAST(bridge_device);

	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
	if (!bridge_port) {
		err = -ENOMEM;
		goto err_bridge_port_create;
	}

	return bridge_port;

err_bridge_port_create:
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
	return ERR_PTR(err);
}

static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
				     struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_port->ref_count--;
	if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
		return;
	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_bridge_port_destroy(bridge_port);
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
}

/* Find the port-VLAN of 'mlxsw_sp_port' that is enslaved to
 * 'bridge_device'. For a VLAN-aware bridge, the VID must also match;
 * for a VLAN-unaware bridge any member VLAN qualifies.
 */
static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct mlxsw_sp_bridge_device *
				  bridge_device,
				  u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
			    list) {
		if (!mlxsw_sp_port_vlan->bridge_port)
			continue;
		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
		    bridge_device)
			continue;
		if (bridge_device->vlan_enabled &&
		    mlxsw_sp_port_vlan->vid != vid)
			continue;
		return mlxsw_sp_port_vlan;
	}

	return NULL;
}

static struct mlxsw_sp_port_vlan*
mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
			       u16 fid_index)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
			    list) {
		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
			return mlxsw_sp_port_vlan;
	}

	return NULL;
}

static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
			  u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		if (bridge_vlan->vid == vid)
			return bridge_vlan;
	}

	return NULL;
}

/* Allocate a per-port VLAN entry and link it into the bridge port. */
static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
	if (!bridge_vlan)
		return NULL;

	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
	bridge_vlan->vid = vid;
	list_add(&bridge_vlan->list, &bridge_port->vlans_list);

	return bridge_vlan;
}

static void
mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	list_del(&bridge_vlan->list);
	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
	kfree(bridge_vlan);
}

/* Find-or-create a bridge VLAN entry. Returns NULL on allocation
 * failure.
 */
static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	if (bridge_vlan)
		return bridge_vlan;

	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
}

/* Destroy the bridge VLAN once no port-VLAN references it. */
static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	if (list_empty(&bridge_vlan->port_vlan_list))
		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
}

/* Report the cached brport flags for 'dev' to switchdev. */
static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
					   struct net_device *dev,
					   unsigned long *brport_flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;

	bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
	if (WARN_ON(!bridge_port))
		return;

	memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
}

/* switchdev attr_get callback: parent switch ID, brport flags and the
 * set of brport flags the device can offload.
 */
static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
					       &attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
		attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
					       BR_MCAST_FLOOD;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Apply STP state for one VID, but only on the VLAN instance that
 * belongs to 'mlxsw_sp_port'. Returns 0 when the port is not a member.
 */
static int
mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
				  u8 state)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
						 bridge_vlan->vid, state);
	}

	return 0;
}

/* switchdev STP state attr: program the new state on every VLAN of the
 * bridge port, rolling back to the previous state on failure.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}

/* Set flood membership of this port in the FID backing one VLAN. */
static int
mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
				    enum mlxsw_sp_flood_type packet_type,
				    bool member)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
					      packet_type,
					      mlxsw_sp_port->local_port,
					      member);
	}

	return 0;
}

/* Apply flood membership across all VLANs of the bridge port, undoing
 * already-applied VLANs on failure.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}

/* Enable/disable learning for one VID on this port only. */
static int
mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
				       bool set)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 vid = bridge_vlan->vid;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
	}

	return 0;
}

/* Apply learning across all VLANs of the bridge port with rollback on
 * failure.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}

/* switchdev brport flags attr: program unicast flood, learning and -
 * unless multicast snooping is enabled, in which case MC flooding is
 * governed by the mrouter state - multicast flood, then cache the flags.
 */
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   struct net_device *orig_dev,
					   unsigned long brport_flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_UC,
						   brport_flags & BR_FLOOD);
	if (err)
		return err;

	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
						brport_flags & BR_LEARNING);
	if (err)
		return err;

	if (bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   brport_flags &
						   BR_MCAST_FLOOD);
	if (err)
		return err;

out:
	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
	return 0;
}

/* Write the FDB ageing time (in seconds) to the device via SFDAT and
 * cache it on success.
 */
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->bridge->ageing_time = ageing_time;
	return 0;
}

/* switchdev ageing attr: range-check in the prepare phase (clock ticks
 * converted to seconds), program in the commit phase.
 */
static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		else
			return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

/* switchdev VLAN filtering attr: toggling VLAN awareness of an offloaded
 * bridge is not supported, so veto any change in the prepare phase.
 */
static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	if (!switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	if (bridge_device->vlan_enabled == vlan_enabled)
		return 0;

	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
	return -EINVAL;
}

/* switchdev port mrouter attr: when multicast snooping is enabled on
 * the bridge, an mrouter port must receive all multicast traffic, so
 * its MC flood membership and MDB entries track the mrouter state. The
 * state is always cached for later use by mlxsw_sp_mc_flood().
 */
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool is_port_mrouter)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	if (!bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   is_port_mrouter);
	if (err)
		return err;

	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
					 is_port_mrouter);
out:
	bridge_port->mrouter = is_port_mrouter;
	return 0;
}

/* Should multicast be flooded to this bridge port? With snooping on,
 * only mrouter ports are flooded; with snooping off, the BR_MCAST_FLOOD
 * brport flag decides.
 */
static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
{
	const struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = bridge_port->bridge_device;
	return bridge_device->multicast_enabled ? bridge_port->mrouter :
					bridge_port->flags & BR_MCAST_FLOOD;
}

/* switchdev bridge mc_disabled attr: record the new snooping state,
 * resync MDB entries, then recompute MC flood membership for every port
 * of the bridge.
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct switchdev_trans *trans,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	/* Update the cached state first so mlxsw_sp_mc_flood() below
	 * evaluates against the new snooping mode.
	 */
	if (bridge_device->multicast_enabled != !mc_disabled) {
		bridge_device->multicast_enabled = !mc_disabled;
		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
						   bridge_device);
	}

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   packet_type, member);
		if (err)
			return err;
	}

	bridge_device->multicast_enabled = !mc_disabled;

	return 0;
}

/* Add/remove the router port to/from the MC group 'mid_idx' via SMID. */
static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
					 u16 mid_idx, bool add)
{
	char *smid_pl;
	int err;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid_idx,
			    mlxsw_sp_router_port(mlxsw_sp), add);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

/* Update the router port's membership in all MDB entries of a bridge. */
static void
mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device *bridge_device,
				   bool add)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &bridge_device->mids_list, list)
		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
}

/* switchdev bridge mrouter attr: mirror the bridge-level mrouter state
 * into the hardware MDB entries.
 */
static int
mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_trans *trans,
				  struct net_device *orig_dev,
				  bool is_mrouter)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	if (bridge_device->mrouter != is_mrouter)
		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
						   is_mrouter);
	bridge_device->mrouter = is_mrouter;
	return 0;
}

/* switchdev attr_set callback: dispatch to the per-attribute handlers
 * above.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

/* Bind a port-VLAN to the FID of its bridge: get the FID from the
 * bridge ops, set UC/MC/BC flood membership and map {port, vid} to the
 * FID. Unwinds in reverse order on failure.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	fid = bridge_device->ops->fid_get(bridge_device, vid);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast is always flooded to bridge port members. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}

/* Reverse of mlxsw_sp_port_vlan_fid_join(). */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid,
			       MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}

/* Compute the PVID to program after 'vid' is added: keep the current
 * PVID unless 'vid' is the new PVID, or clear it when the current PVID
 * is being re-added as tagged.
 */
static u16
mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
			     u16 vid, bool is_pvid)
{
	if (is_pvid)
		return vid;
	else if (mlxsw_sp_port->pvid == vid)
		return 0;	/* Dis-allow untagged packets */
	else
		return mlxsw_sp_port->pvid;
}

/* Enslave a port-VLAN to a bridge port: join the FID, program learning
 * and STP state for the VID, and link the port-VLAN into the bridge
 * VLAN. Also takes a reference on the bridge port.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port)
		return 0;

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}

/* Reverse of mlxsw_sp_port_vlan_bridge_join(): unlink the port-VLAN,
 * restore VID defaults, flush FDB records when this was the last port
 * in the VLAN and MDB records when this was the port's last VLAN, then
 * leave the FID and drop the bridge port reference.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port, last_vlan;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	last_vlan = list_is_singular(&bridge_port->vlans_list);
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	if (last_vlan)
		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}

/* Add one VID to a bridge port: create the port-VLAN, program VLAN
 * membership and PVID, then enslave it to the bridge. Unwinds in
 * reverse order on failure.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
	if (IS_ERR(mlxsw_sp_port_vlan))
		return PTR_ERR(mlxsw_sp_port_vlan);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
	return err;
}

/* switchdev SWITCHDEV_OBJ_ID_PORT_VLAN add handler. VLAN objects are
 * only offloaded on the (single) VLAN-aware bridge.
 */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		int err;

		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
						    vid, flag_untagged,
						    flag_pvid);
		if (err)
			return err;
	}

	return 0;
}

/* SFDF flush scope: per-LAG or per-port, always restricted by FID. */
static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
{
	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
}

/* Flush all FDB records of this bridge port in the given FID via SFDF. */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index)
{
	bool lagged = bridge_port->lagged;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];
	u16 system_port;

	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

/* Map dynamic/static FDB entries to the matching SFD record policy. */
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ?
MLXSW_REG_SFD_OP_WRITE_EDIT : 1191 MLXSW_REG_SFD_OP_WRITE_REMOVE; 1192 } 1193 1194 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, 1195 const char *mac, u16 fid, bool adding, 1196 enum mlxsw_reg_sfd_rec_action action, 1197 bool dynamic) 1198 { 1199 char *sfd_pl; 1200 int err; 1201 1202 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); 1203 if (!sfd_pl) 1204 return -ENOMEM; 1205 1206 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); 1207 mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), 1208 mac, fid, action, local_port); 1209 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 1210 kfree(sfd_pl); 1211 1212 return err; 1213 } 1214 1215 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, 1216 const char *mac, u16 fid, bool adding, 1217 bool dynamic) 1218 { 1219 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, 1220 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic); 1221 } 1222 1223 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, 1224 bool adding) 1225 { 1226 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding, 1227 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER, 1228 false); 1229 } 1230 1231 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, 1232 const char *mac, u16 fid, u16 lag_vid, 1233 bool adding, bool dynamic) 1234 { 1235 char *sfd_pl; 1236 int err; 1237 1238 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); 1239 if (!sfd_pl) 1240 return -ENOMEM; 1241 1242 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); 1243 mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), 1244 mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, 1245 lag_vid, lag_id); 1246 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 1247 kfree(sfd_pl); 1248 1249 return err; 1250 } 1251 1252 static int 1253 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port, 1254 struct switchdev_notifier_fdb_info *fdb_info, 
bool adding) 1255 { 1256 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1257 struct net_device *orig_dev = fdb_info->info.dev; 1258 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1259 struct mlxsw_sp_bridge_device *bridge_device; 1260 struct mlxsw_sp_bridge_port *bridge_port; 1261 u16 fid_index, vid; 1262 1263 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1264 if (!bridge_port) 1265 return -EINVAL; 1266 1267 bridge_device = bridge_port->bridge_device; 1268 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1269 bridge_device, 1270 fdb_info->vid); 1271 if (!mlxsw_sp_port_vlan) 1272 return 0; 1273 1274 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1275 vid = mlxsw_sp_port_vlan->vid; 1276 1277 if (!bridge_port->lagged) 1278 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 1279 bridge_port->system_port, 1280 fdb_info->addr, fid_index, 1281 adding, false); 1282 else 1283 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, 1284 bridge_port->lag_id, 1285 fdb_info->addr, fid_index, 1286 vid, adding, false); 1287 } 1288 1289 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, 1290 u16 fid, u16 mid_idx, bool adding) 1291 { 1292 char *sfd_pl; 1293 int err; 1294 1295 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); 1296 if (!sfd_pl) 1297 return -ENOMEM; 1298 1299 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); 1300 mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, 1301 MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx); 1302 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 1303 kfree(sfd_pl); 1304 return err; 1305 } 1306 1307 static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx, 1308 long *ports_bitmap, 1309 bool set_router_port) 1310 { 1311 char *smid_pl; 1312 int err, i; 1313 1314 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); 1315 if (!smid_pl) 1316 return -ENOMEM; 1317 1318 mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false); 1319 for (i = 1; i < 
mlxsw_core_max_ports(mlxsw_sp->core); i++) { 1320 if (mlxsw_sp->ports[i]) 1321 mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); 1322 } 1323 1324 mlxsw_reg_smid_port_mask_set(smid_pl, 1325 mlxsw_sp_router_port(mlxsw_sp), 1); 1326 1327 for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core)) 1328 mlxsw_reg_smid_port_set(smid_pl, i, 1); 1329 1330 mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp), 1331 set_router_port); 1332 1333 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); 1334 kfree(smid_pl); 1335 return err; 1336 } 1337 1338 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, 1339 u16 mid_idx, bool add) 1340 { 1341 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1342 char *smid_pl; 1343 int err; 1344 1345 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); 1346 if (!smid_pl) 1347 return -ENOMEM; 1348 1349 mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add); 1350 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); 1351 kfree(smid_pl); 1352 return err; 1353 } 1354 1355 static struct 1356 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device, 1357 const unsigned char *addr, 1358 u16 fid) 1359 { 1360 struct mlxsw_sp_mid *mid; 1361 1362 list_for_each_entry(mid, &bridge_device->mids_list, list) { 1363 if (ether_addr_equal(mid->addr, addr) && mid->fid == fid) 1364 return mid; 1365 } 1366 return NULL; 1367 } 1368 1369 static void 1370 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp, 1371 struct mlxsw_sp_bridge_port *bridge_port, 1372 unsigned long *ports_bitmap) 1373 { 1374 struct mlxsw_sp_port *mlxsw_sp_port; 1375 u64 max_lag_members, i; 1376 int lag_id; 1377 1378 if (!bridge_port->lagged) { 1379 set_bit(bridge_port->system_port, ports_bitmap); 1380 } else { 1381 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 1382 MAX_LAG_MEMBERS); 1383 lag_id = bridge_port->lag_id; 1384 for (i = 0; i < max_lag_members; i++) { 1385 mlxsw_sp_port = 
mlxsw_sp_port_lagged_get(mlxsw_sp, 1386 lag_id, i); 1387 if (mlxsw_sp_port) 1388 set_bit(mlxsw_sp_port->local_port, 1389 ports_bitmap); 1390 } 1391 } 1392 } 1393 1394 static void 1395 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap, 1396 struct mlxsw_sp_bridge_device *bridge_device, 1397 struct mlxsw_sp *mlxsw_sp) 1398 { 1399 struct mlxsw_sp_bridge_port *bridge_port; 1400 1401 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) { 1402 if (bridge_port->mrouter) { 1403 mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp, 1404 bridge_port, 1405 flood_bitmap); 1406 } 1407 } 1408 } 1409 1410 static bool 1411 mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, 1412 struct mlxsw_sp_mid *mid, 1413 struct mlxsw_sp_bridge_device *bridge_device) 1414 { 1415 long *flood_bitmap; 1416 int num_of_ports; 1417 int alloc_size; 1418 u16 mid_idx; 1419 int err; 1420 1421 mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap, 1422 MLXSW_SP_MID_MAX); 1423 if (mid_idx == MLXSW_SP_MID_MAX) 1424 return false; 1425 1426 num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core); 1427 alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports); 1428 flood_bitmap = kzalloc(alloc_size, GFP_KERNEL); 1429 if (!flood_bitmap) 1430 return false; 1431 1432 bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports); 1433 mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp); 1434 1435 mid->mid = mid_idx; 1436 err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap, 1437 bridge_device->mrouter); 1438 kfree(flood_bitmap); 1439 if (err) 1440 return false; 1441 1442 err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx, 1443 true); 1444 if (err) 1445 return false; 1446 1447 set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap); 1448 mid->in_hw = true; 1449 return true; 1450 } 1451 1452 static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp, 1453 struct mlxsw_sp_mid *mid) 1454 { 1455 if (!mid->in_hw) 1456 return 0; 1457 1458 
clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); 1459 mid->in_hw = false; 1460 return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid, 1461 false); 1462 } 1463 1464 static struct 1465 mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, 1466 struct mlxsw_sp_bridge_device *bridge_device, 1467 const unsigned char *addr, 1468 u16 fid) 1469 { 1470 struct mlxsw_sp_mid *mid; 1471 size_t alloc_size; 1472 1473 mid = kzalloc(sizeof(*mid), GFP_KERNEL); 1474 if (!mid) 1475 return NULL; 1476 1477 alloc_size = sizeof(unsigned long) * 1478 BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core)); 1479 1480 mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL); 1481 if (!mid->ports_in_mid) 1482 goto err_ports_in_mid_alloc; 1483 1484 ether_addr_copy(mid->addr, addr); 1485 mid->fid = fid; 1486 mid->in_hw = false; 1487 1488 if (!bridge_device->multicast_enabled) 1489 goto out; 1490 1491 if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device)) 1492 goto err_write_mdb_entry; 1493 1494 out: 1495 list_add_tail(&mid->list, &bridge_device->mids_list); 1496 return mid; 1497 1498 err_write_mdb_entry: 1499 kfree(mid->ports_in_mid); 1500 err_ports_in_mid_alloc: 1501 kfree(mid); 1502 return NULL; 1503 } 1504 1505 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port, 1506 struct mlxsw_sp_mid *mid) 1507 { 1508 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1509 int err = 0; 1510 1511 clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); 1512 if (bitmap_empty(mid->ports_in_mid, 1513 mlxsw_core_max_ports(mlxsw_sp->core))) { 1514 err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid); 1515 list_del(&mid->list); 1516 kfree(mid->ports_in_mid); 1517 kfree(mid); 1518 } 1519 return err; 1520 } 1521 1522 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, 1523 const struct switchdev_obj_port_mdb *mdb, 1524 struct switchdev_trans *trans) 1525 { 1526 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1527 struct net_device *orig_dev = 
mdb->obj.orig_dev; 1528 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1529 struct net_device *dev = mlxsw_sp_port->dev; 1530 struct mlxsw_sp_bridge_device *bridge_device; 1531 struct mlxsw_sp_bridge_port *bridge_port; 1532 struct mlxsw_sp_mid *mid; 1533 u16 fid_index; 1534 int err = 0; 1535 1536 if (switchdev_trans_ph_prepare(trans)) 1537 return 0; 1538 1539 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1540 if (!bridge_port) 1541 return 0; 1542 1543 bridge_device = bridge_port->bridge_device; 1544 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1545 bridge_device, 1546 mdb->vid); 1547 if (!mlxsw_sp_port_vlan) 1548 return 0; 1549 1550 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1551 1552 mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index); 1553 if (!mid) { 1554 mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr, 1555 fid_index); 1556 if (!mid) { 1557 netdev_err(dev, "Unable to allocate MC group\n"); 1558 return -ENOMEM; 1559 } 1560 } 1561 set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); 1562 1563 if (!bridge_device->multicast_enabled) 1564 return 0; 1565 1566 if (bridge_port->mrouter) 1567 return 0; 1568 1569 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true); 1570 if (err) { 1571 netdev_err(dev, "Unable to set SMID\n"); 1572 goto err_out; 1573 } 1574 1575 return 0; 1576 1577 err_out: 1578 mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); 1579 return err; 1580 } 1581 1582 static void 1583 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port, 1584 struct mlxsw_sp_bridge_device 1585 *bridge_device) 1586 { 1587 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1588 struct mlxsw_sp_mid *mid; 1589 bool mc_enabled; 1590 1591 mc_enabled = bridge_device->multicast_enabled; 1592 1593 list_for_each_entry(mid, &bridge_device->mids_list, list) { 1594 if (mc_enabled) 1595 mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, 1596 bridge_device); 1597 else 1598 
mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid); 1599 } 1600 } 1601 1602 static void 1603 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port, 1604 struct mlxsw_sp_bridge_port *bridge_port, 1605 bool add) 1606 { 1607 struct mlxsw_sp_bridge_device *bridge_device; 1608 struct mlxsw_sp_mid *mid; 1609 1610 bridge_device = bridge_port->bridge_device; 1611 1612 list_for_each_entry(mid, &bridge_device->mids_list, list) { 1613 if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) 1614 mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add); 1615 } 1616 } 1617 1618 static int mlxsw_sp_port_obj_add(struct net_device *dev, 1619 const struct switchdev_obj *obj, 1620 struct switchdev_trans *trans) 1621 { 1622 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1623 int err = 0; 1624 1625 switch (obj->id) { 1626 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1627 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, 1628 SWITCHDEV_OBJ_PORT_VLAN(obj), 1629 trans); 1630 break; 1631 case SWITCHDEV_OBJ_ID_PORT_MDB: 1632 err = mlxsw_sp_port_mdb_add(mlxsw_sp_port, 1633 SWITCHDEV_OBJ_PORT_MDB(obj), 1634 trans); 1635 break; 1636 default: 1637 err = -EOPNOTSUPP; 1638 break; 1639 } 1640 1641 return err; 1642 } 1643 1644 static void 1645 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, 1646 struct mlxsw_sp_bridge_port *bridge_port, u16 vid) 1647 { 1648 u16 pvid = mlxsw_sp_port->pvid == vid ? 
0 : vid; 1649 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1650 1651 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1652 if (WARN_ON(!mlxsw_sp_port_vlan)) 1653 return; 1654 1655 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 1656 mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid); 1657 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1658 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); 1659 } 1660 1661 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, 1662 const struct switchdev_obj_port_vlan *vlan) 1663 { 1664 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1665 struct net_device *orig_dev = vlan->obj.orig_dev; 1666 struct mlxsw_sp_bridge_port *bridge_port; 1667 u16 vid; 1668 1669 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1670 if (WARN_ON(!bridge_port)) 1671 return -EINVAL; 1672 1673 if (!bridge_port->bridge_device->vlan_enabled) 1674 return 0; 1675 1676 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) 1677 mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid); 1678 1679 return 0; 1680 } 1681 1682 static int 1683 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, 1684 struct mlxsw_sp_bridge_port *bridge_port, 1685 struct mlxsw_sp_mid *mid) 1686 { 1687 struct net_device *dev = mlxsw_sp_port->dev; 1688 int err; 1689 1690 if (bridge_port->bridge_device->multicast_enabled) { 1691 if (bridge_port->bridge_device->multicast_enabled) { 1692 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, 1693 false); 1694 if (err) 1695 netdev_err(dev, "Unable to remove port from SMID\n"); 1696 } 1697 } 1698 1699 err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); 1700 if (err) 1701 netdev_err(dev, "Unable to remove MC SFD\n"); 1702 1703 return err; 1704 } 1705 1706 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, 1707 const struct switchdev_obj_port_mdb *mdb) 1708 { 1709 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1710 struct 
net_device *orig_dev = mdb->obj.orig_dev; 1711 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1712 struct mlxsw_sp_bridge_device *bridge_device; 1713 struct net_device *dev = mlxsw_sp_port->dev; 1714 struct mlxsw_sp_bridge_port *bridge_port; 1715 struct mlxsw_sp_mid *mid; 1716 u16 fid_index; 1717 1718 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1719 if (!bridge_port) 1720 return 0; 1721 1722 bridge_device = bridge_port->bridge_device; 1723 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1724 bridge_device, 1725 mdb->vid); 1726 if (!mlxsw_sp_port_vlan) 1727 return 0; 1728 1729 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1730 1731 mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index); 1732 if (!mid) { 1733 netdev_err(dev, "Unable to remove port from MC DB\n"); 1734 return -EINVAL; 1735 } 1736 1737 return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid); 1738 } 1739 1740 static void 1741 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, 1742 struct mlxsw_sp_bridge_port *bridge_port) 1743 { 1744 struct mlxsw_sp_bridge_device *bridge_device; 1745 struct mlxsw_sp_mid *mid, *tmp; 1746 1747 bridge_device = bridge_port->bridge_device; 1748 1749 list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) { 1750 if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) { 1751 __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, 1752 mid); 1753 } else if (bridge_device->multicast_enabled && 1754 bridge_port->mrouter) { 1755 mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); 1756 } 1757 } 1758 } 1759 1760 static int mlxsw_sp_port_obj_del(struct net_device *dev, 1761 const struct switchdev_obj *obj) 1762 { 1763 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1764 int err = 0; 1765 1766 switch (obj->id) { 1767 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1768 err = mlxsw_sp_port_vlans_del(mlxsw_sp_port, 1769 SWITCHDEV_OBJ_PORT_VLAN(obj)); 1770 break; 1771 case 
SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

/* Return any current member port of the LAG, to act as its
 * representor; NULL when the LAG has no members.
 */
static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
};

/* Join a port to a VLAN-aware (802.1Q) bridge. The bridge manages its
 * own VLANs, so the port's default VID 1 entry is released here.
 */
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	if (is_vlan_dev(bridge_port->dev)) {
		NL_SET_ERR_MSG(extack, "spectrum: Can not enslave a VLAN device to a VLAN-aware bridge");
		return -EINVAL;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	/* Let VLAN-aware bridge take care of its own VLANs */
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}

/* Undo mlxsw_sp_bridge_8021q_port_join(): restore the default VID 1. */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}

/* 802.1Q bridges map each VID to its own FID. */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
}

static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
};

/* Test whether any {Port, VID} of this port is already bound to the
 * given bridge device.
 */
static bool
mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
			   const struct net_device *br_dev)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
			    list) {
		if (mlxsw_sp_port_vlan->bridge_port &&
		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
		    br_dev)
			return true;
	}

	return false;
}

/* Join a VLAN upper of a port to a VLAN-unaware (802.1D) bridge. Only
 * one VLAN upper per port may be in a given bridge.
 */
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 vid;

	if (!is_vlan_dev(bridge_port->dev)) {
		NL_SET_ERR_MSG(extack, "spectrum: Only VLAN devices can be enslaved to a VLAN-unaware bridge");
		return -EINVAL;
	}
	vid = vlan_dev_vlan_id(bridge_port->dev);

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		NL_SET_ERR_MSG(extack, "spectrum: Can not bridge VLAN uppers of the same port");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
}

/* Undo mlxsw_sp_bridge_8021d_port_join(). */
static void
mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 vid = vlan_dev_vlan_id(bridge_port->dev);

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
}

/* 802.1D bridges share one FID per bridge, keyed by its ifindex; the
 * VID argument is unused for this bridge type.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
}

static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
};

/* Enslave a port netdev (or its VLAN upper) to a bridge: take a
 * reference on the bridge port and delegate to the bridge type's
 * port_join operation.
 */
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct net_device *brport_dev,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
	if (IS_ERR(bridge_port))
		return PTR_ERR(bridge_port);
	bridge_device = bridge_port->bridge_device;

	err = bridge_device->ops->port_join(bridge_device, bridge_port,
					    mlxsw_sp_port, extack);
	if (err)
		goto err_port_join;

	return 0;

err_port_join:
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
	return err;
}

/* Reverse of mlxsw_sp_port_bridge_join(); silently returns when the
 * bridge or bridge port is unknown (nothing to undo).
 */
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				struct net_device *brport_dev,
				struct net_device *br_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;
	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
	if (!bridge_port)
		return;

	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}

/* Notify the bridge layer about an FDB event originating in hardware. */
static void
mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
			    const char *mac, u16 vid,
			    struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;

	info.addr = mac;
	info.vid = vid;
	call_switchdev_notifiers(type, dev, &info.info);
}

/* Process one learned/aged-out MAC record from an SFN query: validate
 * the {port, FID}, mirror the entry into the FDB and notify the bridge.
 * On any lookup failure the entry is removed from hardware without
 * notifying (just_remove path).
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);

	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

/* LAG variant of mlxsw_sp_fdb_notify_mac_process(); the record is keyed
 * by LAG id and a representor port is used for the lookups.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_port_vlan->vid;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);

	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

/* Dispatch a single SFN record to the port or LAG handler. */
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

/* Re-arm the FDB notification polling work. */
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;

	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
			       msecs_to_jiffies(bridge->fdb_notify.interval));
}

/* Periodic work: query the SFN register for learned/aged-out MAC
 * notifications and process each record under RTNL. Always reschedules
 * itself, even on query failure.
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

/* Deferred context for handling a switchdev FDB notifier event outside
 * atomic context.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
2207 mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false); 2208 break; 2209 } 2210 2211 out: 2212 rtnl_unlock(); 2213 kfree(switchdev_work->fdb_info.addr); 2214 kfree(switchdev_work); 2215 dev_put(dev); 2216 } 2217 2218 /* Called under rcu_read_lock() */ 2219 static int mlxsw_sp_switchdev_event(struct notifier_block *unused, 2220 unsigned long event, void *ptr) 2221 { 2222 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 2223 struct mlxsw_sp_switchdev_event_work *switchdev_work; 2224 struct switchdev_notifier_fdb_info *fdb_info = ptr; 2225 2226 if (!mlxsw_sp_port_dev_lower_find_rcu(dev)) 2227 return NOTIFY_DONE; 2228 2229 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); 2230 if (!switchdev_work) 2231 return NOTIFY_BAD; 2232 2233 INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work); 2234 switchdev_work->dev = dev; 2235 switchdev_work->event = event; 2236 2237 switch (event) { 2238 case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ 2239 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2240 memcpy(&switchdev_work->fdb_info, ptr, 2241 sizeof(switchdev_work->fdb_info)); 2242 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); 2243 if (!switchdev_work->fdb_info.addr) 2244 goto err_addr_alloc; 2245 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, 2246 fdb_info->addr); 2247 /* Take a reference on the device. 
This can be either 2248 * upper device containig mlxsw_sp_port or just a 2249 * mlxsw_sp_port 2250 */ 2251 dev_hold(dev); 2252 break; 2253 default: 2254 kfree(switchdev_work); 2255 return NOTIFY_DONE; 2256 } 2257 2258 mlxsw_core_schedule_work(&switchdev_work->work); 2259 2260 return NOTIFY_DONE; 2261 2262 err_addr_alloc: 2263 kfree(switchdev_work); 2264 return NOTIFY_BAD; 2265 } 2266 2267 static struct notifier_block mlxsw_sp_switchdev_notifier = { 2268 .notifier_call = mlxsw_sp_switchdev_event, 2269 }; 2270 2271 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) 2272 { 2273 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge; 2274 int err; 2275 2276 err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME); 2277 if (err) { 2278 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n"); 2279 return err; 2280 } 2281 2282 err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier); 2283 if (err) { 2284 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n"); 2285 return err; 2286 } 2287 2288 INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work); 2289 bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; 2290 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); 2291 return 0; 2292 } 2293 2294 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) 2295 { 2296 cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw); 2297 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier); 2298 2299 } 2300 2301 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) 2302 { 2303 struct mlxsw_sp_bridge *bridge; 2304 2305 bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL); 2306 if (!bridge) 2307 return -ENOMEM; 2308 mlxsw_sp->bridge = bridge; 2309 bridge->mlxsw_sp = mlxsw_sp; 2310 2311 INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list); 2312 2313 bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops; 2314 bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops; 2315 2316 return mlxsw_sp_fdb_init(mlxsw_sp); 2317 
} 2318 2319 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) 2320 { 2321 mlxsw_sp_fdb_fini(mlxsw_sp); 2322 WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); 2323 kfree(mlxsw_sp->bridge); 2324 } 2325 2326 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) 2327 { 2328 mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; 2329 } 2330 2331 void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port) 2332 { 2333 } 2334