// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */

#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <net/netevent.h>
#include <net/switchdev.h>
#include "bridge.h"
#include "esw/bridge.h"
#include "en_rep.h"

/* Period (in milliseconds) of the delayed work that runs
 * mlx5_esw_bridge_update() to age out offloaded FDB entries.
 */
#define MLX5_ESW_BRIDGE_UPDATE_INTERVAL 1000

/* Deferred-work context for a switchdev FDB add/del event. The event is
 * received in atomic notifier context and processed later on the bridge
 * offloads ordered workqueue under rtnl lock.
 */
struct mlx5_bridge_switchdev_fdb_work {
	struct work_struct work;
	/* copy of the notifier payload; fdb_info.addr points to a
	 * separately allocated copy of the MAC address (freed in cleanup)
	 */
	struct switchdev_notifier_fdb_info fdb_info;
	/* representor netdev the event was reported on; a reference is
	 * held (dev_hold) for the lifetime of the work item
	 */
	struct net_device *dev;
	struct mlx5_esw_bridge_offloads *br_offloads;
	/* true for FDB add, false for FDB delete */
	bool add;
};

/* Return true if the representor netdev belongs to the given eswitch. */
static bool mlx5_esw_bridge_dev_same_esw(struct net_device *dev, struct mlx5_eswitch *esw)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return esw == priv->mdev->priv.eswitch;
}

/* Return true if the representor netdev and the eswitch share the same
 * NIC system image GUID, i.e. are functions of the same physical device.
 */
static bool mlx5_esw_bridge_dev_same_hw(struct net_device *dev, struct mlx5_eswitch *esw)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev, *esw_mdev;
	u64 system_guid, esw_system_guid;

	mdev = priv->mdev;
	esw_mdev = esw->dev;

	system_guid = mlx5_query_nic_system_image_guid(mdev);
	esw_system_guid = mlx5_query_nic_system_image_guid(esw_mdev);

	return system_guid == esw_system_guid;
}

/* Walk the lower devices of a LAG master and return the first mlx5 eswitch
 * representor that is on the given eswitch and whose device is in shared-FDB
 * LAG mode. Returns NULL if no such lower device exists.
 */
static struct net_device *
mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
{
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower, iter) {
		struct mlx5_core_dev *mdev;
		struct mlx5e_priv *priv;

		if (!mlx5e_eswitch_rep(lower))
			continue;

		priv = netdev_priv(lower);
		mdev = priv->mdev;
		if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw))
			return lower;
	}

	return NULL;
}

/* Resolve a netdev (or, for a LAG master, its suitable lower device) to an
 * mlx5 representor on the same hardware as @esw. On success returns the
 * representor netdev and fills @vport_num and @esw_owner_vhca_id; returns
 * NULL if the device is not a matching representor.
 */
static struct net_device *
mlx5_esw_bridge_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
					  u16 *vport_num, u16 *esw_owner_vhca_id)
{
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *priv;

	if (netif_is_lag_master(dev))
		dev = mlx5_esw_bridge_lag_rep_get(dev, esw);

	if (!dev || !mlx5e_eswitch_rep(dev) || !mlx5_esw_bridge_dev_same_hw(dev, esw))
		return NULL;

	priv = netdev_priv(dev);
	rpriv = priv->ppriv;
	*vport_num = rpriv->rep->vport;
	*esw_owner_vhca_id = MLX5_CAP_GEN(priv->mdev, vhca_id);
	return dev;
}

/* Like mlx5_esw_bridge_rep_vport_num_vhca_id_get(), but also searches
 * recursively through lower devices (e.g. VLAN uppers), skipping bridge
 * masters to avoid descending into the bridge itself.
 */
static struct net_device *
mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
						u16 *vport_num, u16 *esw_owner_vhca_id)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	if (netif_is_lag_master(dev) || mlx5e_eswitch_rep(dev))
		return mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, vport_num,
								 esw_owner_vhca_id);

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		struct net_device *rep;

		if (netif_is_bridge_master(lower_dev))
			continue;

		rep = mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(lower_dev, esw, vport_num,
								      esw_owner_vhca_id);
		if (rep)
			return rep;
	}

	return NULL;
}

/* Return true if the event on @dev should be handled locally by @esw:
 * the representor must be on @esw and, when @dev is a LAG master, its
 * device must be the LAG master in shared-FDB mode.
 */
static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *rep,
				     struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	if (!mlx5_esw_bridge_dev_same_esw(rep, esw))
		return false;

	priv = netdev_priv(rep);
	mdev = priv->mdev;
	if (netif_is_lag_master(dev))
		return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev);
	return true;
}

/* NETDEV_CHANGEUPPER handler: offload (or un-offload) a representor vport
 * when it is linked to/unlinked from a bridge master. Local representors use
 * the vport_link/unlink path; representors on the same hardware but a
 * different eswitch use the peer link/unlink path.
 */
static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
								    struct mlx5_esw_bridge_offloads,
								    netdev_nb);
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct net_device *upper = info->upper_dev, *rep;
	struct mlx5_eswitch *esw = br_offloads->esw;
	u16 vport_num, esw_owner_vhca_id;
	struct netlink_ext_ack *extack;
	int ifindex = upper->ifindex;
	int err = 0;

	if (!netif_is_bridge_master(upper))
		return 0;

	rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
	if (!rep)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (mlx5_esw_bridge_is_local(dev, rep, esw))
		err = info->linking ?
			mlx5_esw_bridge_vport_link(ifindex, vport_num, esw_owner_vhca_id,
						   br_offloads, extack) :
			mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id,
						     br_offloads, extack);
	else if (mlx5_esw_bridge_dev_same_hw(rep, esw))
		err = info->linking ?
			mlx5_esw_bridge_vport_peer_link(ifindex, vport_num, esw_owner_vhca_id,
							br_offloads, extack) :
			mlx5_esw_bridge_vport_peer_unlink(ifindex, vport_num, esw_owner_vhca_id,
							  br_offloads, extack);

	return err;
}

/* Netdevice notifier callback: dispatch bridge-relevant netdev events. */
static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
						unsigned long event, void *ptr)
{
	int err = 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		break;

	case NETDEV_CHANGEUPPER:
		err = mlx5_esw_bridge_port_changeupper(nb, ptr);
		break;
	}

	return notifier_from_errno(err);
}

/* SWITCHDEV_PORT_OBJ_ADD handler: offload a port VLAN object. Other object
 * types are rejected with -EOPNOTSUPP (handled flag deliberately left set).
 */
static int
mlx5_esw_bridge_port_obj_add(struct net_device *dev,
			     struct switchdev_notifier_port_obj_info *port_obj_info,
			     struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
	const struct switchdev_obj *obj = port_obj_info->obj;
	const struct switchdev_obj_port_vlan *vlan;
	u16 vport_num, esw_owner_vhca_id;
	int err;

	if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
						       &esw_owner_vhca_id))
		return 0;

	port_obj_info->handled = true;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = mlx5_esw_bridge_port_vlan_add(vport_num, esw_owner_vhca_id, vlan->vid,
						    vlan->flags, br_offloads, extack);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return err;
}

/* SWITCHDEV_PORT_OBJ_DEL handler: remove an offloaded port VLAN object. */
static int
mlx5_esw_bridge_port_obj_del(struct net_device *dev,
			     struct switchdev_notifier_port_obj_info *port_obj_info,
			     struct mlx5_esw_bridge_offloads *br_offloads)
{
	const struct switchdev_obj *obj = port_obj_info->obj;
	const struct switchdev_obj_port_vlan *vlan;
	u16 vport_num, esw_owner_vhca_id;

	if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
						       &esw_owner_vhca_id))
		return 0;

	port_obj_info->handled = true;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		mlx5_esw_bridge_port_vlan_del(vport_num, esw_owner_vhca_id, vlan->vid, br_offloads);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/* SWITCHDEV_PORT_ATTR_SET handler: validate bridge port flags and apply
 * ageing time, VLAN filtering and VLAN protocol attributes to the offloaded
 * bridge. Uses the recursive lower-dev lookup so attributes set on upper
 * devices of a representor are also handled.
 */
static int
mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
				  struct switchdev_notifier_port_attr_info *port_attr_info,
				  struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
	const struct switchdev_attr *attr = port_attr_info->attr;
	u16 vport_num, esw_owner_vhca_id;
	int err = 0;

	if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
							     &esw_owner_vhca_id))
		return 0;

	port_attr_info->handled = true;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		/* only learning, flood and mcast-flood flags are offloadable */
		if (attr->u.brport_flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD)) {
			NL_SET_ERR_MSG_MOD(extack, "Flag is not supported");
			err = -EINVAL;
		}
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlx5_esw_bridge_ageing_time_set(vport_num, esw_owner_vhca_id,
						      attr->u.ageing_time, br_offloads);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id,
							 attr->u.vlan_filtering, br_offloads);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
		err = mlx5_esw_bridge_vlan_proto_set(vport_num,
						     esw_owner_vhca_id,
						     attr->u.vlan_protocol,
						     br_offloads);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Blocking switchdev notifier callback: dispatch object add/del and
 * attribute-set events (allowed to sleep in this context).
 */
static int mlx5_esw_bridge_event_blocking(struct notifier_block *nb,
					  unsigned long event, void *ptr)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
								    struct mlx5_esw_bridge_offloads,
								    nb_blk);
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = mlx5_esw_bridge_port_obj_add(dev, ptr, br_offloads);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = mlx5_esw_bridge_port_obj_del(dev, ptr, br_offloads);
		break;
	case SWITCHDEV_PORT_ATTR_SET:
		err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);
		break;
	default:
		err = 0;
	}

	return notifier_from_errno(err);
}

/* Release everything owned by an FDB work item: the netdev reference taken
 * at init, the copied MAC address, and the work struct itself.
 */
static void
mlx5_esw_bridge_cleanup_switchdev_fdb_work(struct mlx5_bridge_switchdev_fdb_work *fdb_work)
{
	dev_put(fdb_work->dev);
	kfree(fdb_work->fdb_info.addr);
	kfree(fdb_work);
}

/* Workqueue handler for deferred FDB add/del events: re-resolves the
 * representor under rtnl lock and creates or removes the offloaded FDB
 * entry, then frees the work item.
 */
static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work)
{
	struct mlx5_bridge_switchdev_fdb_work *fdb_work =
		container_of(work, struct mlx5_bridge_switchdev_fdb_work, work);
	struct switchdev_notifier_fdb_info *fdb_info =
		&fdb_work->fdb_info;
	struct mlx5_esw_bridge_offloads *br_offloads =
		fdb_work->br_offloads;
	struct net_device *dev = fdb_work->dev;
	u16 vport_num, esw_owner_vhca_id;

	rtnl_lock();

	if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
						       &esw_owner_vhca_id))
		goto out;

	if (fdb_work->add)
		mlx5_esw_bridge_fdb_create(dev, vport_num, esw_owner_vhca_id, br_offloads,
					   fdb_info);
	else
		mlx5_esw_bridge_fdb_remove(dev, vport_num, esw_owner_vhca_id, br_offloads,
					   fdb_info);

out:
	rtnl_unlock();
	mlx5_esw_bridge_cleanup_switchdev_fdb_work(fdb_work);
}

/* Allocate and populate an FDB work item in atomic context. Deep-copies the
 * MAC address from @fdb_info (the notifier payload is not valid after the
 * callback returns) and takes a reference on @dev. Returns ERR_PTR(-ENOMEM)
 * on allocation failure.
 */
static struct mlx5_bridge_switchdev_fdb_work *
mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add,
					struct switchdev_notifier_fdb_info *fdb_info,
					struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_bridge_switchdev_fdb_work *work;
	u8 *addr;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&work->work, mlx5_esw_bridge_switchdev_fdb_event_work);
	memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));

	addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
	if (!addr) {
		kfree(work);
		return ERR_PTR(-ENOMEM);
	}
	ether_addr_copy(addr, fdb_info->addr);
	work->fdb_info.addr = addr;

	dev_hold(dev);
	work->dev = dev;
	work->br_offloads = br_offloads;
	work->add = add;
	return work;
}

/* Atomic switchdev notifier callback. ATTR_SET is handled inline; FDB
 * add/del events are deferred to the ordered workqueue. FDB_ADD_TO_BRIDGE
 * refreshes an entry's used timestamp on the local eswitch only, while
 * FDB_DEL_TO_BRIDGE is processed only on peers.
 */
static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
					   unsigned long event, void *ptr)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
								    struct mlx5_esw_bridge_offloads,
								    nb);
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlx5_bridge_switchdev_fdb_work *work;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct switchdev_notifier_info *info = ptr;
	u16 vport_num, esw_owner_vhca_id;
	struct net_device *upper, *rep;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		int err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);

		return notifier_from_errno(err);
	}

	upper = netdev_master_upper_dev_get_rcu(dev);
	if (!upper)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(upper))
		return NOTIFY_DONE;

	rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
	if (!rep)
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
		/* only handle the event on native eswitch of representor */
		if (!mlx5_esw_bridge_is_local(dev, rep, esw))
			break;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		mlx5_esw_bridge_fdb_update_used(dev, vport_num, esw_owner_vhca_id, br_offloads,
						fdb_info);
		break;
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* only handle the event on peers */
		if (mlx5_esw_bridge_is_local(dev, rep, esw))
			break;
		fallthrough;
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);

		work = mlx5_esw_bridge_init_switchdev_fdb_work(dev,
							       event == SWITCHDEV_FDB_ADD_TO_DEVICE,
							       fdb_info,
							       br_offloads);
		if (IS_ERR(work)) {
			WARN_ONCE(1, "Failed to init switchdev work, err=%ld",
				  PTR_ERR(work));
			return notifier_from_errno(PTR_ERR(work));
		}

		queue_work(br_offloads->wq, &work->work);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/* Periodic maintenance: run the bridge update (FDB ageing) under rtnl and
 * re-arm itself on the bridge offloads workqueue.
 */
static void mlx5_esw_bridge_update_work(struct work_struct *work)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(work,
								    struct mlx5_esw_bridge_offloads,
								    update_work.work);

	rtnl_lock();
	mlx5_esw_bridge_update(br_offloads);
	rtnl_unlock();

	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
}

/* Initialize bridge offloads for a representor's eswitch: create the
 * offloads object, the ordered workqueue, register the atomic and blocking
 * switchdev notifiers plus the netdevice notifier, and kick off the periodic
 * update work. On any failure, unwinds in reverse order and only warns
 * (initialization failure is non-fatal for the rest of the driver).
 */
void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
{
	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw =
		mdev->priv.eswitch;
	int err;

	rtnl_lock();
	br_offloads = mlx5_esw_bridge_init(esw);
	rtnl_unlock();
	if (IS_ERR(br_offloads)) {
		esw_warn(mdev, "Failed to init esw bridge (err=%ld)\n", PTR_ERR(br_offloads));
		return;
	}

	/* ordered workqueue serializes FDB events with the update work */
	br_offloads->wq = alloc_ordered_workqueue("mlx5_bridge_wq", 0);
	if (!br_offloads->wq) {
		esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
		goto err_alloc_wq;
	}

	br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
	err = register_switchdev_notifier(&br_offloads->nb);
	if (err) {
		esw_warn(mdev, "Failed to register switchdev notifier (err=%d)\n", err);
		goto err_register_swdev;
	}

	br_offloads->nb_blk.notifier_call = mlx5_esw_bridge_event_blocking;
	err = register_switchdev_blocking_notifier(&br_offloads->nb_blk);
	if (err) {
		esw_warn(mdev, "Failed to register blocking switchdev notifier (err=%d)\n", err);
		goto err_register_swdev_blk;
	}

	br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
	err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
	if (err) {
		esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
			 err);
		goto err_register_netdev;
	}
	INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
	return;

err_register_netdev:
	unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
err_register_swdev_blk:
	unregister_switchdev_notifier(&br_offloads->nb);
err_register_swdev:
	destroy_workqueue(br_offloads->wq);
err_alloc_wq:
	rtnl_lock();
	mlx5_esw_bridge_cleanup(esw);
	rtnl_unlock();
}

/* Tear down bridge offloads in the reverse order of init: stop the periodic
 * work, unregister all notifiers, destroy the workqueue (flushes pending FDB
 * work), and release the offloads object under rtnl. No-op if offloads were
 * never initialized.
 */
void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw =
		mdev->priv.eswitch;

	br_offloads = esw->br_offloads;
	if (!br_offloads)
		return;

	cancel_delayed_work_sync(&br_offloads->update_work);
	unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
	unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
	unregister_switchdev_notifier(&br_offloads->nb);
	destroy_workqueue(br_offloads->wq);
	rtnl_lock();
	mlx5_esw_bridge_cleanup(esw);
	rtnl_unlock();
}