1 /* 2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c 3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> 5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> 6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 #include <linux/kernel.h> 38 #include <linux/types.h> 39 #include <linux/netdevice.h> 40 #include <linux/etherdevice.h> 41 #include <linux/slab.h> 42 #include <linux/device.h> 43 #include <linux/skbuff.h> 44 #include <linux/if_vlan.h> 45 #include <linux/if_bridge.h> 46 #include <linux/workqueue.h> 47 #include <linux/jiffies.h> 48 #include <linux/rtnetlink.h> 49 #include <net/switchdev.h> 50 51 #include "spectrum.h" 52 #include "core.h" 53 #include "reg.h" 54 55 static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port, 56 u16 vid) 57 { 58 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port); 59 u16 fid = vid; 60 61 fid = f ? f->fid : fid; 62 63 if (!fid) 64 fid = mlxsw_sp_port->pvid; 65 66 return fid; 67 } 68 69 static struct mlxsw_sp_port * 70 mlxsw_sp_port_orig_get(struct net_device *dev, 71 struct mlxsw_sp_port *mlxsw_sp_port) 72 { 73 struct mlxsw_sp_port *mlxsw_sp_vport; 74 struct mlxsw_sp_fid *fid; 75 u16 vid; 76 77 if (netif_is_bridge_master(dev)) { 78 fid = mlxsw_sp_vfid_find(mlxsw_sp_port->mlxsw_sp, 79 dev); 80 if (fid) { 81 mlxsw_sp_vport = 82 mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port, 83 fid->fid); 84 WARN_ON(!mlxsw_sp_vport); 85 return mlxsw_sp_vport; 86 } 87 } 88 89 if (!is_vlan_dev(dev)) 90 return mlxsw_sp_port; 91 92 vid = vlan_dev_vlan_id(dev); 93 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); 94 WARN_ON(!mlxsw_sp_vport); 95 96 return mlxsw_sp_vport; 97 } 98 99 static int mlxsw_sp_port_attr_get(struct net_device *dev, 100 struct switchdev_attr *attr) 101 { 102 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 103 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 104 105 mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port); 106 if (!mlxsw_sp_port) 107 return -EINVAL; 108 109 switch (attr->id) { 110 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: 111 attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac); 112 memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac, 113 attr->u.ppid.id_len); 114 break; 115 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: 116 attr->u.brport_flags = 117 (mlxsw_sp_port->learning ? BR_LEARNING : 0) | 118 (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) | 119 (mlxsw_sp_port->uc_flood ? BR_FLOOD : 0); 120 break; 121 default: 122 return -EOPNOTSUPP; 123 } 124 125 return 0; 126 } 127 128 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, 129 u8 state) 130 { 131 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 132 enum mlxsw_reg_spms_state spms_state; 133 char *spms_pl; 134 u16 vid; 135 int err; 136 137 switch (state) { 138 case BR_STATE_FORWARDING: 139 spms_state = MLXSW_REG_SPMS_STATE_FORWARDING; 140 break; 141 case BR_STATE_LEARNING: 142 spms_state = MLXSW_REG_SPMS_STATE_LEARNING; 143 break; 144 case BR_STATE_LISTENING: /* fall-through */ 145 case BR_STATE_DISABLED: /* fall-through */ 146 case BR_STATE_BLOCKING: 147 spms_state = MLXSW_REG_SPMS_STATE_DISCARDING; 148 break; 149 default: 150 BUG(); 151 } 152 153 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 154 if (!spms_pl) 155 return -ENOMEM; 156 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 157 158 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { 159 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); 160 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 161 } else { 162 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) 163 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 164 } 165 166 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 167 kfree(spms_pl); 168 return err; 169 } 170 171 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, 172 struct switchdev_trans *trans, 173 u8 state) 174 { 175 if (switchdev_trans_ph_prepare(trans)) 176 return 0; 177 178 mlxsw_sp_port->stp_state = state; 179 return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state); 180 } 181 182 static int __mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port, 183 u16 idx_begin, u16 idx_end, 184 enum mlxsw_sp_flood_table table, 185 bool set) 186 { 187 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 188 u16 local_port = mlxsw_sp_port->local_port; 189 enum mlxsw_flood_table_type table_type; 190 u16 range = idx_end - idx_begin + 1; 191 char *sftr_pl; 192 int err; 193 194 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) 195 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; 196 else 197 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; 198 199 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); 200 if (!sftr_pl) 201 return -ENOMEM; 202 203 mlxsw_reg_sftr_pack(sftr_pl, table, idx_begin, 204 table_type, range, local_port, set); 205 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); 206 207 kfree(sftr_pl); 208 return err; 209 } 210 211 static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, 212 u16 idx_begin, u16 idx_end, bool uc_set, 213 bool bc_set, bool mc_set) 214 { 215 int err; 216 217 err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, 218 MLXSW_SP_FLOOD_TABLE_UC, uc_set); 219 if (err) 220 return err; 221 222 err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, 223 MLXSW_SP_FLOOD_TABLE_BC, bc_set); 224 if (err) 225 goto err_flood_bm_set; 226 227 err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, 228 MLXSW_SP_FLOOD_TABLE_MC, mc_set); 229 if (err) 230 goto err_flood_mc_set; 231 return 0; 232 233 err_flood_mc_set: 234 __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, 235 MLXSW_SP_FLOOD_TABLE_BC, !bc_set); 236 err_flood_bm_set: 237 __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, 238 MLXSW_SP_FLOOD_TABLE_UC, !uc_set); 239 return err; 240 } 241 242 static int mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port, 243 enum mlxsw_sp_flood_table table, 244 bool set) 245 { 246 struct net_device *dev = mlxsw_sp_port->dev; 247 u16 vid, last_visited_vid; 248 int err; 249 250 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { 251 u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid; 252 u16 vfid = mlxsw_sp_fid_to_vfid(fid); 253 254 return __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vfid, 255 vfid, table, set); 256 } 257 258 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { 259 err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, 260 table, set); 261 if (err) { 262 last_visited_vid = vid; 263 goto err_port_flood_set; 264 } 265 } 266 267 return 0; 268 269 err_port_flood_set: 270 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid) 271 __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, table, 272 !set); 273 netdev_err(dev, "Failed to configure unicast flooding\n"); 274 return err; 275 } 276 277 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, 278 struct switchdev_trans *trans, 279 bool mc_disabled) 280 { 281 int set; 282 int err = 0; 283 284 if (switchdev_trans_ph_prepare(trans)) 285 return 0; 286 287 if (mlxsw_sp_port->mc_router != mlxsw_sp_port->mc_flood) { 288 set = mc_disabled ? 289 mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router; 290 err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port, 291 MLXSW_SP_FLOOD_TABLE_MC, 292 set); 293 } 294 295 if (!err) 296 mlxsw_sp_port->mc_disabled = mc_disabled; 297 298 return err; 299 } 300 301 int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, 302 bool set) 303 { 304 bool mc_set = set; 305 u16 vfid; 306 307 /* In case of vFIDs, index into the flooding table is relative to 308 * the start of the vFIDs range. 309 */ 310 vfid = mlxsw_sp_fid_to_vfid(fid); 311 312 if (set) 313 mc_set = mlxsw_sp_vport->mc_disabled ? 314 mlxsw_sp_vport->mc_flood : mlxsw_sp_vport->mc_router; 315 316 return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set, 317 mc_set); 318 } 319 320 static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, 321 bool set) 322 { 323 u16 vid; 324 int err; 325 326 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { 327 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); 328 329 return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, 330 set); 331 } 332 333 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { 334 err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, 335 set); 336 if (err) 337 goto err_port_vid_learning_set; 338 } 339 340 return 0; 341 342 err_port_vid_learning_set: 343 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) 344 __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, !set); 345 return err; 346 } 347 348 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, 349 struct switchdev_trans *trans, 350 unsigned long brport_flags) 351 { 352 unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0; 353 unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0; 354 int err; 355 356 if (!mlxsw_sp_port->bridged) 357 return -EINVAL; 358 359 if (switchdev_trans_ph_prepare(trans)) 360 return 0; 361 362 if ((uc_flood ^ brport_flags) & BR_FLOOD) { 363 err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port, 364 MLXSW_SP_FLOOD_TABLE_UC, 365 !mlxsw_sp_port->uc_flood); 366 if (err) 367 return err; 368 } 369 370 if ((learning ^ brport_flags) & BR_LEARNING) { 371 err = mlxsw_sp_port_learning_set(mlxsw_sp_port, 372 !mlxsw_sp_port->learning); 373 if (err) 374 goto err_port_learning_set; 375 } 376 377 mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0; 378 mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0; 379 mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0; 380 381 return 0; 382 383 err_port_learning_set: 384 if ((uc_flood ^ brport_flags) & BR_FLOOD) 385 mlxsw_sp_port_flood_table_set(mlxsw_sp_port, 386 MLXSW_SP_FLOOD_TABLE_UC, 387 mlxsw_sp_port->uc_flood); 388 return err; 389 } 390 391 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time) 392 { 393 char sfdat_pl[MLXSW_REG_SFDAT_LEN]; 394 int err; 395 396 mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time); 397 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl); 398 if (err) 399 return err; 400 mlxsw_sp->ageing_time = ageing_time; 401 return 0; 402 } 403 404 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port, 405 struct switchdev_trans *trans, 406 unsigned long ageing_clock_t) 407 { 408 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 409 unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); 410 u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; 411 412 if (switchdev_trans_ph_prepare(trans)) { 413 if (ageing_time < MLXSW_SP_MIN_AGEING_TIME || 414 ageing_time > MLXSW_SP_MAX_AGEING_TIME) 415 return -ERANGE; 416 else 417 return 0; 418 } 419 420 return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time); 421 } 422 423 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, 424 struct switchdev_trans *trans, 425 struct net_device *orig_dev, 426 bool vlan_enabled) 427 { 428 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 429 430 /* SWITCHDEV_TRANS_PREPARE phase */ 431 if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) { 432 netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n"); 433 return -EINVAL; 434 } 435 436 return 0; 437 } 438 439 static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port, 440 struct switchdev_trans *trans, 441 bool is_port_mc_router) 442 { 443 if (switchdev_trans_ph_prepare(trans)) 444 return 0; 445 446 mlxsw_sp_port->mc_router = is_port_mc_router; 447 if (!mlxsw_sp_port->mc_disabled) 448 return mlxsw_sp_port_flood_table_set(mlxsw_sp_port, 449 MLXSW_SP_FLOOD_TABLE_MC, 450 is_port_mc_router); 451 452 return 0; 453 } 454 455 static int mlxsw_sp_port_attr_set(struct net_device *dev, 456 const struct switchdev_attr *attr, 457 struct switchdev_trans *trans) 458 { 459 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 460 int err = 0; 461 462 mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port); 463 if (!mlxsw_sp_port) 464 return -EINVAL; 465 466 switch (attr->id) { 467 case SWITCHDEV_ATTR_ID_PORT_STP_STATE: 468 err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans, 469 attr->u.stp_state); 470 break; 471 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: 472 err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans, 473 attr->u.brport_flags); 474 break; 475 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: 476 err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans, 477 attr->u.ageing_time); 478 break; 479 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: 480 err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans, 481 attr->orig_dev, 482 attr->u.vlan_filtering); 483 break; 484 case SWITCHDEV_ATTR_ID_PORT_MROUTER: 485 err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans, 486 attr->u.mrouter); 487 break; 488 case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: 489 err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans, 490 attr->u.mc_disabled); 491 break; 492 default: 493 err = -EOPNOTSUPP; 494 break; 495 } 496 497 return err; 498 } 499 500 static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create) 501 { 502 char sfmr_pl[MLXSW_REG_SFMR_LEN]; 503 504 mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid); 505 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); 506 } 507 508 static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid) 509 { 510 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID; 511 char svfa_pl[MLXSW_REG_SVFA_LEN]; 512 513 mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid); 514 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl); 515 } 516 517 static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid) 518 { 519 struct mlxsw_sp_fid *f; 520 521 f = kzalloc(sizeof(*f), GFP_KERNEL); 522 if (!f) 523 return NULL; 524 525 f->fid = fid; 526 527 return f; 528 } 529 530 struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) 531 { 532 struct mlxsw_sp_fid *f; 533 int err; 534 535 err = mlxsw_sp_fid_op(mlxsw_sp, fid, true); 536 if (err) 537 return ERR_PTR(err); 538 539 /* Although all the ports member in the FID might be using a 540 * {Port, VID} to FID mapping, we create a global VID-to-FID 541 * mapping. This allows a port to transition to VLAN mode, 542 * knowing the global mapping exists. 543 */ 544 err = mlxsw_sp_fid_map(mlxsw_sp, fid, true); 545 if (err) 546 goto err_fid_map; 547 548 f = mlxsw_sp_fid_alloc(fid); 549 if (!f) { 550 err = -ENOMEM; 551 goto err_allocate_fid; 552 } 553 554 list_add(&f->list, &mlxsw_sp->fids); 555 556 return f; 557 558 err_allocate_fid: 559 mlxsw_sp_fid_map(mlxsw_sp, fid, false); 560 err_fid_map: 561 mlxsw_sp_fid_op(mlxsw_sp, fid, false); 562 return ERR_PTR(err); 563 } 564 565 void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f) 566 { 567 u16 fid = f->fid; 568 569 list_del(&f->list); 570 571 if (f->r) 572 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); 573 574 kfree(f); 575 576 mlxsw_sp_fid_map(mlxsw_sp, fid, false); 577 578 mlxsw_sp_fid_op(mlxsw_sp, fid, false); 579 } 580 581 static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, 582 u16 fid) 583 { 584 struct mlxsw_sp_fid *f; 585 586 if (test_bit(fid, mlxsw_sp_port->active_vlans)) 587 return 0; 588 589 f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid); 590 if (!f) { 591 f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid); 592 if (IS_ERR(f)) 593 return PTR_ERR(f); 594 } 595 596 f->ref_count++; 597 598 netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid); 599 600 return 0; 601 } 602 603 static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port, 604 u16 fid) 605 { 606 struct mlxsw_sp_fid *f; 607 608 f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid); 609 if (WARN_ON(!f)) 610 return; 611 612 netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid); 613 614 mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid); 615 616 if (--f->ref_count == 0) 617 mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f); 618 } 619 620 static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid, 621 bool valid) 622 { 623 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; 624 625 /* If port doesn't have vPorts, then it can use the global 626 * VID-to-FID mapping. 627 */ 628 if (list_empty(&mlxsw_sp_port->vports_list)) 629 return 0; 630 631 return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid); 632 } 633 634 static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, 635 u16 fid_begin, u16 fid_end) 636 { 637 bool mc_flood; 638 int fid, err; 639 640 for (fid = fid_begin; fid <= fid_end; fid++) { 641 err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid); 642 if (err) 643 goto err_port_fid_join; 644 } 645 646 mc_flood = mlxsw_sp_port->mc_disabled ? 647 mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router; 648 649 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, 650 mlxsw_sp_port->uc_flood, true, 651 mc_flood); 652 if (err) 653 goto err_port_flood_set; 654 655 for (fid = fid_begin; fid <= fid_end; fid++) { 656 err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true); 657 if (err) 658 goto err_port_fid_map; 659 } 660 661 return 0; 662 663 err_port_fid_map: 664 for (fid--; fid >= fid_begin; fid--) 665 mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false); 666 __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false, 667 false, false); 668 err_port_flood_set: 669 fid = fid_end; 670 err_port_fid_join: 671 for (fid--; fid >= fid_begin; fid--) 672 __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid); 673 return err; 674 } 675 676 static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port, 677 u16 fid_begin, u16 fid_end) 678 { 679 int fid; 680 681 for (fid = fid_begin; fid <= fid_end; fid++) 682 mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false); 683 684 __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false, 685 false, false); 686 687 for (fid = fid_begin; fid <= fid_end; fid++) 688 __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid); 689 } 690 691 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 692 u16 vid) 693 { 694 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 695 char spvid_pl[MLXSW_REG_SPVID_LEN]; 696 697 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 698 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 699 } 700 701 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 702 bool allow) 703 { 704 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 705 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 706 707 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 708 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 709 } 710 711 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 712 { 713 struct net_device *dev = mlxsw_sp_port->dev; 714 int err; 715 716 if (!vid) { 717 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 718 if (err) { 719 netdev_err(dev, "Failed to disallow untagged traffic\n"); 720 return err; 721 } 722 } else { 723 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 724 if (err) { 725 netdev_err(dev, "Failed to set PVID\n"); 726 return err; 727 } 728 729 /* Only allow if not already allowed. */ 730 if (!mlxsw_sp_port->pvid) { 731 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, 732 true); 733 if (err) { 734 netdev_err(dev, "Failed to allow untagged traffic\n"); 735 goto err_port_allow_untagged_set; 736 } 737 } 738 } 739 740 mlxsw_sp_port->pvid = vid; 741 return 0; 742 743 err_port_allow_untagged_set: 744 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 745 return err; 746 } 747 748 static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port, 749 u16 vid_begin, u16 vid_end, bool is_member, 750 bool untagged) 751 { 752 u16 vid, vid_e; 753 int err; 754 755 for (vid = vid_begin; vid <= vid_end; 756 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 757 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), 758 vid_end); 759 760 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, 761 is_member, untagged); 762 if (err) 763 return err; 764 } 765 766 return 0; 767 } 768 769 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, 770 u16 vid_begin, u16 vid_end, 771 bool learn_enable) 772 { 773 u16 vid, vid_e; 774 int err; 775 776 for (vid = vid_begin; vid <= vid_end; 777 vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) { 778 vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1), 779 vid_end); 780 781 err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, 782 vid_e, learn_enable); 783 if (err) 784 return err; 785 } 786 787 return 0; 788 } 789 790 static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, 791 u16 vid_begin, u16 vid_end, 792 bool flag_untagged, bool flag_pvid) 793 { 794 struct net_device *dev = mlxsw_sp_port->dev; 795 u16 vid, old_pvid; 796 int err; 797 798 if (!mlxsw_sp_port->bridged) 799 return -EINVAL; 800 801 err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end); 802 if (err) { 803 netdev_err(dev, "Failed to join FIDs\n"); 804 return err; 805 } 806 807 err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, 808 true, flag_untagged); 809 if (err) { 810 netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin, 811 vid_end); 812 goto err_port_vlans_set; 813 } 814 815 old_pvid = mlxsw_sp_port->pvid; 816 if (flag_pvid && old_pvid != vid_begin) { 817 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin); 818 if (err) { 819 netdev_err(dev, "Unable to add PVID %d\n", vid_begin); 820 goto err_port_pvid_set; 821 } 822 } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) { 823 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); 824 if (err) { 825 netdev_err(dev, "Unable to del PVID\n"); 826 goto err_port_pvid_set; 827 } 828 } 829 830 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end, 831 mlxsw_sp_port->learning); 832 if (err) { 833 netdev_err(dev, "Failed to set learning for VIDs %d-%d\n", 834 vid_begin, vid_end); 835 goto err_port_vid_learning_set; 836 } 837 838 /* Changing activity bits only if HW operation succeded */ 839 for (vid = vid_begin; vid <= vid_end; vid++) { 840 set_bit(vid, mlxsw_sp_port->active_vlans); 841 if (flag_untagged) 842 set_bit(vid, mlxsw_sp_port->untagged_vlans); 843 else 844 clear_bit(vid, mlxsw_sp_port->untagged_vlans); 845 } 846 847 /* STP state change must be done after we set active VLANs */ 848 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, 849 mlxsw_sp_port->stp_state); 850 if (err) { 851 netdev_err(dev, "Failed to set STP state\n"); 852 goto err_port_stp_state_set; 853 } 854 855 return 0; 856 857 err_port_stp_state_set: 858 for (vid = vid_begin; vid <= vid_end; vid++) 859 clear_bit(vid, mlxsw_sp_port->active_vlans); 860 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end, 861 false); 862 err_port_vid_learning_set: 863 if (old_pvid != mlxsw_sp_port->pvid) 864 mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid); 865 err_port_pvid_set: 866 __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false, 867 false); 868 err_port_vlans_set: 869 mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); 870 return err; 871 } 872 873 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, 874 const struct switchdev_obj_port_vlan *vlan, 875 struct switchdev_trans *trans) 876 { 877 bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 878 bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 879 880 if (switchdev_trans_ph_prepare(trans)) 881 return 0; 882 883 return __mlxsw_sp_port_vlans_add(mlxsw_sp_port, 884 vlan->vid_begin, vlan->vid_end, 885 flag_untagged, flag_pvid); 886 } 887 888 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) 889 { 890 return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : 891 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; 892 } 893 894 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) 895 { 896 return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT : 897 MLXSW_REG_SFD_OP_WRITE_REMOVE; 898 } 899 900 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, 901 const char *mac, u16 fid, bool adding, 902 enum mlxsw_reg_sfd_rec_action action, 903 bool dynamic) 904 { 905 char *sfd_pl; 906 int err; 907 908 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); 909 if (!sfd_pl) 910 return -ENOMEM; 911 912 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); 913 mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), 914 mac, fid, action, local_port); 915 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 916 kfree(sfd_pl); 917 918 return err; 919 } 920 921 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, 922 const char *mac, u16 fid, bool adding, 923 bool dynamic) 924 { 925 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, 926 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic); 927 } 928 929 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, 930 bool adding) 931 { 932 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding, 933 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER, 934 false); 935 } 936 937 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, 938 const char *mac, u16 fid, u16 lag_vid, 939 bool adding, bool dynamic) 940 { 941 char *sfd_pl; 942 int err; 943 944 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); 945 if (!sfd_pl) 946 return -ENOMEM; 947 948 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); 949 mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), 950 mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, 951 lag_vid, lag_id); 952 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 953 kfree(sfd_pl); 954 955 return err; 956 } 957 958 static int 959 mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port, 960 const struct switchdev_obj_port_fdb *fdb, 961 struct switchdev_trans *trans) 962 { 963 u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid); 964 u16 lag_vid = 0; 965 966 if (switchdev_trans_ph_prepare(trans)) 967 return 0; 968 969 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { 970 lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); 971 } 972 973 if (!mlxsw_sp_port->lagged) 974 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp, 975 mlxsw_sp_port->local_port, 976 fdb->addr, fid, true, false); 977 else 978 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp, 979 mlxsw_sp_port->lag_id, 980 fdb->addr, fid, lag_vid, 981 true, false); 982 } 983 984 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, 985 u16 fid, u16 mid, bool adding) 986 { 987 char *sfd_pl; 988 int err; 989 990 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); 991 if (!sfd_pl) 992 return -ENOMEM; 993 994 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); 995 mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, 996 MLXSW_REG_SFD_REC_ACTION_NOP, mid); 997 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 998 kfree(sfd_pl); 999 return err; 1000 } 1001 1002 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid, 1003 bool add, bool clear_all_ports) 1004 { 1005 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1006 char *smid_pl; 1007 int err, i; 1008 1009 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); 1010 if (!smid_pl) 1011 return -ENOMEM; 1012 1013 mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add); 1014 if (clear_all_ports) { 1015 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) 1016 if (mlxsw_sp->ports[i]) 1017 mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); 1018 } 1019 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); 1020 kfree(smid_pl); 1021 return err; 1022 } 1023 1024 static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, 1025 const unsigned char *addr, 1026 u16 fid) 1027 { 1028 struct mlxsw_sp_mid *mid; 1029 1030 list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) { 1031 if (ether_addr_equal(mid->addr, addr) && mid->fid == fid) 1032 return mid; 1033 } 1034 return NULL; 1035 } 1036 1037 static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, 1038 const unsigned char *addr, 1039 u16 fid) 1040 { 1041 struct mlxsw_sp_mid *mid; 1042 u16 mid_idx; 1043 1044 mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped, 1045 MLXSW_SP_MID_MAX); 1046 if (mid_idx == MLXSW_SP_MID_MAX) 1047 return NULL; 1048 1049 mid = kzalloc(sizeof(*mid), GFP_KERNEL); 1050 if (!mid) 1051 return NULL; 1052 1053 set_bit(mid_idx, mlxsw_sp->br_mids.mapped); 1054 ether_addr_copy(mid->addr, addr); 1055 mid->fid = fid; 1056 mid->mid = mid_idx; 1057 mid->ref_count = 0; 1058 list_add_tail(&mid->list, &mlxsw_sp->br_mids.list); 1059 1060 return mid; 1061 } 1062 1063 static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp, 1064 struct mlxsw_sp_mid *mid) 1065 { 1066 if (--mid->ref_count == 0) { 1067 list_del(&mid->list); 1068 clear_bit(mid->mid, mlxsw_sp->br_mids.mapped); 1069 kfree(mid); 1070 return 1; 1071 } 1072 return 0; 1073 } 1074 1075 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, 1076 const struct switchdev_obj_port_mdb *mdb, 1077 struct switchdev_trans *trans) 1078 { 1079 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1080 struct net_device *dev = mlxsw_sp_port->dev; 1081 struct mlxsw_sp_mid *mid; 1082 u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid); 1083 int err = 0; 1084 1085 if (switchdev_trans_ph_prepare(trans)) 1086 return 0; 1087 1088 mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid); 1089 if (!mid) { 1090 mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid); 1091 if (!mid) { 1092 netdev_err(dev, "Unable to allocate MC group\n"); 1093 return -ENOMEM; 1094 } 1095 } 1096 mid->ref_count++; 1097 1098 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true, 1099 mid->ref_count == 1); 1100 if (err) { 1101 netdev_err(dev, "Unable to set SMID\n"); 1102 goto err_out; 1103 } 1104 1105 if (mid->ref_count == 1) { 1106 err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid, 1107 true); 1108 if (err) { 1109 netdev_err(dev, "Unable to set MC SFD\n"); 1110 goto err_out; 1111 } 1112 } 1113 1114 return 0; 1115 1116 err_out: 1117 __mlxsw_sp_mc_dec_ref(mlxsw_sp, mid); 1118 return err; 1119 } 1120 1121 static int mlxsw_sp_port_obj_add(struct net_device *dev, 1122 const struct switchdev_obj *obj, 1123 struct switchdev_trans *trans) 1124 { 1125 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1126 int err = 0; 1127 1128 mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port); 1129 if (!mlxsw_sp_port) 1130 return -EINVAL; 1131 1132 switch (obj->id) { 1133 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1134 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) 1135 return 0; 1136 1137 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, 1138 SWITCHDEV_OBJ_PORT_VLAN(obj), 1139 trans); 1140 break; 1141 case SWITCHDEV_OBJ_ID_PORT_FDB: 1142 err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port, 1143 SWITCHDEV_OBJ_PORT_FDB(obj), 1144 trans); 1145 break; 1146 case SWITCHDEV_OBJ_ID_PORT_MDB: 1147 err = mlxsw_sp_port_mdb_add(mlxsw_sp_port, 1148 SWITCHDEV_OBJ_PORT_MDB(obj), 1149 trans); 1150 break; 1151 default: 1152 err = -EOPNOTSUPP; 1153 break; 1154 } 1155 1156 return err; 1157 } 1158 1159 static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, 1160 u16 vid_begin, u16 vid_end) 1161 { 1162 u16 vid, pvid; 1163 1164 if (!mlxsw_sp_port->bridged) 1165 return -EINVAL; 1166 1167 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end, 1168 false); 1169 1170 pvid = mlxsw_sp_port->pvid; 1171 if (pvid >= vid_begin && pvid <= vid_end) 1172 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); 1173 1174 __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false, 1175 false); 1176 1177 mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); 1178 1179 /* Changing activity bits only if HW operation succeded */ 1180 for (vid = vid_begin; vid <= vid_end; vid++) 1181 clear_bit(vid, mlxsw_sp_port->active_vlans); 1182 1183 return 0; 1184 } 1185 1186 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, 1187 const struct switchdev_obj_port_vlan *vlan) 1188 { 1189 return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin, 1190 vlan->vid_end); 1191 } 1192 1193 void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) 1194 { 1195 u16 vid; 1196 1197 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) 1198 __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid); 1199 } 1200 1201 static int 1202 mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port, 1203 const struct switchdev_obj_port_fdb *fdb) 1204 { 1205 u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid); 1206 u16 lag_vid = 0; 1207 1208 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { 1209 lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); 1210 } 1211 1212 if (!mlxsw_sp_port->lagged) 1213 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp, 1214 mlxsw_sp_port->local_port, 1215 fdb->addr, fid, 1216 false, false); 1217 else 1218 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp, 1219 mlxsw_sp_port->lag_id, 1220 fdb->addr, fid, lag_vid, 1221 false, false); 1222 } 1223 1224 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, 1225 const struct switchdev_obj_port_mdb *mdb) 1226 { 1227 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1228 struct net_device *dev = mlxsw_sp_port->dev; 1229 struct mlxsw_sp_mid *mid; 1230 u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid); 1231 u16 mid_idx; 1232 int err = 0; 1233 1234 mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid); 1235 if (!mid) { 1236 netdev_err(dev, "Unable to remove port from MC DB\n"); 1237 return -EINVAL; 1238 } 1239 1240 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false); 1241 if (err) 1242 netdev_err(dev, "Unable to remove port from SMID\n"); 1243 1244 mid_idx = mid->mid; 1245 if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) { 1246 err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx, 1247 false); 1248 if (err) 1249 netdev_err(dev, "Unable to remove MC SFD\n"); 1250 } 1251 1252 return err; 1253 } 1254 1255 static int mlxsw_sp_port_obj_del(struct net_device *dev, 1256 const struct switchdev_obj *obj) 1257 { 1258 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1259 int err = 0; 1260 1261 mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port); 1262 if (!mlxsw_sp_port) 1263 return -EINVAL; 1264 1265 switch (obj->id) { 1266 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1267 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) 1268 return 0; 1269 1270 err = mlxsw_sp_port_vlans_del(mlxsw_sp_port, 1271 SWITCHDEV_OBJ_PORT_VLAN(obj)); 1272 break; 1273 case SWITCHDEV_OBJ_ID_PORT_FDB: 1274 err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port, 1275 SWITCHDEV_OBJ_PORT_FDB(obj)); 1276 break; 1277 case SWITCHDEV_OBJ_ID_PORT_MDB: 1278 err = mlxsw_sp_port_mdb_del(mlxsw_sp_port, 1279 SWITCHDEV_OBJ_PORT_MDB(obj)); 1280 break; 1281 default: 1282 err = -EOPNOTSUPP; 1283 break; 1284 } 1285 1286 return err; 1287 } 1288 1289 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, 1290 u16 lag_id) 1291 { 1292 struct mlxsw_sp_port *mlxsw_sp_port; 1293 u64 max_lag_members; 1294 int i; 1295 1296 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 1297 MAX_LAG_MEMBERS); 1298 for (i = 0; i < max_lag_members; i++) { 1299 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i); 1300 if (mlxsw_sp_port) 1301 return mlxsw_sp_port; 1302 } 1303 return NULL; 1304 } 1305 1306 static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, 1307 struct switchdev_obj_port_fdb *fdb, 1308 switchdev_obj_dump_cb_t *cb, 1309 struct net_device *orig_dev) 1310 { 1311 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1312 struct mlxsw_sp_port *tmp; 1313 struct mlxsw_sp_fid *f; 1314 u16 vport_fid; 1315 char *sfd_pl; 1316 char mac[ETH_ALEN]; 1317 u16 fid; 1318 u8 local_port; 1319 u16 lag_id; 1320 u8 num_rec; 1321 int stored_err = 0; 1322 int i; 1323 int err; 1324 1325 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); 1326 if (!sfd_pl) 1327 return -ENOMEM; 1328 1329 f = mlxsw_sp_vport_fid_get(mlxsw_sp_port); 1330 vport_fid = f ? f->fid : 0; 1331 1332 mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0); 1333 do { 1334 mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT); 1335 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 1336 if (err) 1337 goto out; 1338 1339 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); 1340 1341 /* Even in case of error, we have to run the dump to the end 1342 * so the session in firmware is finished. 1343 */ 1344 if (stored_err) 1345 continue; 1346 1347 for (i = 0; i < num_rec; i++) { 1348 switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) { 1349 case MLXSW_REG_SFD_REC_TYPE_UNICAST: 1350 mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid, 1351 &local_port); 1352 if (local_port == mlxsw_sp_port->local_port) { 1353 if (vport_fid && vport_fid == fid) 1354 fdb->vid = 0; 1355 else if (!vport_fid && 1356 !mlxsw_sp_fid_is_vfid(fid)) 1357 fdb->vid = fid; 1358 else 1359 continue; 1360 ether_addr_copy(fdb->addr, mac); 1361 fdb->ndm_state = NUD_REACHABLE; 1362 err = cb(&fdb->obj); 1363 if (err) 1364 stored_err = err; 1365 } 1366 break; 1367 case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG: 1368 mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i, 1369 mac, &fid, &lag_id); 1370 tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id); 1371 if (tmp && tmp->local_port == 1372 mlxsw_sp_port->local_port) { 1373 /* LAG records can only point to LAG 1374 * devices or VLAN devices on top. 1375 */ 1376 if (!netif_is_lag_master(orig_dev) && 1377 !is_vlan_dev(orig_dev)) 1378 continue; 1379 if (vport_fid && vport_fid == fid) 1380 fdb->vid = 0; 1381 else if (!vport_fid && 1382 !mlxsw_sp_fid_is_vfid(fid)) 1383 fdb->vid = fid; 1384 else 1385 continue; 1386 ether_addr_copy(fdb->addr, mac); 1387 fdb->ndm_state = NUD_REACHABLE; 1388 err = cb(&fdb->obj); 1389 if (err) 1390 stored_err = err; 1391 } 1392 break; 1393 } 1394 } 1395 } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT); 1396 1397 out: 1398 kfree(sfd_pl); 1399 return stored_err ? stored_err : err; 1400 } 1401 1402 static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port, 1403 struct switchdev_obj_port_vlan *vlan, 1404 switchdev_obj_dump_cb_t *cb) 1405 { 1406 u16 vid; 1407 int err = 0; 1408 1409 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { 1410 vlan->flags = 0; 1411 vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port); 1412 vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port); 1413 return cb(&vlan->obj); 1414 } 1415 1416 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { 1417 vlan->flags = 0; 1418 if (vid == mlxsw_sp_port->pvid) 1419 vlan->flags |= BRIDGE_VLAN_INFO_PVID; 1420 if (test_bit(vid, mlxsw_sp_port->untagged_vlans)) 1421 vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; 1422 vlan->vid_begin = vid; 1423 vlan->vid_end = vid; 1424 err = cb(&vlan->obj); 1425 if (err) 1426 break; 1427 } 1428 return err; 1429 } 1430 1431 static int mlxsw_sp_port_obj_dump(struct net_device *dev, 1432 struct switchdev_obj *obj, 1433 switchdev_obj_dump_cb_t *cb) 1434 { 1435 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1436 int err = 0; 1437 1438 mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port); 1439 if (!mlxsw_sp_port) 1440 return -EINVAL; 1441 1442 switch (obj->id) { 1443 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1444 err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port, 1445 SWITCHDEV_OBJ_PORT_VLAN(obj), cb); 1446 break; 1447 case SWITCHDEV_OBJ_ID_PORT_FDB: 1448 err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port, 1449 SWITCHDEV_OBJ_PORT_FDB(obj), cb, 1450 obj->orig_dev); 1451 break; 1452 default: 1453 err = -EOPNOTSUPP; 1454 break; 1455 } 1456 1457 return err; 1458 } 1459 1460 static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { 1461 .switchdev_port_attr_get = mlxsw_sp_port_attr_get, 1462 .switchdev_port_attr_set = mlxsw_sp_port_attr_set, 1463 .switchdev_port_obj_add = mlxsw_sp_port_obj_add, 1464 .switchdev_port_obj_del = mlxsw_sp_port_obj_del, 1465 .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump, 1466 }; 1467 1468 static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding, 1469 char *mac, u16 vid, 1470 struct net_device *dev) 1471 { 1472 struct switchdev_notifier_fdb_info info; 1473 unsigned long notifier_type; 1474 1475 if (learning_sync) { 1476 info.addr = mac; 1477 info.vid = vid; 1478 notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL; 1479 call_switchdev_notifiers(notifier_type, dev, &info.info); 1480 } 1481 } 1482 1483 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, 1484 char *sfn_pl, int rec_index, 1485 bool adding) 1486 { 1487 struct mlxsw_sp_port *mlxsw_sp_port; 1488 char mac[ETH_ALEN]; 1489 u8 local_port; 1490 u16 vid, fid; 1491 bool do_notification = true; 1492 int err; 1493 1494 mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port); 1495 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1496 if (!mlxsw_sp_port) { 1497 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n"); 1498 goto just_remove; 1499 } 1500 1501 if (mlxsw_sp_fid_is_vfid(fid)) { 1502 struct mlxsw_sp_port *mlxsw_sp_vport; 1503 1504 mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port, 1505 fid); 1506 if (!mlxsw_sp_vport) { 1507 netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); 1508 goto just_remove; 1509 } 1510 vid = 0; 1511 /* Override the physical port with the vPort. */ 1512 mlxsw_sp_port = mlxsw_sp_vport; 1513 } else { 1514 vid = fid; 1515 } 1516 1517 do_fdb_op: 1518 err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, 1519 adding, true); 1520 if (err) { 1521 if (net_ratelimit()) 1522 netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); 1523 return; 1524 } 1525 1526 if (!do_notification) 1527 return; 1528 mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, 1529 adding, mac, vid, mlxsw_sp_port->dev); 1530 return; 1531 1532 just_remove: 1533 adding = false; 1534 do_notification = false; 1535 goto do_fdb_op; 1536 } 1537 1538 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, 1539 char *sfn_pl, int rec_index, 1540 bool adding) 1541 { 1542 struct mlxsw_sp_port *mlxsw_sp_port; 1543 struct net_device *dev; 1544 char mac[ETH_ALEN]; 1545 u16 lag_vid = 0; 1546 u16 lag_id; 1547 u16 vid, fid; 1548 bool do_notification = true; 1549 int err; 1550 1551 mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id); 1552 mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id); 1553 if (!mlxsw_sp_port) { 1554 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n"); 1555 goto just_remove; 1556 } 1557 1558 if (mlxsw_sp_fid_is_vfid(fid)) { 1559 struct mlxsw_sp_port *mlxsw_sp_vport; 1560 1561 mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port, 1562 fid); 1563 if (!mlxsw_sp_vport) { 1564 netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); 1565 goto just_remove; 1566 } 1567 1568 lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); 1569 dev = mlxsw_sp_vport->dev; 1570 vid = 0; 1571 /* Override the physical port with the vPort. */ 1572 mlxsw_sp_port = mlxsw_sp_vport; 1573 } else { 1574 dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev; 1575 vid = fid; 1576 } 1577 1578 do_fdb_op: 1579 err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, 1580 adding, true); 1581 if (err) { 1582 if (net_ratelimit()) 1583 netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); 1584 return; 1585 } 1586 1587 if (!do_notification) 1588 return; 1589 mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac, 1590 vid, dev); 1591 return; 1592 1593 just_remove: 1594 adding = false; 1595 do_notification = false; 1596 goto do_fdb_op; 1597 } 1598 1599 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, 1600 char *sfn_pl, int rec_index) 1601 { 1602 switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) { 1603 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC: 1604 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl, 1605 rec_index, true); 1606 break; 1607 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC: 1608 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl, 1609 rec_index, false); 1610 break; 1611 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG: 1612 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl, 1613 rec_index, true); 1614 break; 1615 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG: 1616 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl, 1617 rec_index, false); 1618 break; 1619 } 1620 } 1621 1622 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp) 1623 { 1624 mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw, 1625 msecs_to_jiffies(mlxsw_sp->fdb_notify.interval)); 1626 } 1627 1628 static void mlxsw_sp_fdb_notify_work(struct work_struct *work) 1629 { 1630 struct mlxsw_sp *mlxsw_sp; 1631 char *sfn_pl; 1632 u8 num_rec; 1633 int i; 1634 int err; 1635 1636 sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL); 1637 if (!sfn_pl) 1638 return; 1639 1640 mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work); 1641 1642 rtnl_lock(); 1643 mlxsw_reg_sfn_pack(sfn_pl); 1644 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); 1645 if (err) { 1646 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n"); 1647 goto out; 1648 } 1649 num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl); 1650 for (i = 0; i < num_rec; i++) 1651 mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); 1652 1653 out: 1654 rtnl_unlock(); 1655 kfree(sfn_pl); 1656 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); 1657 } 1658 1659 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) 1660 { 1661 int err; 1662 1663 err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME); 1664 if (err) { 1665 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n"); 1666 return err; 1667 } 1668 INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work); 1669 mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; 1670 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); 1671 return 0; 1672 } 1673 1674 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) 1675 { 1676 cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw); 1677 } 1678 1679 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) 1680 { 1681 return mlxsw_sp_fdb_init(mlxsw_sp); 1682 } 1683 1684 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) 1685 { 1686 mlxsw_sp_fdb_fini(mlxsw_sp); 1687 } 1688 1689 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) 1690 { 1691 mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; 1692 } 1693 1694 void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port) 1695 { 1696 } 1697