/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	u16 fid = vid;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		fid = mlxsw_sp_vfid_to_fid(vfid);
	}

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}
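/* mlxsw_sp_port_vid_to_fid_get() above: on a port member in a VLAN-aware
 * bridge the FID simply equals the VID, while a vPort is mapped through
 * its vFID. A rough sketch of the translation, assuming
 * mlxsw_sp_vfid_to_fid() in spectrum.h keeps offsetting vFIDs past the
 * 4K VLAN range (MLXSW_SP_VFID_BASE):
 *
 *	VID 10 on a bridged port	-> FID 10
 *	vPort with vFID 3		-> FID MLXSW_SP_VFID_BASE + 3
 */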
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
{
	return vfid >= MLXSW_SP_VFID_PORT_MAX;
}

static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
		if (mlxsw_sp_vfid_is_vport_br(idx_begin))
			local_port = mlxsw_sp_port->local_port;
		else
			local_port = MLXSW_PORT_CPU_PORT;
	} else {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
	}

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

buffer_out:
	kfree(sftr_pl);
	return err;
}
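/* Usage sketch for the helper above (illustrative, not new behavior):
 * enabling unicast flooding only, for a single FID, leaves the
 * broadcast/unregistered-multicast (BM) flood table untouched:
 *
 *	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid, fid,
 *					true, true); // set=true, only_uc=true
 *
 * whereas VLAN addition passes only_uc=false, so both the UC and BM
 * flood tables are updated for the whole index range with one SFTR
 * write each.
 */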
static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}
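/* Error handling in mlxsw_sp_port_uc_flood_set() above follows the usual
 * unwind idiom: on failure, every VID visited so far is rolled back by
 * replaying the operation with the opposite 'set' value, so the hardware
 * is left in its previous state before the error is returned.
 */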
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
			     bool set, bool only_uc)
{
	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
					 only_uc);
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}
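/* Worked example for the conversion above: the bridge layer hands the
 * ageing time down in clock_t units (hundredths of a second with the
 * typical USER_HZ=100), so a user request of 300 seconds, e.g.
 *
 *	ip link set dev br0 type bridge ageing_time 30000
 *
 * arrives as ageing_clock_t=30000, becomes 300000 ms worth of jiffies,
 * and is programmed into SFDAT as ageing_time=300 (seconds).
 */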
static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
	if (err)
		return err;

	set_bit(fid, mlxsw_sp->active_fids);
	return 0;
}

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(fid, mlxsw_sp->active_fids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    fid, fid);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (!list_empty(&mlxsw_sp_port->vports_list))
		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	else
		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
}

static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}

static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
				  u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_add_vid(dev, 0, vid);
		if (err)
			goto err_port_add_vid;
	}
	return 0;

err_port_add_vid:
	for (vid--; vid >= vid_begin; vid--)
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	return err;
}

static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
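/* __mlxsw_sp_port_vlans_set() above batches the requested VID range into
 * chunks of at most MLXSW_REG_SPVM_REC_MAX_COUNT records per SPVM write.
 * For instance, with a purely illustrative record limit of 256, adding
 * VIDs 1-1000 would take four register writes (1-256, 257-512, 513-768,
 * 769-1000) instead of a thousand.
 */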
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid, old_pvid;
	enum mlxsw_reg_svfa_mt mt;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=%d\n",
					   vid);
				return err;
			}

			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
							   true, vid, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
					   vid);
				goto err_port_vid_to_fid_set;
			}
		}
	}

	/* Set FID mapping according to port's mode */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to map FID=%d", vid);
			last_visited_vid = --vid;
			goto err_port_fid_map;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					true, false);
	if (err) {
		netdev_err(dev, "Failed to configure flooding\n");
		goto err_port_flood_set;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
		mlxsw_sp_port->pvid = vid_begin;
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_vid_to_fid_set:
	mlxsw_sp_fid_destroy(mlxsw_sp, vid);
	return err;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_flood_set:
	last_visited_vid = vid_end;
err_port_fid_map:
	for (vid = last_visited_vid; vid >= vid_begin; vid--)
		mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}
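/* FDB records written with the DYNAMIC_ENTRY_INGRESS policy are subject
 * to hardware ageing and relearning, which is what the learning
 * notification path below uses; STATIC_ENTRY records, used for entries
 * added via switchdev (see mlxsw_sp_port_fdb_static_add() below), are
 * exempt from ageing.
 */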
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}
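/* A static entry added from user space, e.g. (port name is just an
 * example)
 *
 *	bridge fdb add de:ad:be:ef:00:01 dev sw1p1 master static
 *
 * reaches mlxsw_sp_port_fdb_static_add() above as a
 * SWITCHDEV_OBJ_ID_PORT_FDB object and is written with dynamic=false,
 * i.e. the STATIC_ENTRY policy, so it is not aged out by the device.
 */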
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}

static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 vid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
			return mid;
	}
	return NULL;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 vid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->vid = vid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}

static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}

static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}
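/* MID (multicast ID) entries above are reference counted per {MAC, VID}
 * group: the first bridge port to join allocates the MID from a bitmap
 * of MLXSW_SP_MID_MAX entries and writes the SFD record, later ports
 * only add themselves to the SMID port list, and the SFD record is
 * removed again when the last reference drops in __mlxsw_sp_mc_dec_ref().
 */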
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
				   u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) {
		/* Default VLAN is always 1 */
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
		mlxsw_sp_port->pvid = 1;
	}

	if (init)
		goto out;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* Remove FID mapping in case of Virtual mode */
		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to unmap FID=%d", vid);
			return err;
		}
	}

out:
	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end, false);
}
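/* Note the PVID handling in __mlxsw_sp_port_vlans_del() above: if the
 * deleted range covers the current PVID, the port falls back to the
 * default VLAN (1) instead of being left without a PVID, matching the
 * "Default VLAN is always 1" assumption used throughout this file.
 */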
static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 vport_vid = 0, vport_fid = 0;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mutex_lock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 tmp;

		tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
		vport_fid = mlxsw_sp_vfid_to_fid(tmp);
		vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid != fid)
						continue;
					else if (vport_fid)
						fdb->vid = vport_vid;
					else
						fdb->vid = fid;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				if (mlxsw_sp_port ==
				    mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) {
					if (vport_fid && vport_fid != fid)
						continue;
					else if (vport_fid)
						fdb->vid = vport_vid;
					else
						fdb->vid = fid;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	mutex_unlock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}
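/* mlxsw_sp_port_fdb_dump() above holds fdb_lock for the whole dump so
 * the FDB notification worker (mlxsw_sp_fdb_notify_work() below) cannot
 * add or remove entries mid-session, and the SFD query loop is always
 * run to completion even after a callback error so the firmware dump
 * session is properly terminated; the first callback error is stashed
 * in stored_err and returned afterwards.
 */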
static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync,
					bool adding, char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning && learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}
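/* FDB events are forwarded to the bridge only when both BR_LEARNING and
 * BR_LEARNING_SYNC are set on the port: learning controls whether the
 * device learns at all, learning_sync controls whether learned entries
 * are reflected into the software bridge via SWITCHDEV_FDB_ADD /
 * SWITCHDEV_FDB_DEL notifications.
 */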
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
				    mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		lag_vid = vid;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
				    mlxsw_sp_port->learning_sync,
				    adding, mac, vid,
				    mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
			      msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}
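/* FDB learning is event based but polled: the delayed work below drains
 * the SFN register, processes learned / aged-out records, and then
 * re-arms itself every fdb_notify.interval milliseconds
 * (MLXSW_SP_DEFAULT_LEARNING_INTERVAL by default, set in
 * mlxsw_sp_fdb_init() below).
 */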
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	mutex_lock(&mlxsw_sp->fdb_lock);
	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);
	mutex_unlock(&mlxsw_sp->fdb_lock);

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	mutex_init(&mlxsw_sp->fdb_lock);
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 fid;

	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}

int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
					true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}