/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	u16 fid = vid;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		fid = mlxsw_sp_vfid_to_fid(vfid);
	}

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}

static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
{
	return vfid >= MLXSW_SP_VFID_PORT_MAX;
}

static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
		if (mlxsw_sp_vfid_is_vport_br(idx_begin))
			local_port = mlxsw_sp_port->local_port;
		else
			local_port = MLXSW_PORT_CPU_PORT;
	} else {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
	}

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
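	 * The BM (broadcast/multicast) flood table for the same index range
	 * is written next, unless only unicast flooding was requested.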
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

buffer_out:
	kfree(sftr_pl);
	return err;
}

static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
			     bool set, bool only_uc)
{
	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
					 only_uc);
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ?
						1 : 0;

	return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err) {
			netdev_err(dev, "Failed to disallow untagged traffic\n");
			return err;
		}
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to set PVID\n");
			return err;
		}

		/* Only allow if not already allowed. */
		if (!mlxsw_sp_port->pvid) {
			err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
							       true);
			if (err) {
				netdev_err(dev, "Failed to allow untagged traffic\n");
				goto err_port_allow_untagged_set;
			}
		}
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
	if (err)
		return err;

	set_bit(fid, mlxsw_sp->active_fids);
	return 0;
}

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(fid, mlxsw_sp->active_fids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    fid, fid);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (!list_empty(&mlxsw_sp_port->vports_list))
		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	else
		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
}

static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}

static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
				  u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_add_vid(dev, 0, vid);
		if (err)
			goto err_port_add_vid;
	}
	return 0;

err_port_add_vid:
	for (vid--; vid >= vid_begin; vid--)
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	return err;
}

static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid, old_pvid;
	enum mlxsw_reg_svfa_mt mt;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=%d\n",
					   vid);
				return err;
			}

			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
							   true, vid, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
					   vid);
				goto err_port_vid_to_fid_set;
			}
		}
	}

	/* Set FID mapping according to port's mode */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to map FID=%d\n", vid);
			last_visited_vid = --vid;
			goto err_port_fid_map;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					true, false);
	if (err) {
		netdev_err(dev, "Failed to configure flooding\n");
		goto err_port_flood_set;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
	} else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID\n");
			goto err_port_pvid_set;
		}
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_vid_to_fid_set:
	mlxsw_sp_fid_destroy(mlxsw_sp, vid);
	return err;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_flood_set:
	last_visited_vid = vid_end;
err_port_fid_map:
	for (vid = last_visited_vid; vid >= vid_begin; vid--)
		mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}

static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
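		/* Their port bits are left at zero, so masking these
		 * ports in clears them from the MID entry.
		 */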
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 vid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
			return mid;
	}
	return NULL;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 vid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->vid = vid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}

static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}

static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
				   u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	if (init)
		goto out;

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* Remove FID mapping in case of Virtual mode */
		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to unmap FID=%d\n", vid);
			return err;
		}
	}

out:
	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end, false);
}

void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	u16 vport_fid = 0;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 tmp;

		tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
		vport_fid = mlxsw_sp_vfid_to_fid(tmp);
	}

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
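		 * The first error is therefore kept in stored_err and only
		 * returned once the dump is done.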
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
						continue;
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb,
					     obj->orig_dev);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
					char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		vid = 0;
		/* Override the physical port with the vPort.
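		 * From here on, the vPort's learning and learning_sync
		 * flags are the ones consulted.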
		 */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
				    vid, dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
			      msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	rtnl_lock();
	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);
	rtnl_unlock();

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 fid;

	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}

int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
					true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}