/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	u16 fid = vid;

	fid = f ? f->fid : fid;

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}

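/* Switchdev callbacks can be invoked for a VLAN upper of a port netdev.
 * Resolve orig_dev back to the vPort representing that {port, VID} pair,
 * so the operation is applied in the correct context.
 */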
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

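/* Set or clear (v)FID range membership in the device's flood tables via
 * SFTR. Bridged ports use the FID-offset table indexed by FID, while
 * vPorts use the FID table indexed by vFID. Unless only_uc is set, the
 * BM (broadcast/multicast) table is updated as well, and a failure there
 * rolls the unicast change back.
 */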
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto err_flood_bm_set;
	goto buffer_out;

err_flood_bm_set:
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, !set);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
	kfree(sftr_pl);
	return err;
}

static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set)
{
	u16 vfid;

	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	vfid = mlxsw_sp_fid_to_vfid(fid);
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
					 false);
}

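/* switchdev attributes are set in a two-phase transaction: the prepare
 * phase may only validate and reserve resources, while the commit phase
 * applies the change. Handlers below therefore bail out early under
 * switchdev_trans_ph_prepare().
 */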
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = !mlxsw_sp_port->uc_flood;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if (!vlan_enabled && mlxsw_sp->master_bridge.dev == orig_dev) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

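/* FID (Filtering Identifier) management. A FID is the device's analogue
 * of a bridge broadcast domain. SFMR creates and destroys FIDs, while
 * SVFA installs the global VID-to-FID mapping used by VLAN-aware bridge
 * ports.
 */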
static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->fid = fid;

	return f;
}

struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	struct mlxsw_sp_fid *f;
	int err;

	err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
	if (err)
		return ERR_PTR(err);

	/* Although all the ports that are members of the FID might be using
	 * a {Port, VID} to FID mapping, we create a global VID-to-FID
	 * mapping. This allows a port to transition to VLAN mode, knowing
	 * the global mapping exists.
	 */
	err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
	if (err)
		goto err_fid_map;

	f = mlxsw_sp_fid_alloc(fid);
	if (!f) {
		err = -ENOMEM;
		goto err_allocate_fid;
	}

	list_add(&f->list, &mlxsw_sp->fids);

	return f;

err_allocate_fid:
	mlxsw_sp_fid_map(mlxsw_sp, fid, false);
err_fid_map:
	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
	return ERR_PTR(err);
}

void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
{
	u16 fid = f->fid;

	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_fid_map(mlxsw_sp, fid, false);

	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
}

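/* FIDs are reference counted per port: the first port to join a FID
 * creates it and the last one to leave destroys it. Leaving also flushes
 * the port's FDB entries in that FID.
 */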
static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);

	return 0;
}

static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (WARN_ON(!f))
		return;

	netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);

	if (--f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
				 bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;

	/* If the port doesn't have vPorts, then it can use the global
	 * VID-to-FID mapping.
	 */
	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
}

static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 fid_begin, u16 fid_end)
{
	int fid, err;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
		if (err)
			goto err_port_fid_join;
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
					true, false);
	if (err)
		goto err_port_flood_set;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
		if (err)
			goto err_port_fid_map;
	}

	return 0;

err_port_fid_map:
	for (fid--; fid >= fid_begin; fid--)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false);
err_port_flood_set:
	fid = fid_end + 1;	/* unwind all joined FIDs, including fid_end */
err_port_fid_join:
	for (fid--; fid >= fid_begin; fid--)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
	return err;
}

static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid_begin, u16 fid_end)
{
	int fid;

	for (fid = fid_begin; fid <= fid_end; fid++)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);

	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false);

	for (fid = fid_begin; fid <= fid_end; fid++)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
}

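/* PVID handling. A PVID of zero means the port does not accept untagged
 * traffic, so changing the PVID also has to toggle untagged admission via
 * the SPAFT register.
 */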
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err) {
			netdev_err(dev, "Failed to disallow untagged traffic\n");
			return err;
		}
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to set PVID\n");
			return err;
		}

		/* Only allow untagged traffic if it is not already allowed. */
		if (!mlxsw_sp_port->pvid) {
			err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
							       true);
			if (err) {
				netdev_err(dev, "Failed to allow untagged traffic\n");
				goto err_port_allow_untagged_set;
			}
		}
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

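/* Adding VLANs to a bridged port is staged: join the matching FIDs, set
 * VLAN membership, adjust the PVID if requested, mark the VLANs as active
 * and finally re-apply the port's STP state to them. Each step is unwound
 * in reverse order on failure.
 */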
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, old_pvid;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
	if (err) {
		netdev_err(dev, "Failed to join FIDs\n");
		return err;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
	} else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID\n");
			goto err_port_pvid_set;
		}
	}

	/* Change activity bits only if the HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}

int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

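/* Multicast groups are represented by MID entries: an SFD multicast
 * record binds a {MAC, FID} pair to a MID, and the SMID register holds
 * the set of local ports that the MID replicates to.
 */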
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}

static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 vid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
			return mid;
	}
	return NULL;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 vid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->vid = vid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}

static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}

static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}

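/* switchdev object entry points, dispatching on the object type. VLAN
 * objects directed at a vPort (VLAN upper) are deliberately ignored.
 */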
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
					       SWITCHDEV_OBJ_IPV4_FIB(obj),
					       trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
	}

	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);

	/* Change activity bits only if the HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
					 vlan->vid_end);
}

void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
					       SWITCHDEV_OBJ_IPV4_FIB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

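/* Dump the hardware FDB using SFD query sessions. Even if the dump
 * callback fails mid-way, querying continues until the firmware returns a
 * short batch so the session is properly terminated; the first callback
 * error is returned afterwards.
 */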
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	struct mlxsw_sp_fid *f;
	u16 vport_fid;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	vport_fid = f ? f->fid : 0;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
						continue;
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb,
					     obj->orig_dev);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

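/* Notify the bridge about FDB entries learned or aged out by the device.
 * The notification is only sent when the port has learning_sync enabled.
 */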
static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
					char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

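/* LAG variant of mlxsw_sp_fdb_notify_mac_process(): the FDB entry is
 * confirmed or removed against the LAG ID, and the notification is sent
 * to the LAG netdev (or the VLAN device on top of it).
 */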
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
				    vid, dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
			       msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	rtnl_lock();
	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);
	rtnl_unlock();

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
}

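/* Per-port switchdev registration simply hooks the shared ops structure
 * into the netdev; there is no per-port state to tear down yet, hence the
 * empty fini.
 */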
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}