/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

static bool mlxsw_sp_port_dev_check(const struct net_device *dev);

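/* Together, the fields above describe a Tx header of four 32-bit words
 * (MLXSW_TXHDR_LEN bytes) prepended to every packet handed to the device.
 * mlxsw_sp_txhdr_construct() below fills it for control packets, which are
 * directed to a specific local egress port.
 */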
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_resources *resources;
	int i;

	resources = mlxsw_core_resources_get(mlxsw_sp->core);
	if (!resources->max_span_valid)
		return -EIO;

	mlxsw_sp->span.entries_count = resources->max_span;
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

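/* Each SPAN entry represents one mirror destination (analyzer) port and is
 * reference counted: mlxsw_sp_span_entry_get() reuses an existing entry for
 * the port or creates a new one, and mlxsw_sp_span_entry_put() destroys the
 * entry once the last user is gone.
 */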
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	/* A new entry starts with one reference, owned by the caller */
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
			return curr;
	}
	return NULL;
}

struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
{
	return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
}

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

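/* An "inspected port" is a mirrored (source) port bound to a SPAN entry.
 * Egress bindings additionally reserve an internal shared buffer (SBIB)
 * sized from the port's MTU, which is kept up to date by
 * mlxsw_sp_span_port_mtu_update() above.
 */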
static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

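/* Mirroring from 'from' to 'to' thus involves two objects: the SPAN entry of
 * the destination port (taken with mlxsw_sp_span_entry_get()) and an
 * inspected-port binding on the source port; teardown releases them in the
 * reverse order.
 */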
static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
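		/* If the reallocation fails, the original skb is freed and
		 * NETDEV_TX_OK is returned so the stack counts the packet as
		 * dropped rather than retrying it.
		 */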
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

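/* Per priority group, mlxsw_sp_pg_buf_pack() above reserves twice the MTU in
 * cells, plus the PAUSE/PFC reaction delay when the group is lossless; the
 * wrapper below recomputes that headroom from the port's current DCB state.
 */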
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

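/* mlxsw_sp_port_vlan_mode_trans() is the inverse transition: the port leaves
 * Virtual mode first and the explicit {Port, VID} to FID mappings are then
 * removed, letting the global VID to FID mapping take effect again.
 */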
static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return -ENOMEM;

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err)
			goto err_port_vp_mode_trans;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err)
		goto err_port_add_vid;

	return 0;

err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return 0;

	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}

static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port,
				unsigned long cookie)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;
	int err;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
		return -ENOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
	mall_tc_entry->mirror.to_local_port = to_port->local_port;
	mall_tc_entry->mirror.ingress = ingress;
	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);

	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
	if (err)
		goto err_mirror_add;
	return 0;

err_mirror_add:
	list_del(&mall_tc_entry->list);
	kfree(mall_tc_entry);
	return err;
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  __be16 protocol,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tc_single_action(cls->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -ENOTSUPP;
	}

	tcf_exts_to_list(cls->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL))
			return -ENOTSUPP;

		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls,
							    a, ingress);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *cls)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;

	mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port,
							cls->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port];
		span_type = mall_tc_entry->mirror.ingress ?
				MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;

		mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
		break;
	default:
		WARN_ON(1);
	}

	list_del(&mall_tc_entry->list);
	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
			     __be16 proto, struct tc_to_netdev *tc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

	if (tc->type == TC_SETUP_MATCHALL) {
		switch (tc->cls_mall->command) {
		case TC_CLSMATCHALL_REPLACE:
			return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
							      proto,
							      tc->cls_mall,
							      ingress);
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
						       tc->cls_mall);
			return 0;
		default:
			return -EINVAL;
		}
	}

	return -ENOTSUPP;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_neigh_construct	= mlxsw_sp_router_neigh_construct,
	.ndo_neigh_destroy	= mlxsw_sp_router_neigh_destroy,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
"a_octets_transmitted_ok", 1328 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 1329 }, 1330 { 1331 .str = "a_octets_received_ok", 1332 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 1333 }, 1334 { 1335 .str = "a_multicast_frames_xmitted_ok", 1336 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 1337 }, 1338 { 1339 .str = "a_broadcast_frames_xmitted_ok", 1340 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 1341 }, 1342 { 1343 .str = "a_multicast_frames_received_ok", 1344 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 1345 }, 1346 { 1347 .str = "a_broadcast_frames_received_ok", 1348 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 1349 }, 1350 { 1351 .str = "a_in_range_length_errors", 1352 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 1353 }, 1354 { 1355 .str = "a_out_of_range_length_field", 1356 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 1357 }, 1358 { 1359 .str = "a_frame_too_long_errors", 1360 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 1361 }, 1362 { 1363 .str = "a_symbol_error_during_carrier", 1364 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 1365 }, 1366 { 1367 .str = "a_mac_control_frames_transmitted", 1368 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 1369 }, 1370 { 1371 .str = "a_mac_control_frames_received", 1372 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 1373 }, 1374 { 1375 .str = "a_unsupported_opcodes_received", 1376 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 1377 }, 1378 { 1379 .str = "a_pause_mac_ctrl_frames_received", 1380 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 1381 }, 1382 { 1383 .str = "a_pause_mac_ctrl_frames_xmitted", 1384 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 1385 }, 1386 }; 1387 1388 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 1389 1390 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 1391 { 1392 .str = "rx_octets_prio", 1393 .getter = mlxsw_reg_ppcnt_rx_octets_get, 1394 }, 1395 { 1396 .str = "rx_frames_prio", 1397 .getter = mlxsw_reg_ppcnt_rx_frames_get, 1398 }, 1399 { 1400 .str = "tx_octets_prio", 1401 .getter = mlxsw_reg_ppcnt_tx_octets_get, 1402 }, 1403 { 1404 .str = "tx_frames_prio", 1405 .getter = mlxsw_reg_ppcnt_tx_frames_get, 1406 }, 1407 { 1408 .str = "rx_pause_prio", 1409 .getter = mlxsw_reg_ppcnt_rx_pause_get, 1410 }, 1411 { 1412 .str = "rx_pause_duration_prio", 1413 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 1414 }, 1415 { 1416 .str = "tx_pause_prio", 1417 .getter = mlxsw_reg_ppcnt_tx_pause_get, 1418 }, 1419 { 1420 .str = "tx_pause_duration_prio", 1421 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 1422 }, 1423 }; 1424 1425 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 1426 1427 static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl) 1428 { 1429 u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 1430 1431 return MLXSW_SP_CELLS_TO_BYTES(transmit_queue); 1432 } 1433 1434 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 1435 { 1436 .str = "tc_transmit_queue_tc", 1437 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get, 1438 }, 1439 { 1440 .str = "tc_no_buffer_discard_uc_tc", 1441 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 1442 }, 1443 }; 1444 1445 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 1446 1447 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN 
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)

static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -ENOTSUPP;
	}
	return 0;
}

static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < len; i++)
		data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)

static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

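/* The PTYS register reports three protocol masks: capability (what the port
 * can do), admin (what it may advertise) and operational (what is currently
 * up); get_settings below maps them to ethtool's supported, advertising and
 * current speed/duplex, respectively.
 */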
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto\n");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause |
			 SUPPORTED_Autoneg;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	if (!netif_running(dev))
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
			    eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
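
/* The QEEC scheduling elements form a tree: traffic class -> subgroup
 * -> group -> port. The init below links TC i to subgroup i and all
 * subgroups to group 0, leaves DWRR off and the max shaper disabled at
 * every level that supports one, and maps all priorities to TC 0.
 */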
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the element hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->pvid = 1;

	return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
}

static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
}
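
/* Port creation below is order-dependent: the netdev is allocated and
 * its MAC set first, then the port is mapped (system port, SWID) and
 * its speeds, MTU, buffers, ETS and DCB configured, and the PVID vPort
 * created. Only then is the netdev registered, so the port is fully
 * initialized before it becomes visible to the stack. The error ladder
 * at the end unwinds these steps in reverse.
 */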
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_vport_create;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
				   mlxsw_sp_port->local_port, dev,
				   mlxsw_sp_port->split, module);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			mlxsw_sp_port->local_port);
		goto err_core_port_init;
	}

	return 0;

err_core_port_init:
	unregister_netdev(dev);
err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
err_port_pvid_vport_create:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}
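
/* Teardown mirrors mlxsw_sp_port_create() in reverse. Note that
 * unregister_netdev() runs first (triggering ndo_stop()) while the
 * rest of the port state is still valid.
 */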
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
	free_netdev(mlxsw_sp_port->dev);
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
					   lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}

static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}

static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}
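
/* Splitting replaces a full-width port with count ports of width
 * MLXSW_PORT_MODULE_MAX_WIDTH / count, each mapped at lane i * width.
 * E.g. with a 4-lane module, a split by 2 yields two 2-lane ports on
 * lanes 0 and 2, and a split by 4 yields four 1-lane ports.
 * Unsplitting below re-creates count / 2 unsplit ports at full width,
 * at even offsets from the base port.
 */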
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}

static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
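
/* Reversing a split: the split factor is recovered from the current
 * width (width 1 means the port was split by four, otherwise by two),
 * and for a by-two split only the half of the cluster holding the
 * given port is re-created.
 */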
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}

static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};

static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
				   enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_event_trap_set;

	return 0;

err_event_trap_set:
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
	return err;
}

static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
				      enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
}

static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
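
/* All traps below share a single handler: mlxsw_sp_rx_listener_func()
 * just counts the packet and injects it into the stack, so this array
 * is effectively the list of control-plane packet types (STP, LACP,
 * LLDP, IGMP, ARP, router exceptions, ...) punted to the CPU.
 */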
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_ARPBC,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_ARPUC,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MTUERROR,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_TTLERROR,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LBERROR,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_OSPF,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IP2ME,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
	},
};
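
/* Trap setup is two-staged per listener: register it, then program the
 * HPKT action to trap to the CPU. The error path therefore first
 * unwinds the half-initialized entry (listener registered, action not
 * yet set) and then walks back over the fully initialized ones,
 * resetting their action to discard before unregistering.
 */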
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}

static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
	else
		flood_table = MLXSW_SP_FLOOD_TABLE_BM;

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}
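
/* Main init flow. Ordering matters: traps, flood tables, buffers, LAG,
 * switchdev, router and SPAN are all brought up before the ports are
 * created, since port creation relies on them; mlxsw_sp_fini() tears
 * everything down in the reverse order.
 */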
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	return err;
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int i;

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
	WARN_ON(!list_empty(&mlxsw_sp->fids));
	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
		WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
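
/* Resource profile handed to the core through mlxsw_sp_driver below
 * and presumably applied to the device during initialization; each
 * used_* flag marks which optional limit the driver actually requests.
 */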
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.kvd_hash_single_size		= MLXSW_SP_KVD_HASH_SINGLE_SIZE,
	.kvd_hash_double_size		= MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.resource_query_enable		= 1,
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner				= THIS_MODULE,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};

static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
		if (mlxsw_sp_port_dev_check(lower_dev))
			return netdev_priv(lower_dev);
	}
	return NULL;
}
static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
		if (mlxsw_sp_port_dev_check(lower_dev))
			return netdev_priv(lower_dev);
	}
	return NULL;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
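
/* Router interface (RIF) lifecycle: a RIF is created on the first
 * NETDEV_UP inetaddr event on a netdev and reference counted for each
 * additional address; the last NETDEV_DOWN releases it. The NULL check
 * on NETDEV_DOWN covers RIFs that were already removed because their
 * netdev became a bridge or LAG slave.
 */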
static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
				       unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
		if (!r)
			return true;
		r->ref_count++;
		return false;
	case NETDEV_DOWN:
		if (r && --r->ref_count == 0)
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or LAG slave.
		 */
		return false;
	}

	return false;
}

static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
		if (!mlxsw_sp->rifs[i])
			return i;

	return MLXSW_SP_RIF_MAX;
}

static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
					   bool *p_lagged, u16 *p_system_port)
{
	u8 local_port = mlxsw_sp_vport->local_port;

	*p_lagged = mlxsw_sp_vport->lagged;
	*p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
}

static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *l3_dev, u16 rif,
				    bool create)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	bool lagged = mlxsw_sp_vport->lagged;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u16 system_port;

	mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
			    l3_dev->mtu, l3_dev->dev_addr);

	mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
				  mlxsw_sp_vport_vid_get(mlxsw_sp_vport));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static struct mlxsw_sp_fid *
mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->leave = mlxsw_sp_vport_rif_sp_leave;
	f->ref_count = 0;
	f->dev = l3_dev;
	f->fid = fid;

	return f;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	ether_addr_copy(r->addr, l3_dev->dev_addr);
	r->mtu = l3_dev->mtu;
	r->ref_count = 1;
	r->dev = l3_dev;
	r->rif = rif;
	r->f = f;

	return r;
}

static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *r;
	u16 fid, rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_RIF_MAX)
		return ERR_PTR(-ERANGE);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
	if (err)
		return ERR_PTR(err);

	fid = mlxsw_sp_rif_sp_to_fid(rif);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	return r;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
	return ERR_PTR(err);
}

static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 fid = f->fid;
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
}
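
/* Joining is idempotent per L3 device: the first vPort to join creates
 * the RIF, subsequent ones only take a reference on its FID. Leaving
 * drops the reference and destroys the RIF when it reaches zero.
 */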
static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_rif *r;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!r) {
		r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
		if (IS_ERR(r))
			return PTR_ERR(r);
	}

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
	r->f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);

	return 0;
}

static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
}

static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
					 struct net_device *port_dev,
					 unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event)
{
	if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
}

static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
							    event, vid);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}

static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
						    struct net_device *l3_dev)
{
	u16 fid;

	if (is_vlan_dev(l3_dev))
		fid = vlan_dev_vlan_id(l3_dev);
	else if (mlxsw_sp->master_bridge.dev == l3_dev)
		fid = 1;
	else
		return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);

	return mlxsw_sp_fid_find(mlxsw_sp, fid);
}
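
/* vFIDs and FIDs are flooded through different table types: a vFID
 * uses the FID table indexed by the vFID itself, while a regular FID
 * uses the FID-offset table indexed by the FID.
 */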
static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
{
	return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
	       MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
}

static u16 mlxsw_sp_flood_table_index_get(u16 fid)
{
	return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
}

static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
					  bool set)
{
	enum mlxsw_flood_table_type table_type;
	char *sftr_pl;
	u16 index;
	int err;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	table_type = mlxsw_sp_flood_table_type_get(fid);
	index = mlxsw_sp_flood_table_index_get(fid);
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type,
			    1, MLXSW_PORT_ROUTER_PORT, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

	kfree(sftr_pl);
	return err;
}

static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return MLXSW_REG_RITR_FID_IF;
	else
		return MLXSW_REG_RITR_VLAN_IF;
}

static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  u16 fid, u16 rif,
				  bool create)
{
	enum mlxsw_reg_ritr_if_type rif_type;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_type = mlxsw_sp_rif_type_get(fid);
	mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
			    l3_dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;
	u16 rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_RIF_MAX)
		return -ERANGE;

	err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
	if (err)
		goto err_rif_bridge_op;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
err_rif_bridge_op:
	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
	return err;
}

void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r)
{
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);

	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  struct net_device *br_dev,
					  unsigned long event)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_fid *f;

	/* FID can either be an actual FID if the L3 device is the
	 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
	 * L3 device is a VLAN-unaware bridge and we get a vFID.
	 */
	f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
	if (WARN_ON(!f))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
	case NETDEV_DOWN:
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) &&
		 mlxsw_sp->master_bridge.dev == real_dev)
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
						      event);

	return 0;
}

static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(r, event))
		goto out;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_inetaddr_vlan_event(dev, event);

out:
	return notifier_from_errno(err);
}

static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!r)
		return 0;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	ether_addr_copy(r->addr, dev->dev_addr);
	r->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", r->rif);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
	return err;
}
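
/* FDB flush policy: flushing by {LAG, FID} would also drop entries
 * learned on behalf of other LAG members, so a lagged port only
 * flushes when it is the last member still using the FID.
 */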
static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
					 u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
	else
		return test_bit(fid, lag_port->active_vlans);
}

static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
					   u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int i, count = 0;

	if (!mlxsw_sp_port->lagged)
		return true;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		struct mlxsw_sp_port *lag_port;

		lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (!lag_port || lag_port->local_port == local_port)
			continue;
		if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
			count++;
	}

	return !count;
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
		   mlxsw_sp_port->local_port, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
		   mlxsw_sp_port->lag_id, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
		return 0;

	if (mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}

static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *f, *tmp;

	list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
		if (--f->ref_count == 0)
			mlxsw_sp_fid_destroy(mlxsw_sp, f);
		else
			WARN_ON_ONCE(1);
}

static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}
static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0) {
		mlxsw_sp->master_bridge.dev = NULL;
		/* It's possible upper VLAN devices are still holding
		 * references to underlying FIDs. Drop the reference
		 * and release the resources if it was the last one.
		 * If it wasn't, then something bad happened.
		 */
		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
	}
}

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}

static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
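
/* LAG port membership appears to be programmed in two planes,
 * presumably matching the 802.1AX collector/distributor split: SLCOR
 * for the collector (Rx) side below, and SLDR for the distributor (Tx)
 * side (see the *_dist_port_* helpers further down).
 */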

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	int i;

	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If vPort is assigned a RIF, then leave it since it's no
	 * longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lag_id = lag_id;
	mlxsw_sp_vport->lagged = 1;
}
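
/* The PVID vPort (VID 1) mirrors the physical port's LAG state, so
 * that subsequent operations on its behalf (e.g. the FDB flush helpers
 * above, which key on 'lagged' and 'lag_id') address the LAG rather
 * than the local port. The leave helper below resets that state when
 * the port leaves the LAG.
 */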

static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lagged = 0;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
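
/* Distributor membership tracks the LAG lower state reported by the
 * bonding/team driver: when tx_enabled is cleared for a slave (e.g.
 * link down or a backup port), the port is removed from the
 * distributor list so the hardware stops hashing egress traffic to
 * it, without tearing down the rest of the LAG state.
 */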

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}
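
/* Topology changes arrive through the netdevice notifier chain:
 * NETDEV_PRECHANGEUPPER is used to veto configurations the device
 * cannot offload (returning an error makes the core abort the
 * linking), while NETDEV_CHANGEUPPER performs the actual join/leave.
 * For example, enslaving a port to a second bridge fails the
 * mlxsw_sp_master_bridge_check() above and is rejected in the
 * PRECHANGEUPPER phase, before any state is touched.
 */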

static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* A HW limitation forbids enslaving a port to multiple
		 * bridges.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							  upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	return 0;
}

static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (f && f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
	if (f && --f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp, f);
}
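
/* VLAN uppers of the master bridge map to "real" FIDs: each VID gets
 * the FID of the same number, reference counted by the bridge VLAN
 * devices using it, and any bridge RIF riding on the FID is destroyed
 * together with it on the last unlink.
 */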

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;
	if (br_dev != mlxsw_sp->master_bridge.dev)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev))
			break;
		if (info->linking) {
			err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
							       upper_dev);
			if (err)
				return err;
		} else {
			mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
		}
		break;
	}

	return 0;
}

static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}

static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}

static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}

static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}
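
/* A vFID is created on demand for the first vPort enslaved to a given
 * VLAN-unaware bridge and looked up via the bridge device afterwards;
 * ref_count tracks the member vPorts and the last leave releases the
 * vFID. The join/leave pair keeps the flood tables, the {Port, VID}
 * to FID mapping and the reference count in sync.
 */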

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}

static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}

static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;
}

static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}
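
/* Example of the check above: if swp1.10 is already a member of br0,
 * then enslaving swp1.20 to br0 as well is rejected, since both VLAN
 * interfaces share the same underlying port (device names are
 * illustrative).
 */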

static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port be members of the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
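
/* The notifiers are registered before the driver itself so that
 * netdev and inetaddr events are already observed while ports are
 * being created; mlxsw_sp_module_exit() tears everything down in the
 * opposite order.
 */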

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);