/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
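
/* Reading aid (derived from the MLXSW_ITEM32() definitions above, which
 * remain the authoritative description): taken together, the items describe
 * the 16-byte TX header (MLXSW_TXHDR_LEN) prepended to every transmitted
 * frame. Sketch of the layout, bit 31 leftmost in each 32-bit word:
 *
 *   0x00: version[31:28] ctl[27:26] proto[23:21] rx_is_router[19]
 *         fid_valid[16] swid[14:12] control_tclass[6] etclass[3:0]
 *   0x04: port_mid[31:16]
 *   0x08: fid[15:0]
 *   0x0C: type[3:0]
 */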

static bool mlxsw_sp_port_dev_check(const struct net_device *dev);

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_resources *resources;
	int i;

	resources = mlxsw_core_resources_get(mlxsw_sp->core);
	if (!resources->max_span_valid)
		return -EIO;

	mlxsw_sp->span.entries_count = resources->max_span;
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
			return curr;
	}
	return NULL;
}

struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	if (span_entry) {
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}
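
/* Lifecycle sketch (a reading aid, not upstream documentation): a SPAN entry
 * is shared by all mirror sessions that target the same analyzer port, so
 * callers pair mlxsw_sp_span_entry_get() with mlxsw_sp_span_entry_put():
 *
 *	entry = mlxsw_sp_span_entry_get(to_port);	// create (ref = 1) or ref++
 *	...bind one or more inspected ports...
 *	mlxsw_sp_span_entry_put(mlxsw_sp, entry);	// ref--, destroy at zero
 *
 * The MPAT register programs the analyzer port itself; MPAR (below) binds
 * the inspected ports to it.
 */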

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
{
	return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
}
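
/* Reading aid: the internal buffer used for egress mirroring is sized at
 * 2.5x the port MTU, converted to cells, plus one extra cell. E.g. with a
 * 1500 byte MTU the buffer is MLXSW_SP_BYTES_TO_CELLS(3750) + 1 cells.
 * The 2.5 factor presumably leaves headroom for frames in flight while the
 * mirrored copy is made; the exact rationale is not documented here.
 */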

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
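
/* Worked example (illustrative values): with a base MAC of
 * 7c:fe:90:00:00:00 read from SPAD, local port 5 gets the address
 * 7c:fe:90:00:00:05 - the base MAC with the local port number added to
 * the last byte. Note the u8 addition can wrap within the last byte on
 * switches whose base MAC ends close to 0xff.
 */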

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
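
/* Reading aid for mlxsw_sp_port_mtu_set() above: PMTU takes the full frame
 * size, while the netdev MTU excludes the Ethernet header and the mlxsw TX
 * header, so both are added before programming the register. E.g. a
 * requested MTU of 1500 is written as 1500 + 16 (MLXSW_TXHDR_LEN) +
 * 14 (ETH_HLEN) = 1530.
 */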

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}
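
/* Reading aid: each priority group (PG) buffer holds two MTU-sized packets
 * worth of cells. A lossless PG additionally reserves 'delay' cells above
 * the Xoff threshold, so that frames already in flight when PAUSE/PFC is
 * asserted still have room to land; a lossy PG may simply drop them.
 */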

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		return -ENOMEM;
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	return 0;

err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}
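
/* Illustrative examples: a non-split port on module 0 is reported as "p1".
 * A port split on module 0 that uses lanes 2-3 with width 2 is reported as
 * "p1s1" - module + 1, then lane / width as the subport index.
 */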

static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port,
				unsigned long cookie)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;
	int err;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
		return -ENOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
	mall_tc_entry->mirror.to_local_port = to_port->local_port;
	mall_tc_entry->mirror.ingress = ingress;
	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);

	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
	if (err)
		goto err_mirror_add;
	return 0;

err_mirror_add:
	list_del(&mall_tc_entry->list);
	kfree(mall_tc_entry);
	return err;
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  __be16 protocol,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	const struct tc_action *a;
	int err;

	if (!tc_single_action(cls->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -ENOTSUPP;
	}

	tc_for_each_action(a, cls->exts) {
		if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL))
			return -ENOTSUPP;

		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls,
							    a, ingress);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *cls)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;

	mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port,
							cls->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port];
		span_type = mall_tc_entry->mirror.ingress ?
				MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;

		mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
		break;
	default:
		WARN_ON(1);
	}

	list_del(&mall_tc_entry->list);
	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
			     __be16 proto, struct tc_to_netdev *tc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

	if (tc->type == TC_SETUP_MATCHALL) {
		switch (tc->cls_mall->command) {
		case TC_CLSMATCHALL_REPLACE:
			return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
							      proto,
							      tc->cls_mall,
							      ingress);
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
						       tc->cls_mall);
			return 0;
		default:
			return -EINVAL;
		}
	}

	return -ENOTSUPP;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_neigh_construct	= mlxsw_sp_router_neigh_construct,
	.ndo_neigh_destroy	= mlxsw_sp_router_neigh_destroy,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl)
{
	u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);

	return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
}

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)
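
/* Worked example: with the arrays above this evaluates to
 * 19 + (8 + 2) * 8 = 99 ethtool counters per port - 19 IEEE 802.3
 * counters, plus 8 per-priority and 2 per-TC counters for each of the
 * 8 traffic classes/priorities.
 */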

static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -ENOTSUPP;
	}
	return 0;
}

static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < len; i++)
		data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported = SUPPORTED_100baseT_Full,
		.advertised = ADVERTISED_100baseT_Full,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported = SUPPORTED_1000baseKX_Full,
		.advertised = ADVERTISED_1000baseKX_Full,
		.speed = 1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported = SUPPORTED_10000baseT_Full,
		.advertised = ADVERTISED_10000baseT_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported = SUPPORTED_10000baseKX4_Full,
		.advertised = ADVERTISED_10000baseKX4_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported = SUPPORTED_10000baseKR_Full,
		.advertised = ADVERTISED_10000baseKR_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported = SUPPORTED_20000baseKR2_Full,
		.advertised = ADVERTISED_20000baseKR2_Full,
		.speed = 20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported = SUPPORTED_40000baseCR4_Full,
		.advertised = ADVERTISED_40000baseCR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported = SUPPORTED_40000baseKR4_Full,
		.advertised = ADVERTISED_40000baseKR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported = SUPPORTED_40000baseSR4_Full,
		.advertised = ADVERTISED_40000baseSR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported = SUPPORTED_40000baseLR4_Full,
		.advertised = ADVERTISED_40000baseLR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed = 25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed = 50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported = SUPPORTED_56000baseKR4_Full,
		.advertised = ADVERTISED_56000baseKR4_Full,
		.speed = 56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed = 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)

static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause |
			 SUPPORTED_Autoneg;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}
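
/* Reading aid: mlxsw_sp_to_ptys_upper_speed() builds the admin mask used by
 * mlxsw_sp_port_speed_by_width_set() below. E.g. if the per-lane base speed
 * times the port width comes to 40000, every link mode with
 * .speed <= 40000 is enabled, so such a port comes up able to negotiate
 * anything from 100 Mb/s up to 40 Gb/s.
 */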
ptys_pl[MLXSW_REG_PTYS_LEN]; 1825 u32 eth_proto_cap; 1826 u32 eth_proto_admin; 1827 u32 eth_proto_oper; 1828 int err; 1829 1830 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 1831 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1832 if (err) { 1833 netdev_err(dev, "Failed to get proto"); 1834 return err; 1835 } 1836 mlxsw_reg_ptys_unpack(ptys_pl, ð_proto_cap, 1837 ð_proto_admin, ð_proto_oper); 1838 1839 cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) | 1840 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) | 1841 SUPPORTED_Pause | SUPPORTED_Asym_Pause | 1842 SUPPORTED_Autoneg; 1843 cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin); 1844 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), 1845 eth_proto_oper, cmd); 1846 1847 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; 1848 cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper); 1849 cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper); 1850 1851 cmd->transceiver = XCVR_INTERNAL; 1852 return 0; 1853 } 1854 1855 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising) 1856 { 1857 u32 ptys_proto = 0; 1858 int i; 1859 1860 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1861 if (advertising & mlxsw_sp_port_link_mode[i].advertised) 1862 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 1863 } 1864 return ptys_proto; 1865 } 1866 1867 static u32 mlxsw_sp_to_ptys_speed(u32 speed) 1868 { 1869 u32 ptys_proto = 0; 1870 int i; 1871 1872 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1873 if (speed == mlxsw_sp_port_link_mode[i].speed) 1874 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 1875 } 1876 return ptys_proto; 1877 } 1878 1879 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) 1880 { 1881 u32 ptys_proto = 0; 1882 int i; 1883 1884 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1885 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed) 1886 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 1887 } 1888 return ptys_proto; 1889 } 1890 1891 static int mlxsw_sp_port_set_settings(struct net_device *dev, 1892 struct ethtool_cmd *cmd) 1893 { 1894 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1895 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1896 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1897 u32 speed; 1898 u32 eth_proto_new; 1899 u32 eth_proto_cap; 1900 u32 eth_proto_admin; 1901 int err; 1902 1903 speed = ethtool_cmd_speed(cmd); 1904 1905 eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ? 
1906 mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
1907 mlxsw_sp_to_ptys_speed(speed);
1908
1909 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1910 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1911 if (err) {
1912 netdev_err(dev, "Failed to get proto\n");
1913 return err;
1914 }
1915 mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
1916
1917 eth_proto_new = eth_proto_new & eth_proto_cap;
1918 if (!eth_proto_new) {
1919 netdev_err(dev, "Unsupported proto admin requested\n");
1920 return -EINVAL;
1921 }
1922 if (eth_proto_new == eth_proto_admin)
1923 return 0;
1924
1925 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
1926 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1927 if (err) {
1928 netdev_err(dev, "Failed to set proto admin\n");
1929 return err;
1930 }
1931
1932 if (!netif_running(dev))
1933 return 0;
1934
1935 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1936 if (err) {
1937 netdev_err(dev, "Failed to set admin status\n");
1938 return err;
1939 }
1940
1941 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1942 if (err) {
1943 netdev_err(dev, "Failed to set admin status\n");
1944 return err;
1945 }
1946
1947 return 0;
1948 }
1949
1950 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
1951 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
1952 .get_link = ethtool_op_get_link,
1953 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
1954 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
1955 .get_strings = mlxsw_sp_port_get_strings,
1956 .set_phys_id = mlxsw_sp_port_set_phys_id,
1957 .get_ethtool_stats = mlxsw_sp_port_get_stats,
1958 .get_sset_count = mlxsw_sp_port_get_sset_count,
1959 .get_settings = mlxsw_sp_port_get_settings,
1960 .set_settings = mlxsw_sp_port_set_settings,
1961 };
1962
1963 static int
1964 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1965 {
1966 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1967 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
1968 char ptys_pl[MLXSW_REG_PTYS_LEN];
1969 u32 eth_proto_admin;
1970
1971 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
1972 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
1973 eth_proto_admin);
1974 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1975 }
1976
1977 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1978 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1979 bool dwrr, u8 dwrr_weight)
1980 {
1981 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1982 char qeec_pl[MLXSW_REG_QEEC_LEN];
1983
1984 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1985 next_index);
1986 mlxsw_reg_qeec_de_set(qeec_pl, true);
1987 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1988 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1989 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1990 }
1991
1992 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1993 enum mlxsw_reg_qeec_hr hr, u8 index,
1994 u8 next_index, u32 maxrate)
1995 {
1996 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1997 char qeec_pl[MLXSW_REG_QEEC_LEN];
1998
1999 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2000 next_index);
2001 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2002 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2003 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2004 }
2005
2006 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2007 u8 switch_prio, u8 tclass)
2008 {
2009 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2010 char qtct_pl[MLXSW_REG_QTCT_LEN];
2011
2012 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2013 tclass);
2014 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2015 }
2016
2017 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2018 {
2019 int err, i;
2020
2021 /* Set up the elements hierarchy, so that each TC is linked to
2022 * one subgroup, which are all members of the same group.
2023 */
2024 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2025 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2026 0);
2027 if (err)
2028 return err;
2029 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2030 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2031 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2032 0, false, 0);
2033 if (err)
2034 return err;
2035 }
2036 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2037 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2038 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2039 false, 0);
2040 if (err)
2041 return err;
2042 }
2043
2044 /* Make sure the max shaper is disabled in all hierarchies that
2045 * support it.
2046 */
2047 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2048 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2049 MLXSW_REG_QEEC_MAS_DIS);
2050 if (err)
2051 return err;
2052 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2053 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2054 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2055 i, 0,
2056 MLXSW_REG_QEEC_MAS_DIS);
2057 if (err)
2058 return err;
2059 }
2060 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2061 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2062 MLXSW_REG_QEEC_HIERARCY_TC,
2063 i, i,
2064 MLXSW_REG_QEEC_MAS_DIS);
2065 if (err)
2066 return err;
2067 }
2068
2069 /* Map all priorities to traffic class 0.
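 * Until something else (e.g. the port's DCB ETS configuration)
 * remaps them, all switch priorities are thus served by a single
 * traffic class.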
 */
2070 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2071 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2072 if (err)
2073 return err;
2074 }
2075
2076 return 0;
2077 }
2078
2079 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2080 bool split, u8 module, u8 width, u8 lane)
2081 {
2082 struct mlxsw_sp_port *mlxsw_sp_port;
2083 struct net_device *dev;
2084 size_t bytes;
2085 int err;
2086
2087 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2088 if (!dev)
2089 return -ENOMEM;
2090 mlxsw_sp_port = netdev_priv(dev);
2091 mlxsw_sp_port->dev = dev;
2092 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2093 mlxsw_sp_port->local_port = local_port;
2094 mlxsw_sp_port->split = split;
2095 mlxsw_sp_port->mapping.module = module;
2096 mlxsw_sp_port->mapping.width = width;
2097 mlxsw_sp_port->mapping.lane = lane;
2098 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
2099 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
2100 if (!mlxsw_sp_port->active_vlans) {
2101 err = -ENOMEM;
2102 goto err_port_active_vlans_alloc;
2103 }
2104 mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
2105 if (!mlxsw_sp_port->untagged_vlans) {
2106 err = -ENOMEM;
2107 goto err_port_untagged_vlans_alloc;
2108 }
2109 INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
2110 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
2111
2112 mlxsw_sp_port->pcpu_stats =
2113 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2114 if (!mlxsw_sp_port->pcpu_stats) {
2115 err = -ENOMEM;
2116 goto err_alloc_stats;
2117 }
2118
2119 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2120 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2121
2122 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2123 if (err) {
2124 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2125 mlxsw_sp_port->local_port);
2126 goto err_dev_addr_init;
2127 }
2128
2129 netif_carrier_off(dev);
2130
2131 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
2132 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2133 dev->hw_features |= NETIF_F_HW_TC;
2134
2135 /* Each packet needs to have a Tx header (metadata) on top of all other
2136 * headers.
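 * Reserving the room via hard_header_len makes the stack allocate
 * enough headroom for the Tx header to be prepended without a
 * reallocation.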
2137 */ 2138 dev->hard_header_len += MLXSW_TXHDR_LEN; 2139 2140 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 2141 if (err) { 2142 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 2143 mlxsw_sp_port->local_port); 2144 goto err_port_system_port_mapping_set; 2145 } 2146 2147 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 2148 if (err) { 2149 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 2150 mlxsw_sp_port->local_port); 2151 goto err_port_swid_set; 2152 } 2153 2154 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 2155 if (err) { 2156 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 2157 mlxsw_sp_port->local_port); 2158 goto err_port_speed_by_width_set; 2159 } 2160 2161 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 2162 if (err) { 2163 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 2164 mlxsw_sp_port->local_port); 2165 goto err_port_mtu_set; 2166 } 2167 2168 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2169 if (err) 2170 goto err_port_admin_status_set; 2171 2172 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 2173 if (err) { 2174 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 2175 mlxsw_sp_port->local_port); 2176 goto err_port_buffers_init; 2177 } 2178 2179 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 2180 if (err) { 2181 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 2182 mlxsw_sp_port->local_port); 2183 goto err_port_ets_init; 2184 } 2185 2186 /* ETS and buffers must be initialized before DCB. */ 2187 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 2188 if (err) { 2189 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 2190 mlxsw_sp_port->local_port); 2191 goto err_port_dcb_init; 2192 } 2193 2194 mlxsw_sp_port_switchdev_init(mlxsw_sp_port); 2195 err = register_netdev(dev); 2196 if (err) { 2197 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 2198 mlxsw_sp_port->local_port); 2199 goto err_register_netdev; 2200 } 2201 2202 err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port, 2203 mlxsw_sp_port->local_port, dev, 2204 mlxsw_sp_port->split, module); 2205 if (err) { 2206 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 2207 mlxsw_sp_port->local_port); 2208 goto err_core_port_init; 2209 } 2210 2211 err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); 2212 if (err) 2213 goto err_port_vlan_init; 2214 2215 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 2216 return 0; 2217 2218 err_port_vlan_init: 2219 mlxsw_core_port_fini(&mlxsw_sp_port->core_port); 2220 err_core_port_init: 2221 unregister_netdev(dev); 2222 err_register_netdev: 2223 err_port_dcb_init: 2224 err_port_ets_init: 2225 err_port_buffers_init: 2226 err_port_admin_status_set: 2227 err_port_mtu_set: 2228 err_port_speed_by_width_set: 2229 err_port_swid_set: 2230 err_port_system_port_mapping_set: 2231 err_dev_addr_init: 2232 free_percpu(mlxsw_sp_port->pcpu_stats); 2233 err_alloc_stats: 2234 kfree(mlxsw_sp_port->untagged_vlans); 2235 err_port_untagged_vlans_alloc: 2236 kfree(mlxsw_sp_port->active_vlans); 2237 err_port_active_vlans_alloc: 2238 free_netdev(dev); 2239 return err; 2240 } 2241 2242 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 2243 { 2244 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2245 2246 if (!mlxsw_sp_port) 2247 return; 2248 mlxsw_sp->ports[local_port] = NULL; 2249 
mlxsw_core_port_fini(&mlxsw_sp_port->core_port); 2250 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 2251 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 2252 mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); 2253 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 2254 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 2255 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); 2256 free_percpu(mlxsw_sp_port->pcpu_stats); 2257 kfree(mlxsw_sp_port->untagged_vlans); 2258 kfree(mlxsw_sp_port->active_vlans); 2259 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list)); 2260 free_netdev(mlxsw_sp_port->dev); 2261 } 2262 2263 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 2264 { 2265 int i; 2266 2267 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) 2268 mlxsw_sp_port_remove(mlxsw_sp, i); 2269 kfree(mlxsw_sp->ports); 2270 } 2271 2272 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 2273 { 2274 u8 module, width, lane; 2275 size_t alloc_size; 2276 int i; 2277 int err; 2278 2279 alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS; 2280 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 2281 if (!mlxsw_sp->ports) 2282 return -ENOMEM; 2283 2284 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { 2285 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 2286 &width, &lane); 2287 if (err) 2288 goto err_port_module_info_get; 2289 if (!width) 2290 continue; 2291 mlxsw_sp->port_to_module[i] = module; 2292 err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width, 2293 lane); 2294 if (err) 2295 goto err_port_create; 2296 } 2297 return 0; 2298 2299 err_port_create: 2300 err_port_module_info_get: 2301 for (i--; i >= 1; i--) 2302 mlxsw_sp_port_remove(mlxsw_sp, i); 2303 kfree(mlxsw_sp->ports); 2304 return err; 2305 } 2306 2307 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 2308 { 2309 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 2310 2311 return local_port - offset; 2312 } 2313 2314 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 2315 u8 module, unsigned int count) 2316 { 2317 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 2318 int err, i; 2319 2320 for (i = 0; i < count; i++) { 2321 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module, 2322 width, i * width); 2323 if (err) 2324 goto err_port_module_map; 2325 } 2326 2327 for (i = 0; i < count; i++) { 2328 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0); 2329 if (err) 2330 goto err_port_swid_set; 2331 } 2332 2333 for (i = 0; i < count; i++) { 2334 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, 2335 module, width, i * width); 2336 if (err) 2337 goto err_port_create; 2338 } 2339 2340 return 0; 2341 2342 err_port_create: 2343 for (i--; i >= 0; i--) 2344 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2345 i = count; 2346 err_port_swid_set: 2347 for (i--; i >= 0; i--) 2348 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 2349 MLXSW_PORT_SWID_DISABLED_PORT); 2350 i = count; 2351 err_port_module_map: 2352 for (i--; i >= 0; i--) 2353 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i); 2354 return err; 2355 } 2356 2357 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 2358 u8 base_port, unsigned int count) 2359 { 2360 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 2361 int i; 2362 2363 /* Split by four means we need to re-create two ports, otherwise 2364 * only one. 
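 * E.g. after a by-four split the ports at base_port and
 * base_port + 2 are re-created; after a by-two split only the
 * port at base_port is.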
2365 */ 2366 count = count / 2; 2367 2368 for (i = 0; i < count; i++) { 2369 local_port = base_port + i * 2; 2370 module = mlxsw_sp->port_to_module[local_port]; 2371 2372 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, 2373 0); 2374 } 2375 2376 for (i = 0; i < count; i++) 2377 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0); 2378 2379 for (i = 0; i < count; i++) { 2380 local_port = base_port + i * 2; 2381 module = mlxsw_sp->port_to_module[local_port]; 2382 2383 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 2384 width, 0); 2385 } 2386 } 2387 2388 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 2389 unsigned int count) 2390 { 2391 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2392 struct mlxsw_sp_port *mlxsw_sp_port; 2393 u8 module, cur_width, base_port; 2394 int i; 2395 int err; 2396 2397 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2398 if (!mlxsw_sp_port) { 2399 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2400 local_port); 2401 return -EINVAL; 2402 } 2403 2404 module = mlxsw_sp_port->mapping.module; 2405 cur_width = mlxsw_sp_port->mapping.width; 2406 2407 if (count != 2 && count != 4) { 2408 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 2409 return -EINVAL; 2410 } 2411 2412 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 2413 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 2414 return -EINVAL; 2415 } 2416 2417 /* Make sure we have enough slave (even) ports for the split. */ 2418 if (count == 2) { 2419 base_port = local_port; 2420 if (mlxsw_sp->ports[base_port + 1]) { 2421 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 2422 return -EINVAL; 2423 } 2424 } else { 2425 base_port = mlxsw_sp_cluster_base_port_get(local_port); 2426 if (mlxsw_sp->ports[base_port + 1] || 2427 mlxsw_sp->ports[base_port + 3]) { 2428 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 2429 return -EINVAL; 2430 } 2431 } 2432 2433 for (i = 0; i < count; i++) 2434 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2435 2436 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); 2437 if (err) { 2438 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2439 goto err_port_split_create; 2440 } 2441 2442 return 0; 2443 2444 err_port_split_create: 2445 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 2446 return err; 2447 } 2448 2449 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) 2450 { 2451 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2452 struct mlxsw_sp_port *mlxsw_sp_port; 2453 u8 cur_width, base_port; 2454 unsigned int count; 2455 int i; 2456 2457 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2458 if (!mlxsw_sp_port) { 2459 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2460 local_port); 2461 return -EINVAL; 2462 } 2463 2464 if (!mlxsw_sp_port->split) { 2465 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); 2466 return -EINVAL; 2467 } 2468 2469 cur_width = mlxsw_sp_port->mapping.width; 2470 count = cur_width == 1 ? 4 : 2; 2471 2472 base_port = mlxsw_sp_cluster_base_port_get(local_port); 2473 2474 /* Determine which ports to remove. 
*/ 2475 if (count == 2 && local_port >= base_port + 2) 2476 base_port = base_port + 2; 2477 2478 for (i = 0; i < count; i++) 2479 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2480 2481 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 2482 2483 return 0; 2484 } 2485 2486 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 2487 char *pude_pl, void *priv) 2488 { 2489 struct mlxsw_sp *mlxsw_sp = priv; 2490 struct mlxsw_sp_port *mlxsw_sp_port; 2491 enum mlxsw_reg_pude_oper_status status; 2492 u8 local_port; 2493 2494 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 2495 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2496 if (!mlxsw_sp_port) 2497 return; 2498 2499 status = mlxsw_reg_pude_oper_status_get(pude_pl); 2500 if (status == MLXSW_PORT_OPER_STATUS_UP) { 2501 netdev_info(mlxsw_sp_port->dev, "link up\n"); 2502 netif_carrier_on(mlxsw_sp_port->dev); 2503 } else { 2504 netdev_info(mlxsw_sp_port->dev, "link down\n"); 2505 netif_carrier_off(mlxsw_sp_port->dev); 2506 } 2507 } 2508 2509 static struct mlxsw_event_listener mlxsw_sp_pude_event = { 2510 .func = mlxsw_sp_pude_event_func, 2511 .trap_id = MLXSW_TRAP_ID_PUDE, 2512 }; 2513 2514 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp, 2515 enum mlxsw_event_trap_id trap_id) 2516 { 2517 struct mlxsw_event_listener *el; 2518 char hpkt_pl[MLXSW_REG_HPKT_LEN]; 2519 int err; 2520 2521 switch (trap_id) { 2522 case MLXSW_TRAP_ID_PUDE: 2523 el = &mlxsw_sp_pude_event; 2524 break; 2525 } 2526 err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp); 2527 if (err) 2528 return err; 2529 2530 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id); 2531 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 2532 if (err) 2533 goto err_event_trap_set; 2534 2535 return 0; 2536 2537 err_event_trap_set: 2538 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp); 2539 return err; 2540 } 2541 2542 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp, 2543 enum mlxsw_event_trap_id trap_id) 2544 { 2545 struct mlxsw_event_listener *el; 2546 2547 switch (trap_id) { 2548 case MLXSW_TRAP_ID_PUDE: 2549 el = &mlxsw_sp_pude_event; 2550 break; 2551 } 2552 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp); 2553 } 2554 2555 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port, 2556 void *priv) 2557 { 2558 struct mlxsw_sp *mlxsw_sp = priv; 2559 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2560 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 2561 2562 if (unlikely(!mlxsw_sp_port)) { 2563 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 2564 local_port); 2565 return; 2566 } 2567 2568 skb->dev = mlxsw_sp_port->dev; 2569 2570 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 2571 u64_stats_update_begin(&pcpu_stats->syncp); 2572 pcpu_stats->rx_packets++; 2573 pcpu_stats->rx_bytes += skb->len; 2574 u64_stats_update_end(&pcpu_stats->syncp); 2575 2576 skb->protocol = eth_type_trans(skb, skb->dev); 2577 netif_receive_skb(skb); 2578 } 2579 2580 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = { 2581 { 2582 .func = mlxsw_sp_rx_listener_func, 2583 .local_port = MLXSW_PORT_DONT_CARE, 2584 .trap_id = MLXSW_TRAP_ID_FDB_MC, 2585 }, 2586 /* Traps for specific L2 packet types, not trapped as FDB MC */ 2587 { 2588 .func = mlxsw_sp_rx_listener_func, 2589 .local_port = MLXSW_PORT_DONT_CARE, 2590 .trap_id = MLXSW_TRAP_ID_STP, 2591 }, 2592 { 2593 .func = mlxsw_sp_rx_listener_func, 
2594 .local_port = MLXSW_PORT_DONT_CARE, 2595 .trap_id = MLXSW_TRAP_ID_LACP, 2596 }, 2597 { 2598 .func = mlxsw_sp_rx_listener_func, 2599 .local_port = MLXSW_PORT_DONT_CARE, 2600 .trap_id = MLXSW_TRAP_ID_EAPOL, 2601 }, 2602 { 2603 .func = mlxsw_sp_rx_listener_func, 2604 .local_port = MLXSW_PORT_DONT_CARE, 2605 .trap_id = MLXSW_TRAP_ID_LLDP, 2606 }, 2607 { 2608 .func = mlxsw_sp_rx_listener_func, 2609 .local_port = MLXSW_PORT_DONT_CARE, 2610 .trap_id = MLXSW_TRAP_ID_MMRP, 2611 }, 2612 { 2613 .func = mlxsw_sp_rx_listener_func, 2614 .local_port = MLXSW_PORT_DONT_CARE, 2615 .trap_id = MLXSW_TRAP_ID_MVRP, 2616 }, 2617 { 2618 .func = mlxsw_sp_rx_listener_func, 2619 .local_port = MLXSW_PORT_DONT_CARE, 2620 .trap_id = MLXSW_TRAP_ID_RPVST, 2621 }, 2622 { 2623 .func = mlxsw_sp_rx_listener_func, 2624 .local_port = MLXSW_PORT_DONT_CARE, 2625 .trap_id = MLXSW_TRAP_ID_DHCP, 2626 }, 2627 { 2628 .func = mlxsw_sp_rx_listener_func, 2629 .local_port = MLXSW_PORT_DONT_CARE, 2630 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY, 2631 }, 2632 { 2633 .func = mlxsw_sp_rx_listener_func, 2634 .local_port = MLXSW_PORT_DONT_CARE, 2635 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT, 2636 }, 2637 { 2638 .func = mlxsw_sp_rx_listener_func, 2639 .local_port = MLXSW_PORT_DONT_CARE, 2640 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT, 2641 }, 2642 { 2643 .func = mlxsw_sp_rx_listener_func, 2644 .local_port = MLXSW_PORT_DONT_CARE, 2645 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE, 2646 }, 2647 { 2648 .func = mlxsw_sp_rx_listener_func, 2649 .local_port = MLXSW_PORT_DONT_CARE, 2650 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT, 2651 }, 2652 { 2653 .func = mlxsw_sp_rx_listener_func, 2654 .local_port = MLXSW_PORT_DONT_CARE, 2655 .trap_id = MLXSW_TRAP_ID_ARPBC, 2656 }, 2657 { 2658 .func = mlxsw_sp_rx_listener_func, 2659 .local_port = MLXSW_PORT_DONT_CARE, 2660 .trap_id = MLXSW_TRAP_ID_ARPUC, 2661 }, 2662 { 2663 .func = mlxsw_sp_rx_listener_func, 2664 .local_port = MLXSW_PORT_DONT_CARE, 2665 .trap_id = MLXSW_TRAP_ID_IP2ME, 2666 }, 2667 { 2668 .func = mlxsw_sp_rx_listener_func, 2669 .local_port = MLXSW_PORT_DONT_CARE, 2670 .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0, 2671 }, 2672 { 2673 .func = mlxsw_sp_rx_listener_func, 2674 .local_port = MLXSW_PORT_DONT_CARE, 2675 .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4, 2676 }, 2677 }; 2678 2679 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 2680 { 2681 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2682 char hpkt_pl[MLXSW_REG_HPKT_LEN]; 2683 int i; 2684 int err; 2685 2686 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX); 2687 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); 2688 if (err) 2689 return err; 2690 2691 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL); 2692 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); 2693 if (err) 2694 return err; 2695 2696 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { 2697 err = mlxsw_core_rx_listener_register(mlxsw_sp->core, 2698 &mlxsw_sp_rx_listener[i], 2699 mlxsw_sp); 2700 if (err) 2701 goto err_rx_listener_register; 2702 2703 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, 2704 mlxsw_sp_rx_listener[i].trap_id); 2705 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 2706 if (err) 2707 goto err_rx_trap_set; 2708 } 2709 return 0; 2710 2711 err_rx_trap_set: 2712 mlxsw_core_rx_listener_unregister(mlxsw_sp->core, 2713 &mlxsw_sp_rx_listener[i], 2714 mlxsw_sp); 2715 err_rx_listener_register: 2716 for (i--; i >= 0; i--) { 2717 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, 2718 
mlxsw_sp_rx_listener[i].trap_id); 2719 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 2720 2721 mlxsw_core_rx_listener_unregister(mlxsw_sp->core, 2722 &mlxsw_sp_rx_listener[i], 2723 mlxsw_sp); 2724 } 2725 return err; 2726 } 2727 2728 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 2729 { 2730 char hpkt_pl[MLXSW_REG_HPKT_LEN]; 2731 int i; 2732 2733 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { 2734 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, 2735 mlxsw_sp_rx_listener[i].trap_id); 2736 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 2737 2738 mlxsw_core_rx_listener_unregister(mlxsw_sp->core, 2739 &mlxsw_sp_rx_listener[i], 2740 mlxsw_sp); 2741 } 2742 } 2743 2744 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core, 2745 enum mlxsw_reg_sfgc_type type, 2746 enum mlxsw_reg_sfgc_bridge_type bridge_type) 2747 { 2748 enum mlxsw_flood_table_type table_type; 2749 enum mlxsw_sp_flood_table flood_table; 2750 char sfgc_pl[MLXSW_REG_SFGC_LEN]; 2751 2752 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) 2753 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; 2754 else 2755 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; 2756 2757 if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST) 2758 flood_table = MLXSW_SP_FLOOD_TABLE_UC; 2759 else 2760 flood_table = MLXSW_SP_FLOOD_TABLE_BM; 2761 2762 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type, 2763 flood_table); 2764 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl); 2765 } 2766 2767 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) 2768 { 2769 int type, err; 2770 2771 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { 2772 if (type == MLXSW_REG_SFGC_TYPE_RESERVED) 2773 continue; 2774 2775 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, 2776 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID); 2777 if (err) 2778 return err; 2779 2780 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, 2781 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID); 2782 if (err) 2783 return err; 2784 } 2785 2786 return 0; 2787 } 2788 2789 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2790 { 2791 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2792 2793 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 2794 MLXSW_REG_SLCR_LAG_HASH_DMAC | 2795 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 2796 MLXSW_REG_SLCR_LAG_HASH_VLANID | 2797 MLXSW_REG_SLCR_LAG_HASH_SIP | 2798 MLXSW_REG_SLCR_LAG_HASH_DIP | 2799 MLXSW_REG_SLCR_LAG_HASH_SPORT | 2800 MLXSW_REG_SLCR_LAG_HASH_DPORT | 2801 MLXSW_REG_SLCR_LAG_HASH_IPPROTO); 2802 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2803 } 2804 2805 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 2806 const struct mlxsw_bus_info *mlxsw_bus_info) 2807 { 2808 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2809 int err; 2810 2811 mlxsw_sp->core = mlxsw_core; 2812 mlxsw_sp->bus_info = mlxsw_bus_info; 2813 INIT_LIST_HEAD(&mlxsw_sp->fids); 2814 INIT_LIST_HEAD(&mlxsw_sp->vfids.list); 2815 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); 2816 2817 err = mlxsw_sp_base_mac_get(mlxsw_sp); 2818 if (err) { 2819 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 2820 return err; 2821 } 2822 2823 err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 2824 if (err) { 2825 dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n"); 2826 return err; 2827 } 2828 2829 err = mlxsw_sp_traps_init(mlxsw_sp); 2830 if (err) { 2831 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n"); 2832 goto err_rx_listener_register; 2833 } 2834 2835 err = 
mlxsw_sp_flood_init(mlxsw_sp); 2836 if (err) { 2837 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n"); 2838 goto err_flood_init; 2839 } 2840 2841 err = mlxsw_sp_buffers_init(mlxsw_sp); 2842 if (err) { 2843 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 2844 goto err_buffers_init; 2845 } 2846 2847 err = mlxsw_sp_lag_init(mlxsw_sp); 2848 if (err) { 2849 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 2850 goto err_lag_init; 2851 } 2852 2853 err = mlxsw_sp_switchdev_init(mlxsw_sp); 2854 if (err) { 2855 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 2856 goto err_switchdev_init; 2857 } 2858 2859 err = mlxsw_sp_router_init(mlxsw_sp); 2860 if (err) { 2861 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 2862 goto err_router_init; 2863 } 2864 2865 err = mlxsw_sp_span_init(mlxsw_sp); 2866 if (err) { 2867 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 2868 goto err_span_init; 2869 } 2870 2871 err = mlxsw_sp_ports_create(mlxsw_sp); 2872 if (err) { 2873 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 2874 goto err_ports_create; 2875 } 2876 2877 return 0; 2878 2879 err_ports_create: 2880 mlxsw_sp_span_fini(mlxsw_sp); 2881 err_span_init: 2882 mlxsw_sp_router_fini(mlxsw_sp); 2883 err_router_init: 2884 mlxsw_sp_switchdev_fini(mlxsw_sp); 2885 err_switchdev_init: 2886 err_lag_init: 2887 mlxsw_sp_buffers_fini(mlxsw_sp); 2888 err_buffers_init: 2889 err_flood_init: 2890 mlxsw_sp_traps_fini(mlxsw_sp); 2891 err_rx_listener_register: 2892 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 2893 return err; 2894 } 2895 2896 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 2897 { 2898 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2899 int i; 2900 2901 mlxsw_sp_ports_remove(mlxsw_sp); 2902 mlxsw_sp_span_fini(mlxsw_sp); 2903 mlxsw_sp_router_fini(mlxsw_sp); 2904 mlxsw_sp_switchdev_fini(mlxsw_sp); 2905 mlxsw_sp_buffers_fini(mlxsw_sp); 2906 mlxsw_sp_traps_fini(mlxsw_sp); 2907 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 2908 WARN_ON(!list_empty(&mlxsw_sp->vfids.list)); 2909 WARN_ON(!list_empty(&mlxsw_sp->fids)); 2910 for (i = 0; i < MLXSW_SP_RIF_MAX; i++) 2911 WARN_ON_ONCE(mlxsw_sp->rifs[i]); 2912 } 2913 2914 static struct mlxsw_config_profile mlxsw_sp_config_profile = { 2915 .used_max_vepa_channels = 1, 2916 .max_vepa_channels = 0, 2917 .used_max_lag = 1, 2918 .max_lag = MLXSW_SP_LAG_MAX, 2919 .used_max_port_per_lag = 1, 2920 .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX, 2921 .used_max_mid = 1, 2922 .max_mid = MLXSW_SP_MID_MAX, 2923 .used_max_pgt = 1, 2924 .max_pgt = 0, 2925 .used_max_system_port = 1, 2926 .max_system_port = 64, 2927 .used_max_vlan_groups = 1, 2928 .max_vlan_groups = 127, 2929 .used_max_regions = 1, 2930 .max_regions = 400, 2931 .used_flood_tables = 1, 2932 .used_flood_mode = 1, 2933 .flood_mode = 3, 2934 .max_fid_offset_flood_tables = 2, 2935 .fid_offset_flood_table_size = VLAN_N_VID - 1, 2936 .max_fid_flood_tables = 2, 2937 .fid_flood_table_size = MLXSW_SP_VFID_MAX, 2938 .used_max_ib_mc = 1, 2939 .max_ib_mc = 0, 2940 .used_max_pkey = 1, 2941 .max_pkey = 0, 2942 .used_kvd_sizes = 1, 2943 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 2944 .kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE, 2945 .kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE, 2946 .swid_config = { 2947 { 2948 .used_type = 1, 2949 .type = MLXSW_PORT_SWID_TYPE_ETH, 2950 } 2951 }, 2952 .resource_query_enable = 1, 2953 }; 2954 2955 static struct 
mlxsw_driver mlxsw_sp_driver = { 2956 .kind = MLXSW_DEVICE_KIND_SPECTRUM, 2957 .owner = THIS_MODULE, 2958 .priv_size = sizeof(struct mlxsw_sp), 2959 .init = mlxsw_sp_init, 2960 .fini = mlxsw_sp_fini, 2961 .port_split = mlxsw_sp_port_split, 2962 .port_unsplit = mlxsw_sp_port_unsplit, 2963 .sb_pool_get = mlxsw_sp_sb_pool_get, 2964 .sb_pool_set = mlxsw_sp_sb_pool_set, 2965 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 2966 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 2967 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 2968 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 2969 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 2970 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 2971 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 2972 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 2973 .txhdr_construct = mlxsw_sp_txhdr_construct, 2974 .txhdr_len = MLXSW_TXHDR_LEN, 2975 .profile = &mlxsw_sp_config_profile, 2976 }; 2977 2978 static bool mlxsw_sp_port_dev_check(const struct net_device *dev) 2979 { 2980 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 2981 } 2982 2983 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 2984 { 2985 struct net_device *lower_dev; 2986 struct list_head *iter; 2987 2988 if (mlxsw_sp_port_dev_check(dev)) 2989 return netdev_priv(dev); 2990 2991 netdev_for_each_all_lower_dev(dev, lower_dev, iter) { 2992 if (mlxsw_sp_port_dev_check(lower_dev)) 2993 return netdev_priv(lower_dev); 2994 } 2995 return NULL; 2996 } 2997 2998 static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 2999 { 3000 struct mlxsw_sp_port *mlxsw_sp_port; 3001 3002 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 3003 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; 3004 } 3005 3006 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 3007 { 3008 struct net_device *lower_dev; 3009 struct list_head *iter; 3010 3011 if (mlxsw_sp_port_dev_check(dev)) 3012 return netdev_priv(dev); 3013 3014 netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) { 3015 if (mlxsw_sp_port_dev_check(lower_dev)) 3016 return netdev_priv(lower_dev); 3017 } 3018 return NULL; 3019 } 3020 3021 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 3022 { 3023 struct mlxsw_sp_port *mlxsw_sp_port; 3024 3025 rcu_read_lock(); 3026 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 3027 if (mlxsw_sp_port) 3028 dev_hold(mlxsw_sp_port->dev); 3029 rcu_read_unlock(); 3030 return mlxsw_sp_port; 3031 } 3032 3033 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 3034 { 3035 dev_put(mlxsw_sp_port->dev); 3036 } 3037 3038 static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r, 3039 unsigned long event) 3040 { 3041 switch (event) { 3042 case NETDEV_UP: 3043 if (!r) 3044 return true; 3045 r->ref_count++; 3046 return false; 3047 case NETDEV_DOWN: 3048 if (r && --r->ref_count == 0) 3049 return true; 3050 /* It is possible we already removed the RIF ourselves 3051 * if it was assigned to a netdev that is now a bridge 3052 * or LAG slave. 
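 * In that case no RIF is found here and there is nothing left
 * to unconfigure on NETDEV_DOWN.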
3053 */ 3054 return false; 3055 } 3056 3057 return false; 3058 } 3059 3060 static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp) 3061 { 3062 int i; 3063 3064 for (i = 0; i < MLXSW_SP_RIF_MAX; i++) 3065 if (!mlxsw_sp->rifs[i]) 3066 return i; 3067 3068 return MLXSW_SP_RIF_MAX; 3069 } 3070 3071 static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport, 3072 bool *p_lagged, u16 *p_system_port) 3073 { 3074 u8 local_port = mlxsw_sp_vport->local_port; 3075 3076 *p_lagged = mlxsw_sp_vport->lagged; 3077 *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port; 3078 } 3079 3080 static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport, 3081 struct net_device *l3_dev, u16 rif, 3082 bool create) 3083 { 3084 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 3085 bool lagged = mlxsw_sp_vport->lagged; 3086 char ritr_pl[MLXSW_REG_RITR_LEN]; 3087 u16 system_port; 3088 3089 mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif, 3090 l3_dev->mtu, l3_dev->dev_addr); 3091 3092 mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port); 3093 mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port, 3094 mlxsw_sp_vport_vid_get(mlxsw_sp_vport)); 3095 3096 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 3097 } 3098 3099 static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport); 3100 3101 static struct mlxsw_sp_fid * 3102 mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev) 3103 { 3104 struct mlxsw_sp_fid *f; 3105 3106 f = kzalloc(sizeof(*f), GFP_KERNEL); 3107 if (!f) 3108 return NULL; 3109 3110 f->leave = mlxsw_sp_vport_rif_sp_leave; 3111 f->ref_count = 0; 3112 f->dev = l3_dev; 3113 f->fid = fid; 3114 3115 return f; 3116 } 3117 3118 static struct mlxsw_sp_rif * 3119 mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f) 3120 { 3121 struct mlxsw_sp_rif *r; 3122 3123 r = kzalloc(sizeof(*r), GFP_KERNEL); 3124 if (!r) 3125 return NULL; 3126 3127 ether_addr_copy(r->addr, l3_dev->dev_addr); 3128 r->mtu = l3_dev->mtu; 3129 r->ref_count = 1; 3130 r->dev = l3_dev; 3131 r->rif = rif; 3132 r->f = f; 3133 3134 return r; 3135 } 3136 3137 static struct mlxsw_sp_rif * 3138 mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport, 3139 struct net_device *l3_dev) 3140 { 3141 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 3142 struct mlxsw_sp_fid *f; 3143 struct mlxsw_sp_rif *r; 3144 u16 fid, rif; 3145 int err; 3146 3147 rif = mlxsw_sp_avail_rif_get(mlxsw_sp); 3148 if (rif == MLXSW_SP_RIF_MAX) 3149 return ERR_PTR(-ERANGE); 3150 3151 err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true); 3152 if (err) 3153 return ERR_PTR(err); 3154 3155 fid = mlxsw_sp_rif_sp_to_fid(rif); 3156 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true); 3157 if (err) 3158 goto err_rif_fdb_op; 3159 3160 f = mlxsw_sp_rfid_alloc(fid, l3_dev); 3161 if (!f) { 3162 err = -ENOMEM; 3163 goto err_rfid_alloc; 3164 } 3165 3166 r = mlxsw_sp_rif_alloc(rif, l3_dev, f); 3167 if (!r) { 3168 err = -ENOMEM; 3169 goto err_rif_alloc; 3170 } 3171 3172 f->r = r; 3173 mlxsw_sp->rifs[rif] = r; 3174 3175 return r; 3176 3177 err_rif_alloc: 3178 kfree(f); 3179 err_rfid_alloc: 3180 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); 3181 err_rif_fdb_op: 3182 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); 3183 return ERR_PTR(err); 3184 } 3185 3186 static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport, 3187 struct mlxsw_sp_rif *r) 3188 { 3189 struct mlxsw_sp 
*mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 3190 struct net_device *l3_dev = r->dev; 3191 struct mlxsw_sp_fid *f = r->f; 3192 u16 fid = f->fid; 3193 u16 rif = r->rif; 3194 3195 mlxsw_sp->rifs[rif] = NULL; 3196 f->r = NULL; 3197 3198 kfree(r); 3199 3200 kfree(f); 3201 3202 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); 3203 3204 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); 3205 } 3206 3207 static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport, 3208 struct net_device *l3_dev) 3209 { 3210 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 3211 struct mlxsw_sp_rif *r; 3212 3213 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); 3214 if (!r) { 3215 r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev); 3216 if (IS_ERR(r)) 3217 return PTR_ERR(r); 3218 } 3219 3220 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f); 3221 r->f->ref_count++; 3222 3223 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid); 3224 3225 return 0; 3226 } 3227 3228 static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport) 3229 { 3230 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); 3231 3232 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid); 3233 3234 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); 3235 if (--f->ref_count == 0) 3236 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r); 3237 } 3238 3239 static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev, 3240 struct net_device *port_dev, 3241 unsigned long event, u16 vid) 3242 { 3243 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev); 3244 struct mlxsw_sp_port *mlxsw_sp_vport; 3245 3246 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); 3247 if (WARN_ON(!mlxsw_sp_vport)) 3248 return -EINVAL; 3249 3250 switch (event) { 3251 case NETDEV_UP: 3252 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev); 3253 case NETDEV_DOWN: 3254 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport); 3255 break; 3256 } 3257 3258 return 0; 3259 } 3260 3261 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev, 3262 unsigned long event) 3263 { 3264 if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev)) 3265 return 0; 3266 3267 return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1); 3268 } 3269 3270 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, 3271 struct net_device *lag_dev, 3272 unsigned long event, u16 vid) 3273 { 3274 struct net_device *port_dev; 3275 struct list_head *iter; 3276 int err; 3277 3278 netdev_for_each_lower_dev(lag_dev, port_dev, iter) { 3279 if (mlxsw_sp_port_dev_check(port_dev)) { 3280 err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev, 3281 event, vid); 3282 if (err) 3283 return err; 3284 } 3285 } 3286 3287 return 0; 3288 } 3289 3290 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev, 3291 unsigned long event) 3292 { 3293 if (netif_is_bridge_port(lag_dev)) 3294 return 0; 3295 3296 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1); 3297 } 3298 3299 static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp, 3300 struct net_device *l3_dev) 3301 { 3302 u16 fid; 3303 3304 if (is_vlan_dev(l3_dev)) 3305 fid = vlan_dev_vlan_id(l3_dev); 3306 else if (mlxsw_sp->master_bridge.dev == l3_dev) 3307 fid = 1; 3308 else 3309 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev); 3310 3311 return mlxsw_sp_fid_find(mlxsw_sp, fid); 3312 } 3313 3314 static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid) 3315 { 3316 if (mlxsw_sp_fid_is_vfid(fid)) 3317 return 
MLXSW_REG_RITR_FID_IF; 3318 else 3319 return MLXSW_REG_RITR_VLAN_IF; 3320 } 3321 3322 static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, 3323 struct net_device *l3_dev, 3324 u16 fid, u16 rif, 3325 bool create) 3326 { 3327 enum mlxsw_reg_ritr_if_type rif_type; 3328 char ritr_pl[MLXSW_REG_RITR_LEN]; 3329 3330 rif_type = mlxsw_sp_rif_type_get(fid); 3331 mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu, 3332 l3_dev->dev_addr); 3333 mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid); 3334 3335 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 3336 } 3337 3338 static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp, 3339 struct net_device *l3_dev, 3340 struct mlxsw_sp_fid *f) 3341 { 3342 struct mlxsw_sp_rif *r; 3343 u16 rif; 3344 int err; 3345 3346 rif = mlxsw_sp_avail_rif_get(mlxsw_sp); 3347 if (rif == MLXSW_SP_RIF_MAX) 3348 return -ERANGE; 3349 3350 err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true); 3351 if (err) 3352 return err; 3353 3354 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true); 3355 if (err) 3356 goto err_rif_fdb_op; 3357 3358 r = mlxsw_sp_rif_alloc(rif, l3_dev, f); 3359 if (!r) { 3360 err = -ENOMEM; 3361 goto err_rif_alloc; 3362 } 3363 3364 f->r = r; 3365 mlxsw_sp->rifs[rif] = r; 3366 3367 netdev_dbg(l3_dev, "RIF=%d created\n", rif); 3368 3369 return 0; 3370 3371 err_rif_alloc: 3372 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); 3373 err_rif_fdb_op: 3374 mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); 3375 return err; 3376 } 3377 3378 void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, 3379 struct mlxsw_sp_rif *r) 3380 { 3381 struct net_device *l3_dev = r->dev; 3382 struct mlxsw_sp_fid *f = r->f; 3383 u16 rif = r->rif; 3384 3385 mlxsw_sp->rifs[rif] = NULL; 3386 f->r = NULL; 3387 3388 kfree(r); 3389 3390 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); 3391 3392 mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); 3393 3394 netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif); 3395 } 3396 3397 static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev, 3398 struct net_device *br_dev, 3399 unsigned long event) 3400 { 3401 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); 3402 struct mlxsw_sp_fid *f; 3403 3404 /* FID can either be an actual FID if the L3 device is the 3405 * VLAN-aware bridge or a VLAN device on top. Otherwise, the 3406 * L3 device is a VLAN-unaware bridge and we get a vFID. 
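 * E.g. a VLAN device with VID 10 on top of the VLAN-aware bridge
 * maps to FID 10, the bridge itself to FID 1, and a VLAN-unaware
 * bridge to the vFID previously created for it.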
3407 */ 3408 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev); 3409 if (WARN_ON(!f)) 3410 return -EINVAL; 3411 3412 switch (event) { 3413 case NETDEV_UP: 3414 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f); 3415 case NETDEV_DOWN: 3416 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); 3417 break; 3418 } 3419 3420 return 0; 3421 } 3422 3423 static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, 3424 unsigned long event) 3425 { 3426 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 3427 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 3428 u16 vid = vlan_dev_vlan_id(vlan_dev); 3429 3430 if (mlxsw_sp_port_dev_check(real_dev)) 3431 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event, 3432 vid); 3433 else if (netif_is_lag_master(real_dev)) 3434 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event, 3435 vid); 3436 else if (netif_is_bridge_master(real_dev) && 3437 mlxsw_sp->master_bridge.dev == real_dev) 3438 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev, 3439 event); 3440 3441 return 0; 3442 } 3443 3444 static int mlxsw_sp_inetaddr_event(struct notifier_block *unused, 3445 unsigned long event, void *ptr) 3446 { 3447 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; 3448 struct net_device *dev = ifa->ifa_dev->dev; 3449 struct mlxsw_sp *mlxsw_sp; 3450 struct mlxsw_sp_rif *r; 3451 int err = 0; 3452 3453 mlxsw_sp = mlxsw_sp_lower_get(dev); 3454 if (!mlxsw_sp) 3455 goto out; 3456 3457 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 3458 if (!mlxsw_sp_rif_should_config(r, event)) 3459 goto out; 3460 3461 if (mlxsw_sp_port_dev_check(dev)) 3462 err = mlxsw_sp_inetaddr_port_event(dev, event); 3463 else if (netif_is_lag_master(dev)) 3464 err = mlxsw_sp_inetaddr_lag_event(dev, event); 3465 else if (netif_is_bridge_master(dev)) 3466 err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event); 3467 else if (is_vlan_dev(dev)) 3468 err = mlxsw_sp_inetaddr_vlan_event(dev, event); 3469 3470 out: 3471 return notifier_from_errno(err); 3472 } 3473 3474 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif, 3475 const char *mac, int mtu) 3476 { 3477 char ritr_pl[MLXSW_REG_RITR_LEN]; 3478 int err; 3479 3480 mlxsw_reg_ritr_rif_pack(ritr_pl, rif); 3481 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 3482 if (err) 3483 return err; 3484 3485 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu); 3486 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac); 3487 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE); 3488 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 3489 } 3490 3491 static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev) 3492 { 3493 struct mlxsw_sp *mlxsw_sp; 3494 struct mlxsw_sp_rif *r; 3495 int err; 3496 3497 mlxsw_sp = mlxsw_sp_lower_get(dev); 3498 if (!mlxsw_sp) 3499 return 0; 3500 3501 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 3502 if (!r) 3503 return 0; 3504 3505 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false); 3506 if (err) 3507 return err; 3508 3509 err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu); 3510 if (err) 3511 goto err_rif_edit; 3512 3513 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true); 3514 if (err) 3515 goto err_rif_fdb_op; 3516 3517 ether_addr_copy(r->addr, dev->dev_addr); 3518 r->mtu = dev->mtu; 3519 3520 netdev_dbg(dev, "Updated RIF=%d\n", r->rif); 3521 3522 return 0; 3523 3524 err_rif_fdb_op: 3525 mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu); 3526 err_rif_edit: 3527 mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true); 3528 
return err; 3529 } 3530 3531 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port, 3532 u16 fid) 3533 { 3534 if (mlxsw_sp_fid_is_vfid(fid)) 3535 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid); 3536 else 3537 return test_bit(fid, lag_port->active_vlans); 3538 } 3539 3540 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port, 3541 u16 fid) 3542 { 3543 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3544 u8 local_port = mlxsw_sp_port->local_port; 3545 u16 lag_id = mlxsw_sp_port->lag_id; 3546 int i, count = 0; 3547 3548 if (!mlxsw_sp_port->lagged) 3549 return true; 3550 3551 for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) { 3552 struct mlxsw_sp_port *lag_port; 3553 3554 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i); 3555 if (!lag_port || lag_port->local_port == local_port) 3556 continue; 3557 if (mlxsw_sp_lag_port_fid_member(lag_port, fid)) 3558 count++; 3559 } 3560 3561 return !count; 3562 } 3563 3564 static int 3565 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port, 3566 u16 fid) 3567 { 3568 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3569 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 3570 3571 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID); 3572 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); 3573 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, 3574 mlxsw_sp_port->local_port); 3575 3576 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n", 3577 mlxsw_sp_port->local_port, fid); 3578 3579 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 3580 } 3581 3582 static int 3583 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port, 3584 u16 fid) 3585 { 3586 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3587 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 3588 3589 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID); 3590 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); 3591 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); 3592 3593 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n", 3594 mlxsw_sp_port->lag_id, fid); 3595 3596 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 3597 } 3598 3599 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) 3600 { 3601 if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid)) 3602 return 0; 3603 3604 if (mlxsw_sp_port->lagged) 3605 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, 3606 fid); 3607 else 3608 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid); 3609 } 3610 3611 static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp) 3612 { 3613 struct mlxsw_sp_fid *f, *tmp; 3614 3615 list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list) 3616 if (--f->ref_count == 0) 3617 mlxsw_sp_fid_destroy(mlxsw_sp, f); 3618 else 3619 WARN_ON_ONCE(1); 3620 } 3621 3622 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, 3623 struct net_device *br_dev) 3624 { 3625 return !mlxsw_sp->master_bridge.dev || 3626 mlxsw_sp->master_bridge.dev == br_dev; 3627 } 3628 3629 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, 3630 struct net_device *br_dev) 3631 { 3632 mlxsw_sp->master_bridge.dev = br_dev; 3633 mlxsw_sp->master_bridge.ref_count++; 3634 } 3635 3636 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp) 3637 { 3638 if (--mlxsw_sp->master_bridge.ref_count == 0) { 3639 mlxsw_sp->master_bridge.dev = NULL; 3640 /* It's possible upper VLAN devices are still holding 3641 * 
references to underlying FIDs. Drop the reference 3642 * and release the resources if it was the last one. 3643 * If it wasn't, then something bad happened. 3644 */ 3645 mlxsw_sp_master_bridge_gone_sync(mlxsw_sp); 3646 } 3647 } 3648 3649 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, 3650 struct net_device *br_dev) 3651 { 3652 struct net_device *dev = mlxsw_sp_port->dev; 3653 int err; 3654 3655 /* When port is not bridged untagged packets are tagged with 3656 * PVID=VID=1, thereby creating an implicit VLAN interface in 3657 * the device. Remove it and let bridge code take care of its 3658 * own VLANs. 3659 */ 3660 err = mlxsw_sp_port_kill_vid(dev, 0, 1); 3661 if (err) 3662 return err; 3663 3664 mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev); 3665 3666 mlxsw_sp_port->learning = 1; 3667 mlxsw_sp_port->learning_sync = 1; 3668 mlxsw_sp_port->uc_flood = 1; 3669 mlxsw_sp_port->bridged = 1; 3670 3671 return 0; 3672 } 3673 3674 static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) 3675 { 3676 struct net_device *dev = mlxsw_sp_port->dev; 3677 3678 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); 3679 3680 mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp); 3681 3682 mlxsw_sp_port->learning = 0; 3683 mlxsw_sp_port->learning_sync = 0; 3684 mlxsw_sp_port->uc_flood = 0; 3685 mlxsw_sp_port->bridged = 0; 3686 3687 /* Add implicit VLAN interface in the device, so that untagged 3688 * packets will be classified to the default vFID. 3689 */ 3690 mlxsw_sp_port_add_vid(dev, 0, 1); 3691 } 3692 3693 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 3694 { 3695 char sldr_pl[MLXSW_REG_SLDR_LEN]; 3696 3697 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 3698 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 3699 } 3700 3701 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 3702 { 3703 char sldr_pl[MLXSW_REG_SLDR_LEN]; 3704 3705 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 3706 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 3707 } 3708 3709 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 3710 u16 lag_id, u8 port_index) 3711 { 3712 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3713 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3714 3715 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 3716 lag_id, port_index); 3717 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3718 } 3719 3720 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 3721 u16 lag_id) 3722 { 3723 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3724 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3725 3726 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 3727 lag_id); 3728 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3729 } 3730 3731 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 3732 u16 lag_id) 3733 { 3734 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3735 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3736 3737 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 3738 lag_id); 3739 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3740 } 3741 3742 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 3743 u16 lag_id) 3744 { 3745 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3746 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 3747 3748 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 3749 lag_id); 
3750 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 3751 } 3752 3753 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 3754 struct net_device *lag_dev, 3755 u16 *p_lag_id) 3756 { 3757 struct mlxsw_sp_upper *lag; 3758 int free_lag_id = -1; 3759 int i; 3760 3761 for (i = 0; i < MLXSW_SP_LAG_MAX; i++) { 3762 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 3763 if (lag->ref_count) { 3764 if (lag->dev == lag_dev) { 3765 *p_lag_id = i; 3766 return 0; 3767 } 3768 } else if (free_lag_id < 0) { 3769 free_lag_id = i; 3770 } 3771 } 3772 if (free_lag_id < 0) 3773 return -EBUSY; 3774 *p_lag_id = free_lag_id; 3775 return 0; 3776 } 3777 3778 static bool 3779 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 3780 struct net_device *lag_dev, 3781 struct netdev_lag_upper_info *lag_upper_info) 3782 { 3783 u16 lag_id; 3784 3785 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) 3786 return false; 3787 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) 3788 return false; 3789 return true; 3790 } 3791 3792 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 3793 u16 lag_id, u8 *p_port_index) 3794 { 3795 int i; 3796 3797 for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) { 3798 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 3799 *p_port_index = i; 3800 return 0; 3801 } 3802 } 3803 return -EBUSY; 3804 } 3805 3806 static void 3807 mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 3808 u16 lag_id) 3809 { 3810 struct mlxsw_sp_port *mlxsw_sp_vport; 3811 struct mlxsw_sp_fid *f; 3812 3813 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1); 3814 if (WARN_ON(!mlxsw_sp_vport)) 3815 return; 3816 3817 /* If vPort is assigned a RIF, then leave it since it's no 3818 * longer valid. 3819 */ 3820 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); 3821 if (f) 3822 f->leave(mlxsw_sp_vport); 3823 3824 mlxsw_sp_vport->lag_id = lag_id; 3825 mlxsw_sp_vport->lagged = 1; 3826 } 3827 3828 static void 3829 mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port) 3830 { 3831 struct mlxsw_sp_port *mlxsw_sp_vport; 3832 struct mlxsw_sp_fid *f; 3833 3834 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1); 3835 if (WARN_ON(!mlxsw_sp_vport)) 3836 return; 3837 3838 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); 3839 if (f) 3840 f->leave(mlxsw_sp_vport); 3841 3842 mlxsw_sp_vport->lagged = 0; 3843 } 3844 3845 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 3846 struct net_device *lag_dev) 3847 { 3848 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3849 struct mlxsw_sp_upper *lag; 3850 u16 lag_id; 3851 u8 port_index; 3852 int err; 3853 3854 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 3855 if (err) 3856 return err; 3857 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 3858 if (!lag->ref_count) { 3859 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 3860 if (err) 3861 return err; 3862 lag->dev = lag_dev; 3863 } 3864 3865 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 3866 if (err) 3867 return err; 3868 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 3869 if (err) 3870 goto err_col_port_add; 3871 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id); 3872 if (err) 3873 goto err_col_port_enable; 3874 3875 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 3876 mlxsw_sp_port->local_port); 3877 mlxsw_sp_port->lag_id = lag_id; 3878 mlxsw_sp_port->lagged = 1; 3879 lag->ref_count++; 3880 3881 mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id); 3882 3883 return 0; 3884 
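/* Error unwinding is done in reverse order of the setup above; the
 * LAG itself is destroyed only if this port would have been its
 * first member.
 */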
err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}
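/* Upper device changes on a port netdev are vetoed in PRECHANGEUPPER
 * and only acted upon in CHANGEUPPER, once the stack has committed to
 * the change. Valid uppers are VLAN, LAG and bridge master devices.
 */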
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* HW limitation forbids enslaving ports to multiple
		 * bridges.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							  upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	return 0;
}

static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (f && f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
	if (f && --f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp, f);
}

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;
	if (br_dev != mlxsw_sp->master_bridge.dev)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev))
			break;
		if (info->linking) {
			err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
							       upper_dev);
			if (err)
				return err;
		} else {
			mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
		}
		break;
	}

	return 0;
}
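/* vFIDs back vPorts whose VLAN device was enslaved to a VLAN-unaware
 * bridge. A free vFID is taken from the 'vfids.mapped' bitmap,
 * translated to a FID and then created in the device using the SFMR
 * register.
 */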
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}

static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}

static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}
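/* Joining a vFID enables flooding for the vPort and installs its
 * {Port, VID} to FID mapping, so that the vPort's traffic is
 * classified to the bridge's FID.
 */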
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}

static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}

static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;
}

static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}
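/* Events on a VLAN device are dispatched to the vPort of its real
 * device - or to every port member of its real LAG device - keyed by
 * the VLAN device's VID.
 */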
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port that are members of the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
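	/* Unregister the notifiers in reverse order of registration. */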
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);