/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}
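
/* SPAN entries are reference counted: mlxsw_sp_span_entry_get() either
 * returns an existing entry for the analyzed port and bumps its reference
 * count, or allocates a free one. mlxsw_sp_span_entry_put() drops a
 * reference and destroys the entry once the last one is gone.
 */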

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
{
	return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
}

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}
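
/* Binding an inspected port to a SPAN entry is what actually starts the
 * mirroring. Egress mirroring additionally requires an internal buffer
 * (SBIB) on the inspected port, sized according to its MTU, which the
 * device uses when generating the mirrored copy.
 */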

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}
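
/* Entry points used by the matchall mirror offload below:
 * mlxsw_sp_span_mirror_add() takes a reference on the SPAN entry of the
 * destination port and binds the source port to it, while
 * mlxsw_sp_span_mirror_remove() unbinds the source port; the reference
 * is dropped from within the unbind.
 */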

static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}
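
/* SVPE switches the port between VLAN mode, where packets are classified
 * to a FID based on their VID alone, and Virtual mode, where the
 * {Port, VID} pair is used instead. See mlxsw_sp_port_vp_mode_trans()
 * below for the transition between the two.
 */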

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
			      vid_end, learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						learn_enable);
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}
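
/* Headroom is configured per priority group (PG): a PG with at least one
 * priority mapped to it gets a buffer of twice the MTU (in cells).
 * Lossless PGs (PAUSE or PFC enabled) are additionally sized with a delay
 * allowance above the Xoff threshold, so that traffic in flight when flow
 * control is asserted can still be absorbed.
 */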

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
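
/* The per-CPU counters below only account for traffic that actually went
 * through the CPU, i.e. packets the driver itself transmitted or received.
 * They back the IFLA_OFFLOAD_XSTATS_CPU_HIT attribute; the forwarded
 * traffic is reported from the periodically refreshed hardware counter
 * cache further down.
 */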

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
					int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}
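
/* A vPort is a shadow of the physical port, scoped to a single VID. One
 * is created for each VLAN added on the port; it holds its own FID
 * reference and STP state and is linked on the port's vports_list.
 */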

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return -ENOMEM;

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err)
			goto err_port_vp_mode_trans;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err)
		goto err_port_add_vid;

	return 0;

err_port_add_vid:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return 0;

	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}

static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;

	to_port = mlxsw_sp->ports[mirror->to_local_port];
	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
}

static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  __be16 protocol,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tc_single_action(cls->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = cls->cookie;

	tcf_exts_to_list(cls->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *cls)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 cls->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
			     __be16 proto, struct tc_to_netdev *tc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

	switch (tc->type) {
	case TC_SETUP_MATCHALL:
		switch (tc->cls_mall->command) {
		case TC_CLSMATCHALL_REPLACE:
			return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
							      proto,
							      tc->cls_mall,
							      ingress);
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
						       tc->cls_mall);
			return 0;
		default:
			return -EINVAL;
		}
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
						       proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
						tc->cls_flower);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}

	return -EOPNOTSUPP;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}
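
/* The PFCC register configures both global PAUSE and per-priority PFC.
 * The two are mutually exclusive on a port, which is why
 * mlxsw_sp_port_set_pauseparam() below rejects the request when PFC is
 * already enabled.
 */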

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
};

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl)
{
	u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);

	return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
}

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)

static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}
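
/* The order of the strings must match __mlxsw_sp_port_get_stats(): the
 * IEEE 802.3 counters come first, followed by the per-priority counters
 * and then the per-TC counters, MLXSW_SP_PORT_ETHTOOL_STATS_LEN entries
 * in total.
 */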

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++)
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed = SPEED_20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
	},
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 1919 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 1920 .speed = SPEED_100000, 1921 }, 1922 }; 1923 1924 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode) 1925 1926 static void 1927 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto, 1928 struct ethtool_link_ksettings *cmd) 1929 { 1930 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 1931 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 1932 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 1933 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 1934 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 1935 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 1936 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 1937 1938 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 1939 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 1940 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 1941 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 1942 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 1943 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 1944 } 1945 1946 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode) 1947 { 1948 int i; 1949 1950 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1951 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) 1952 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 1953 mode); 1954 } 1955 } 1956 1957 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, 1958 struct ethtool_link_ksettings *cmd) 1959 { 1960 u32 speed = SPEED_UNKNOWN; 1961 u8 duplex = DUPLEX_UNKNOWN; 1962 int i; 1963 1964 if (!carrier_ok) 1965 goto out; 1966 1967 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1968 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { 1969 speed = mlxsw_sp_port_link_mode[i].speed; 1970 duplex = DUPLEX_FULL; 1971 break; 1972 } 1973 } 1974 out: 1975 cmd->base.speed = speed; 1976 cmd->base.duplex = duplex; 1977 } 1978 1979 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) 1980 { 1981 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 1982 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 1983 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 1984 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 1985 return PORT_FIBRE; 1986 1987 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 1988 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 1989 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) 1990 return PORT_DA; 1991 1992 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 1993 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 1994 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 1995 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) 1996 return PORT_NONE; 1997 1998 return PORT_OTHER; 1999 } 2000 2001 static u32 2002 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd) 2003 { 2004 u32 ptys_proto = 0; 2005 int i; 2006 2007 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2008 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2009 cmd->link_modes.advertising)) 2010 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2011 } 2012 return ptys_proto; 2013 } 2014 2015 static u32 mlxsw_sp_to_ptys_speed(u32 speed) 2016 { 2017 u32 ptys_proto = 0; 2018 int i; 2019 2020 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2021 if (speed == mlxsw_sp_port_link_mode[i].speed) 2022 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2023 } 2024 return ptys_proto; 2025 } 2026 2027 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) 2028 { 2029 u32 ptys_proto = 0; 2030 int i; 2031 2032 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2033 if (mlxsw_sp_port_link_mode[i].speed <= 
upper_speed) 2034 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2035 } 2036 return ptys_proto; 2037 } 2038 2039 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap, 2040 struct ethtool_link_ksettings *cmd) 2041 { 2042 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 2043 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 2044 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 2045 2046 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd); 2047 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported); 2048 } 2049 2050 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg, 2051 struct ethtool_link_ksettings *cmd) 2052 { 2053 if (!autoneg) 2054 return; 2055 2056 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 2057 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising); 2058 } 2059 2060 static void 2061 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status, 2062 struct ethtool_link_ksettings *cmd) 2063 { 2064 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp) 2065 return; 2066 2067 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg); 2068 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising); 2069 } 2070 2071 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 2072 struct ethtool_link_ksettings *cmd) 2073 { 2074 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp; 2075 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2076 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2077 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2078 u8 autoneg_status; 2079 bool autoneg; 2080 int err; 2081 2082 autoneg = mlxsw_sp_port->link.autoneg; 2083 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 2084 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2085 if (err) 2086 return err; 2087 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, 2088 &eth_proto_oper); 2089 2090 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd); 2091 2092 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd); 2093 2094 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl); 2095 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl); 2096 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd); 2097 2098 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 2099 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper); 2100 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper, 2101 cmd); 2102 2103 return 0; 2104 } 2105 2106 static int 2107 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 2108 const struct ethtool_link_ksettings *cmd) 2109 { 2110 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2111 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2112 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2113 u32 eth_proto_cap, eth_proto_new; 2114 bool autoneg; 2115 int err; 2116 2117 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 2118 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2119 if (err) 2120 return err; 2121 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL); 2122 2123 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
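/* With autoneg enabled, advertise exactly the mode set requested by the user;
 * with autoneg disabled, map the single forced speed to every PTYS proto bit
 * of that speed.
 */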
2124 eth_proto_new = autoneg ? 2125 mlxsw_sp_to_ptys_advert_link(cmd) : 2126 mlxsw_sp_to_ptys_speed(cmd->base.speed); 2127 2128 eth_proto_new = eth_proto_new & eth_proto_cap; 2129 if (!eth_proto_new) { 2130 netdev_err(dev, "No supported speed requested\n"); 2131 return -EINVAL; 2132 } 2133 2134 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2135 eth_proto_new); 2136 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2137 if (err) 2138 return err; 2139 2140 if (!netif_running(dev)) 2141 return 0; 2142 2143 mlxsw_sp_port->link.autoneg = autoneg; 2144 2145 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2146 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 2147 2148 return 0; 2149 } 2150 2151 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 2152 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 2153 .get_link = ethtool_op_get_link, 2154 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 2155 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 2156 .get_strings = mlxsw_sp_port_get_strings, 2157 .set_phys_id = mlxsw_sp_port_set_phys_id, 2158 .get_ethtool_stats = mlxsw_sp_port_get_stats, 2159 .get_sset_count = mlxsw_sp_port_get_sset_count, 2160 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 2161 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 2162 }; 2163 2164 static int 2165 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 2166 { 2167 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2168 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width; 2169 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2170 u32 eth_proto_admin; 2171 2172 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); 2173 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2174 eth_proto_admin); 2175 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2176 } 2177 2178 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 2179 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 2180 bool dwrr, u8 dwrr_weight) 2181 { 2182 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2183 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2184 2185 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2186 next_index); 2187 mlxsw_reg_qeec_de_set(qeec_pl, true); 2188 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 2189 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 2190 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2191 } 2192 2193 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 2194 enum mlxsw_reg_qeec_hr hr, u8 index, 2195 u8 next_index, u32 maxrate) 2196 { 2197 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2198 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2199 2200 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2201 next_index); 2202 mlxsw_reg_qeec_mase_set(qeec_pl, true); 2203 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 2204 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2205 } 2206 2207 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 2208 u8 switch_prio, u8 tclass) 2209 { 2210 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2211 char qtct_pl[MLXSW_REG_QTCT_LEN]; 2212 2213 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 2214 tclass); 2215 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 2216 } 2217 2218 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 2219 { 2220 int err, i; 2221 2222 /* Set up the elements hierarchy, so that each TC is linked to 2223 * one subgroup, all of which are members of the same group. 2224 */
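/* The resulting per-port scheduling tree (a sketch; TC i feeds subgroup i,
 * and all subgroups feed group 0):
 *
 *                  port
 *                    |
 *                 group 0
 *             /      |      \
 *     subgroup 0    ...    subgroup 7
 *          |                   |
 *         TC 0      ...       TC 7
 */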
2225 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2226 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 2227 0); 2228 if (err) 2229 return err; 2230 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2231 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2232 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 2233 0, false, 0); 2234 if (err) 2235 return err; 2236 } 2237 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2238 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2239 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 2240 false, 0); 2241 if (err) 2242 return err; 2243 } 2244 2245 /* Make sure the max shaper is disabled in all hierarchies that 2246 * support it. 2247 */ 2248 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2249 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 2250 MLXSW_REG_QEEC_MAS_DIS); 2251 if (err) 2252 return err; 2253 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2254 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2255 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 2256 i, 0, 2257 MLXSW_REG_QEEC_MAS_DIS); 2258 if (err) 2259 return err; 2260 } 2261 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2262 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2263 MLXSW_REG_QEEC_HIERARCY_TC, 2264 i, i, 2265 MLXSW_REG_QEEC_MAS_DIS); 2266 if (err) 2267 return err; 2268 } 2269 2270 /* Map all priorities to traffic class 0. */ 2271 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2272 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 2273 if (err) 2274 return err; 2275 } 2276 2277 return 0; 2278 } 2279 2280 static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port) 2281 { 2282 mlxsw_sp_port->pvid = 1; 2283 2284 return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1); 2285 } 2286 2287 static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port) 2288 { 2289 return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); 2290 } 2291 2292 static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2293 bool split, u8 module, u8 width, u8 lane) 2294 { 2295 struct mlxsw_sp_port *mlxsw_sp_port; 2296 struct net_device *dev; 2297 size_t bytes; 2298 int err; 2299 2300 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 2301 if (!dev) 2302 return -ENOMEM; 2303 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 2304 mlxsw_sp_port = netdev_priv(dev); 2305 mlxsw_sp_port->dev = dev; 2306 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 2307 mlxsw_sp_port->local_port = local_port; 2308 mlxsw_sp_port->split = split; 2309 mlxsw_sp_port->mapping.module = module; 2310 mlxsw_sp_port->mapping.width = width; 2311 mlxsw_sp_port->mapping.lane = lane; 2312 mlxsw_sp_port->link.autoneg = 1; 2313 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); 2314 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); 2315 if (!mlxsw_sp_port->active_vlans) { 2316 err = -ENOMEM; 2317 goto err_port_active_vlans_alloc; 2318 } 2319 mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL); 2320 if (!mlxsw_sp_port->untagged_vlans) { 2321 err = -ENOMEM; 2322 goto err_port_untagged_vlans_alloc; 2323 } 2324 INIT_LIST_HEAD(&mlxsw_sp_port->vports_list); 2325 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 2326 2327 mlxsw_sp_port->pcpu_stats = 2328 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 2329 if (!mlxsw_sp_port->pcpu_stats) { 2330 err = -ENOMEM; 2331 goto err_alloc_stats; 2332 } 2333 2334 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 2335 GFP_KERNEL); 2336 if (!mlxsw_sp_port->sample) { 2337 err = -ENOMEM; 2338 goto err_alloc_sample; 2339 } 2340
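/* The cache below is refreshed by the update_stats_cache delayed work
 * (armed at the end of port creation), so counter reads can be served
 * from memory instead of a blocking register query.
 */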
2341 mlxsw_sp_port->hw_stats.cache = 2342 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL); 2343 2344 if (!mlxsw_sp_port->hw_stats.cache) { 2345 err = -ENOMEM; 2346 goto err_alloc_hw_stats; 2347 } 2348 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw, 2349 &update_stats_cache); 2350 2351 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 2352 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 2353 2354 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 2355 if (err) { 2356 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 2357 mlxsw_sp_port->local_port); 2358 goto err_port_swid_set; 2359 } 2360 2361 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 2362 if (err) { 2363 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 2364 mlxsw_sp_port->local_port); 2365 goto err_dev_addr_init; 2366 } 2367 2368 netif_carrier_off(dev); 2369 2370 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 2371 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 2372 dev->hw_features |= NETIF_F_HW_TC; 2373 2374 dev->min_mtu = 0; 2375 dev->max_mtu = ETH_MAX_MTU; 2376 2377 /* Each packet needs to have a Tx header (metadata) on top of all other 2378 * headers. 2379 */ 2380 dev->needed_headroom = MLXSW_TXHDR_LEN; 2381 2382 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 2383 if (err) { 2384 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 2385 mlxsw_sp_port->local_port); 2386 goto err_port_system_port_mapping_set; 2387 } 2388 2389 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 2390 if (err) { 2391 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 2392 mlxsw_sp_port->local_port); 2393 goto err_port_speed_by_width_set; 2394 } 2395 2396 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 2397 if (err) { 2398 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 2399 mlxsw_sp_port->local_port); 2400 goto err_port_mtu_set; 2401 } 2402 2403 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2404 if (err) 2405 goto err_port_admin_status_set; 2406 2407 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 2408 if (err) { 2409 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 2410 mlxsw_sp_port->local_port); 2411 goto err_port_buffers_init; 2412 } 2413 2414 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 2415 if (err) { 2416 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 2417 mlxsw_sp_port->local_port); 2418 goto err_port_ets_init; 2419 } 2420 2421 /* ETS and buffers must be initialized before DCB.
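 * The DCB ops registered by mlxsw_sp_port_dcb_init() reconfigure the
 * ETS elements and buffers set up above.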
*/ 2422 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 2423 if (err) { 2424 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 2425 mlxsw_sp_port->local_port); 2426 goto err_port_dcb_init; 2427 } 2428 2429 err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port); 2430 if (err) { 2431 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n", 2432 mlxsw_sp_port->local_port); 2433 goto err_port_pvid_vport_create; 2434 } 2435 2436 mlxsw_sp_port_switchdev_init(mlxsw_sp_port); 2437 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 2438 err = register_netdev(dev); 2439 if (err) { 2440 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 2441 mlxsw_sp_port->local_port); 2442 goto err_register_netdev; 2443 } 2444 2445 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 2446 mlxsw_sp_port, dev, mlxsw_sp_port->split, 2447 module); 2448 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0); 2449 return 0; 2450 2451 err_register_netdev: 2452 mlxsw_sp->ports[local_port] = NULL; 2453 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 2454 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); 2455 err_port_pvid_vport_create: 2456 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 2457 err_port_dcb_init: 2458 err_port_ets_init: 2459 err_port_buffers_init: 2460 err_port_admin_status_set: 2461 err_port_mtu_set: 2462 err_port_speed_by_width_set: 2463 err_port_system_port_mapping_set: 2464 err_dev_addr_init: 2465 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 2466 err_port_swid_set: 2467 kfree(mlxsw_sp_port->hw_stats.cache); 2468 err_alloc_hw_stats: 2469 kfree(mlxsw_sp_port->sample); 2470 err_alloc_sample: 2471 free_percpu(mlxsw_sp_port->pcpu_stats); 2472 err_alloc_stats: 2473 kfree(mlxsw_sp_port->untagged_vlans); 2474 err_port_untagged_vlans_alloc: 2475 kfree(mlxsw_sp_port->active_vlans); 2476 err_port_active_vlans_alloc: 2477 free_netdev(dev); 2478 return err; 2479 } 2480 2481 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2482 bool split, u8 module, u8 width, u8 lane) 2483 { 2484 int err; 2485 2486 err = mlxsw_core_port_init(mlxsw_sp->core, local_port); 2487 if (err) { 2488 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 2489 local_port); 2490 return err; 2491 } 2492 err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, 2493 module, width, lane); 2494 if (err) 2495 goto err_port_create; 2496 return 0; 2497 2498 err_port_create: 2499 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 2500 return err; 2501 } 2502 2503 static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 2504 { 2505 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2506 2507 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw); 2508 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 2509 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 2510 mlxsw_sp->ports[local_port] = NULL; 2511 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 2512 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); 2513 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 2514 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 2515 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); 2516 kfree(mlxsw_sp_port->hw_stats.cache); 2517 kfree(mlxsw_sp_port->sample); 2518 free_percpu(mlxsw_sp_port->pcpu_stats); 2519 kfree(mlxsw_sp_port->untagged_vlans); 2520 kfree(mlxsw_sp_port->active_vlans); 2521 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list)); 2522 
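/* All vPorts must be gone by this point (the WARN above catches leaks);
 * freeing the netdev releases the port structure embedded in it.
 */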
free_netdev(mlxsw_sp_port->dev); 2523 } 2524 2525 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 2526 { 2527 __mlxsw_sp_port_remove(mlxsw_sp, local_port); 2528 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 2529 } 2530 2531 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 2532 { 2533 return mlxsw_sp->ports[local_port] != NULL; 2534 } 2535 2536 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 2537 { 2538 int i; 2539 2540 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) 2541 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2542 mlxsw_sp_port_remove(mlxsw_sp, i); 2543 kfree(mlxsw_sp->ports); 2544 } 2545 2546 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 2547 { 2548 u8 module, width, lane; 2549 size_t alloc_size; 2550 int i; 2551 int err; 2552 2553 alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS; 2554 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 2555 if (!mlxsw_sp->ports) 2556 return -ENOMEM; 2557 2558 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { 2559 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 2560 &width, &lane); 2561 if (err) 2562 goto err_port_module_info_get; 2563 if (!width) 2564 continue; 2565 mlxsw_sp->port_to_module[i] = module; 2566 err = mlxsw_sp_port_create(mlxsw_sp, i, false, 2567 module, width, lane); 2568 if (err) 2569 goto err_port_create; 2570 } 2571 return 0; 2572 2573 err_port_create: 2574 err_port_module_info_get: 2575 for (i--; i >= 1; i--) 2576 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2577 mlxsw_sp_port_remove(mlxsw_sp, i); 2578 kfree(mlxsw_sp->ports); 2579 return err; 2580 } 2581 2582 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 2583 { 2584 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 2585 2586 return local_port - offset; 2587 } 2588 2589 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 2590 u8 module, unsigned int count) 2591 { 2592 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 2593 int err, i; 2594 2595 for (i = 0; i < count; i++) { 2596 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module, 2597 width, i * width); 2598 if (err) 2599 goto err_port_module_map; 2600 } 2601 2602 for (i = 0; i < count; i++) { 2603 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0); 2604 if (err) 2605 goto err_port_swid_set; 2606 } 2607 2608 for (i = 0; i < count; i++) { 2609 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, 2610 module, width, i * width); 2611 if (err) 2612 goto err_port_create; 2613 } 2614 2615 return 0; 2616 2617 err_port_create: 2618 for (i--; i >= 0; i--) 2619 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 2620 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2621 i = count; 2622 err_port_swid_set: 2623 for (i--; i >= 0; i--) 2624 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 2625 MLXSW_PORT_SWID_DISABLED_PORT); 2626 i = count; 2627 err_port_module_map: 2628 for (i--; i >= 0; i--) 2629 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i); 2630 return err; 2631 } 2632 2633 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 2634 u8 base_port, unsigned int count) 2635 { 2636 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 2637 int i; 2638 2639 /* Split by four means we need to re-create two ports, otherwise 2640 * only one. 
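 * The re-created ports are full-width, so they sit on every other local
 * port in the cluster, hence the stride of two below.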
2641 */ 2642 count = count / 2; 2643 2644 for (i = 0; i < count; i++) { 2645 local_port = base_port + i * 2; 2646 module = mlxsw_sp->port_to_module[local_port]; 2647 2648 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, 2649 0); 2650 } 2651 2652 for (i = 0; i < count; i++) 2653 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0); 2654 2655 for (i = 0; i < count; i++) { 2656 local_port = base_port + i * 2; 2657 module = mlxsw_sp->port_to_module[local_port]; 2658 2659 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 2660 width, 0); 2661 } 2662 } 2663 2664 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 2665 unsigned int count) 2666 { 2667 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2668 struct mlxsw_sp_port *mlxsw_sp_port; 2669 u8 module, cur_width, base_port; 2670 int i; 2671 int err; 2672 2673 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2674 if (!mlxsw_sp_port) { 2675 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2676 local_port); 2677 return -EINVAL; 2678 } 2679 2680 module = mlxsw_sp_port->mapping.module; 2681 cur_width = mlxsw_sp_port->mapping.width; 2682 2683 if (count != 2 && count != 4) { 2684 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 2685 return -EINVAL; 2686 } 2687 2688 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 2689 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 2690 return -EINVAL; 2691 } 2692 2693 /* Make sure we have enough slave (even) ports for the split. */ 2694 if (count == 2) { 2695 base_port = local_port; 2696 if (mlxsw_sp->ports[base_port + 1]) { 2697 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 2698 return -EINVAL; 2699 } 2700 } else { 2701 base_port = mlxsw_sp_cluster_base_port_get(local_port); 2702 if (mlxsw_sp->ports[base_port + 1] || 2703 mlxsw_sp->ports[base_port + 3]) { 2704 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 2705 return -EINVAL; 2706 } 2707 } 2708 2709 for (i = 0; i < count; i++) 2710 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 2711 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2712 2713 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); 2714 if (err) { 2715 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2716 goto err_port_split_create; 2717 } 2718 2719 return 0; 2720 2721 err_port_split_create: 2722 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 2723 return err; 2724 } 2725 2726 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) 2727 { 2728 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2729 struct mlxsw_sp_port *mlxsw_sp_port; 2730 u8 cur_width, base_port; 2731 unsigned int count; 2732 int i; 2733 2734 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2735 if (!mlxsw_sp_port) { 2736 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2737 local_port); 2738 return -EINVAL; 2739 } 2740 2741 if (!mlxsw_sp_port->split) { 2742 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); 2743 return -EINVAL; 2744 } 2745 2746 cur_width = mlxsw_sp_port->mapping.width; 2747 count = cur_width == 1 ? 4 : 2; 2748 2749 base_port = mlxsw_sp_cluster_base_port_get(local_port); 2750 2751 /* Determine which ports to remove. 
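 * For a split by two, only the pair holding the unsplit port is
 * affected, so shift the base port to that pair.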
*/ 2752 if (count == 2 && local_port >= base_port + 2) 2753 base_port = base_port + 2; 2754 2755 for (i = 0; i < count; i++) 2756 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 2757 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2758 2759 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 2760 2761 return 0; 2762 } 2763 2764 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 2765 char *pude_pl, void *priv) 2766 { 2767 struct mlxsw_sp *mlxsw_sp = priv; 2768 struct mlxsw_sp_port *mlxsw_sp_port; 2769 enum mlxsw_reg_pude_oper_status status; 2770 u8 local_port; 2771 2772 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 2773 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2774 if (!mlxsw_sp_port) 2775 return; 2776 2777 status = mlxsw_reg_pude_oper_status_get(pude_pl); 2778 if (status == MLXSW_PORT_OPER_STATUS_UP) { 2779 netdev_info(mlxsw_sp_port->dev, "link up\n"); 2780 netif_carrier_on(mlxsw_sp_port->dev); 2781 } else { 2782 netdev_info(mlxsw_sp_port->dev, "link down\n"); 2783 netif_carrier_off(mlxsw_sp_port->dev); 2784 } 2785 } 2786 2787 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 2788 u8 local_port, void *priv) 2789 { 2790 struct mlxsw_sp *mlxsw_sp = priv; 2791 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2792 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 2793 2794 if (unlikely(!mlxsw_sp_port)) { 2795 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 2796 local_port); 2797 return; 2798 } 2799 2800 skb->dev = mlxsw_sp_port->dev; 2801 2802 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 2803 u64_stats_update_begin(&pcpu_stats->syncp); 2804 pcpu_stats->rx_packets++; 2805 pcpu_stats->rx_bytes += skb->len; 2806 u64_stats_update_end(&pcpu_stats->syncp); 2807 2808 skb->protocol = eth_type_trans(skb, skb->dev); 2809 netif_receive_skb(skb); 2810 } 2811 2812 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 2813 void *priv) 2814 { 2815 skb->offload_fwd_mark = 1; 2816 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2817 } 2818 2819 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 2820 void *priv) 2821 { 2822 struct mlxsw_sp *mlxsw_sp = priv; 2823 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2824 struct psample_group *psample_group; 2825 u32 size; 2826 2827 if (unlikely(!mlxsw_sp_port)) { 2828 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 2829 local_port); 2830 goto out; 2831 } 2832 if (unlikely(!mlxsw_sp_port->sample)) { 2833 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 2834 local_port); 2835 goto out; 2836 } 2837 2838 size = mlxsw_sp_port->sample->truncate ? 
2839 mlxsw_sp_port->sample->trunc_size : skb->len; 2840 2841 rcu_read_lock(); 2842 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 2843 if (!psample_group) 2844 goto out_unlock; 2845 psample_sample_packet(psample_group, skb, size, 2846 mlxsw_sp_port->dev->ifindex, 0, 2847 mlxsw_sp_port->sample->rate); 2848 out_unlock: 2849 rcu_read_unlock(); 2850 out: 2851 consume_skb(skb); 2852 } 2853 2854 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2855 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 2856 _is_ctrl, SP_##_trap_group, DISCARD) 2857 2858 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2859 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 2860 _is_ctrl, SP_##_trap_group, DISCARD) 2861 2862 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 2863 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 2864 2865 static const struct mlxsw_listener mlxsw_sp_listener[] = { 2866 /* Events */ 2867 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 2868 /* L2 traps */ 2869 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 2870 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 2871 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true), 2872 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 2873 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 2874 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 2875 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 2876 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 2877 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 2878 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 2879 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 2880 /* L3 traps */ 2881 MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), 2882 MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), 2883 MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), 2884 MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false), 2885 MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), 2886 MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), 2887 MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false), 2888 MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false), 2889 /* PKT Sample trap */ 2890 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 2891 false, SP_IP2ME, DISCARD) 2892 }; 2893 2894 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 2895 { 2896 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 2897 enum mlxsw_reg_qpcr_ir_units ir_units; 2898 int max_cpu_policers; 2899 bool is_bytes; 2900 u8 burst_size; 2901 u32 rate; 2902 int i, err; 2903 2904 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 2905 return -EIO; 2906 2907 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2908 2909 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 2910 for (i = 0; i < max_cpu_policers; i++) { 2911 is_bytes = false; 2912 switch (i) { 2913 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 2914 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 2915 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 2916 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 2917 rate = 128; 2918 burst_size = 7; 2919 break; 2920 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 2921 rate = 16 * 1024; 2922 burst_size = 10; 2923 break; 2924 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4: 2925 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 2926 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 2927 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS: 2928 case 
MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2929 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 2930 rate = 1024; 2931 burst_size = 7; 2932 break; 2933 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 2934 is_bytes = true; 2935 rate = 4 * 1024; 2936 burst_size = 4; 2937 break; 2938 default: 2939 continue; 2940 } 2941 2942 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 2943 burst_size); 2944 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 2945 if (err) 2946 return err; 2947 } 2948 2949 return 0; 2950 } 2951 2952 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 2953 { 2954 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2955 enum mlxsw_reg_htgt_trap_group i; 2956 int max_cpu_policers; 2957 int max_trap_groups; 2958 u8 priority, tc; 2959 u16 policer_id; 2960 int err; 2961 2962 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 2963 return -EIO; 2964 2965 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 2966 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2967 2968 for (i = 0; i < max_trap_groups; i++) { 2969 policer_id = i; 2970 switch (i) { 2971 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 2972 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 2973 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 2974 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 2975 priority = 5; 2976 tc = 5; 2977 break; 2978 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4: 2979 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 2980 priority = 4; 2981 tc = 4; 2982 break; 2983 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 2984 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 2985 priority = 3; 2986 tc = 3; 2987 break; 2988 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 2989 priority = 2; 2990 tc = 2; 2991 break; 2992 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS: 2993 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2994 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 2995 priority = 1; 2996 tc = 1; 2997 break; 2998 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 2999 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 3000 tc = MLXSW_REG_HTGT_DEFAULT_TC; 3001 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 3002 break; 3003 default: 3004 continue; 3005 } 3006 3007 if (max_cpu_policers <= policer_id && 3008 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 3009 return -EIO; 3010 3011 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 3012 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3013 if (err) 3014 return err; 3015 } 3016 3017 return 0; 3018 } 3019 3020 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 3021 { 3022 int i; 3023 int err; 3024 3025 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 3026 if (err) 3027 return err; 3028 3029 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 3030 if (err) 3031 return err; 3032 3033 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3034 err = mlxsw_core_trap_register(mlxsw_sp->core, 3035 &mlxsw_sp_listener[i], 3036 mlxsw_sp); 3037 if (err) 3038 goto err_listener_register; 3039 3040 } 3041 return 0; 3042 3043 err_listener_register: 3044 for (i--; i >= 0; i--) { 3045 mlxsw_core_trap_unregister(mlxsw_sp->core, 3046 &mlxsw_sp_listener[i], 3047 mlxsw_sp); 3048 } 3049 return err; 3050 } 3051 3052 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 3053 { 3054 int i; 3055 3056 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3057 mlxsw_core_trap_unregister(mlxsw_sp->core, 3058 &mlxsw_sp_listener[i], 3059 mlxsw_sp); 3060 } 3061 } 3062 3063 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core, 3064 enum mlxsw_reg_sfgc_type type, 3065 enum mlxsw_reg_sfgc_bridge_type 
bridge_type) 3066 { 3067 enum mlxsw_flood_table_type table_type; 3068 enum mlxsw_sp_flood_table flood_table; 3069 char sfgc_pl[MLXSW_REG_SFGC_LEN]; 3070 3071 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) 3072 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; 3073 else 3074 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; 3075 3076 switch (type) { 3077 case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST: 3078 flood_table = MLXSW_SP_FLOOD_TABLE_UC; 3079 break; 3080 case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4: 3081 flood_table = MLXSW_SP_FLOOD_TABLE_MC; 3082 break; 3083 default: 3084 flood_table = MLXSW_SP_FLOOD_TABLE_BC; 3085 } 3086 3087 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type, 3088 flood_table); 3089 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl); 3090 } 3091 3092 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) 3093 { 3094 int type, err; 3095 3096 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { 3097 if (type == MLXSW_REG_SFGC_TYPE_RESERVED) 3098 continue; 3099 3100 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, 3101 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID); 3102 if (err) 3103 return err; 3104 3105 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, 3106 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID); 3107 if (err) 3108 return err; 3109 } 3110 3111 return 0; 3112 } 3113 3114 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 3115 { 3116 char slcr_pl[MLXSW_REG_SLCR_LEN]; 3117 int err; 3118 3119 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 3120 MLXSW_REG_SLCR_LAG_HASH_DMAC | 3121 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 3122 MLXSW_REG_SLCR_LAG_HASH_VLANID | 3123 MLXSW_REG_SLCR_LAG_HASH_SIP | 3124 MLXSW_REG_SLCR_LAG_HASH_DIP | 3125 MLXSW_REG_SLCR_LAG_HASH_SPORT | 3126 MLXSW_REG_SLCR_LAG_HASH_DPORT | 3127 MLXSW_REG_SLCR_LAG_HASH_IPPROTO); 3128 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 3129 if (err) 3130 return err; 3131 3132 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 3133 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 3134 return -EIO; 3135 3136 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 3137 sizeof(struct mlxsw_sp_upper), 3138 GFP_KERNEL); 3139 if (!mlxsw_sp->lags) 3140 return -ENOMEM; 3141 3142 return 0; 3143 } 3144 3145 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 3146 { 3147 kfree(mlxsw_sp->lags); 3148 } 3149 3150 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 3151 { 3152 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3153 3154 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 3155 MLXSW_REG_HTGT_INVALID_POLICER, 3156 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 3157 MLXSW_REG_HTGT_DEFAULT_TC); 3158 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3159 } 3160 3161 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 3162 const struct mlxsw_bus_info *mlxsw_bus_info) 3163 { 3164 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3165 int err; 3166 3167 mlxsw_sp->core = mlxsw_core; 3168 mlxsw_sp->bus_info = mlxsw_bus_info; 3169 INIT_LIST_HEAD(&mlxsw_sp->fids); 3170 INIT_LIST_HEAD(&mlxsw_sp->vfids.list); 3171 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); 3172 3173 err = mlxsw_sp_base_mac_get(mlxsw_sp); 3174 if (err) { 3175 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 3176 return err; 3177 } 3178 3179 err = mlxsw_sp_traps_init(mlxsw_sp); 3180 if (err) { 3181 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 3182 return err; 3183 } 3184 3185 err = mlxsw_sp_flood_init(mlxsw_sp); 3186 if (err) { 3187 
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n"); 3188 goto err_flood_init; 3189 } 3190 3191 err = mlxsw_sp_buffers_init(mlxsw_sp); 3192 if (err) { 3193 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 3194 goto err_buffers_init; 3195 } 3196 3197 err = mlxsw_sp_lag_init(mlxsw_sp); 3198 if (err) { 3199 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 3200 goto err_lag_init; 3201 } 3202 3203 err = mlxsw_sp_switchdev_init(mlxsw_sp); 3204 if (err) { 3205 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 3206 goto err_switchdev_init; 3207 } 3208 3209 err = mlxsw_sp_router_init(mlxsw_sp); 3210 if (err) { 3211 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 3212 goto err_router_init; 3213 } 3214 3215 err = mlxsw_sp_span_init(mlxsw_sp); 3216 if (err) { 3217 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 3218 goto err_span_init; 3219 } 3220 3221 err = mlxsw_sp_acl_init(mlxsw_sp); 3222 if (err) { 3223 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 3224 goto err_acl_init; 3225 } 3226 3227 err = mlxsw_sp_ports_create(mlxsw_sp); 3228 if (err) { 3229 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 3230 goto err_ports_create; 3231 } 3232 3233 return 0; 3234 3235 err_ports_create: 3236 mlxsw_sp_acl_fini(mlxsw_sp); 3237 err_acl_init: 3238 mlxsw_sp_span_fini(mlxsw_sp); 3239 err_span_init: 3240 mlxsw_sp_router_fini(mlxsw_sp); 3241 err_router_init: 3242 mlxsw_sp_switchdev_fini(mlxsw_sp); 3243 err_switchdev_init: 3244 mlxsw_sp_lag_fini(mlxsw_sp); 3245 err_lag_init: 3246 mlxsw_sp_buffers_fini(mlxsw_sp); 3247 err_buffers_init: 3248 err_flood_init: 3249 mlxsw_sp_traps_fini(mlxsw_sp); 3250 return err; 3251 } 3252 3253 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3254 { 3255 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3256 3257 mlxsw_sp_ports_remove(mlxsw_sp); 3258 mlxsw_sp_acl_fini(mlxsw_sp); 3259 mlxsw_sp_span_fini(mlxsw_sp); 3260 mlxsw_sp_router_fini(mlxsw_sp); 3261 mlxsw_sp_switchdev_fini(mlxsw_sp); 3262 mlxsw_sp_lag_fini(mlxsw_sp); 3263 mlxsw_sp_buffers_fini(mlxsw_sp); 3264 mlxsw_sp_traps_fini(mlxsw_sp); 3265 WARN_ON(!list_empty(&mlxsw_sp->vfids.list)); 3266 WARN_ON(!list_empty(&mlxsw_sp->fids)); 3267 } 3268 3269 static struct mlxsw_config_profile mlxsw_sp_config_profile = { 3270 .used_max_vepa_channels = 1, 3271 .max_vepa_channels = 0, 3272 .used_max_mid = 1, 3273 .max_mid = MLXSW_SP_MID_MAX, 3274 .used_max_pgt = 1, 3275 .max_pgt = 0, 3276 .used_flood_tables = 1, 3277 .used_flood_mode = 1, 3278 .flood_mode = 3, 3279 .max_fid_offset_flood_tables = 3, 3280 .fid_offset_flood_table_size = VLAN_N_VID - 1, 3281 .max_fid_flood_tables = 3, 3282 .fid_flood_table_size = MLXSW_SP_VFID_MAX, 3283 .used_max_ib_mc = 1, 3284 .max_ib_mc = 0, 3285 .used_max_pkey = 1, 3286 .max_pkey = 0, 3287 .used_kvd_split_data = 1, 3288 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY, 3289 .kvd_hash_single_parts = 2, 3290 .kvd_hash_double_parts = 1, 3291 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3292 .swid_config = { 3293 { 3294 .used_type = 1, 3295 .type = MLXSW_PORT_SWID_TYPE_ETH, 3296 } 3297 }, 3298 .resource_query_enable = 1, 3299 }; 3300 3301 static struct mlxsw_driver mlxsw_sp_driver = { 3302 .kind = mlxsw_sp_driver_name, 3303 .priv_size = sizeof(struct mlxsw_sp), 3304 .init = mlxsw_sp_init, 3305 .fini = mlxsw_sp_fini, 3306 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3307 .port_split = mlxsw_sp_port_split, 3308 .port_unsplit = 
mlxsw_sp_port_unsplit, 3309 .sb_pool_get = mlxsw_sp_sb_pool_get, 3310 .sb_pool_set = mlxsw_sp_sb_pool_set, 3311 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3312 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3313 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3314 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3315 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3316 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3317 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3318 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3319 .txhdr_construct = mlxsw_sp_txhdr_construct, 3320 .txhdr_len = MLXSW_TXHDR_LEN, 3321 .profile = &mlxsw_sp_config_profile, 3322 }; 3323 3324 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 3325 { 3326 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 3327 } 3328 3329 static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data) 3330 { 3331 struct mlxsw_sp_port **port = data; 3332 int ret = 0; 3333 3334 if (mlxsw_sp_port_dev_check(lower_dev)) { 3335 *port = netdev_priv(lower_dev); 3336 ret = 1; 3337 } 3338 3339 return ret; 3340 } 3341 3342 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 3343 { 3344 struct mlxsw_sp_port *port; 3345 3346 if (mlxsw_sp_port_dev_check(dev)) 3347 return netdev_priv(dev); 3348 3349 port = NULL; 3350 netdev_walk_all_lower_dev(dev, mlxsw_lower_dev_walk, &port); 3351 3352 return port; 3353 } 3354 3355 static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 3356 { 3357 struct mlxsw_sp_port *mlxsw_sp_port; 3358 3359 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 3360 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; 3361 } 3362 3363 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 3364 { 3365 struct mlxsw_sp_port *port; 3366 3367 if (mlxsw_sp_port_dev_check(dev)) 3368 return netdev_priv(dev); 3369 3370 port = NULL; 3371 netdev_walk_all_lower_dev_rcu(dev, mlxsw_lower_dev_walk, &port); 3372 3373 return port; 3374 } 3375 3376 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 3377 { 3378 struct mlxsw_sp_port *mlxsw_sp_port; 3379 3380 rcu_read_lock(); 3381 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 3382 if (mlxsw_sp_port) 3383 dev_hold(mlxsw_sp_port->dev); 3384 rcu_read_unlock(); 3385 return mlxsw_sp_port; 3386 } 3387 3388 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 3389 { 3390 dev_put(mlxsw_sp_port->dev); 3391 } 3392 3393 static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r, 3394 unsigned long event) 3395 { 3396 switch (event) { 3397 case NETDEV_UP: 3398 if (!r) 3399 return true; 3400 r->ref_count++; 3401 return false; 3402 case NETDEV_DOWN: 3403 if (r && --r->ref_count == 0) 3404 return true; 3405 /* It is possible we already removed the RIF ourselves 3406 * if it was assigned to a netdev that is now a bridge 3407 * or LAG slave. 3408 */ 3409 return false; 3410 } 3411 3412 return false; 3413 } 3414 3415 static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp) 3416 { 3417 int i; 3418 3419 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) 3420 if (!mlxsw_sp->rifs[i]) 3421 return i; 3422 3423 return MLXSW_SP_INVALID_RIF; 3424 } 3425 3426 static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport, 3427 bool *p_lagged, u16 *p_system_port) 3428 { 3429 u8 local_port = mlxsw_sp_vport->local_port; 3430 3431 *p_lagged = mlxsw_sp_vport->lagged; 3432 *p_system_port = *p_lagged ? 
mlxsw_sp_vport->lag_id : local_port; 3433 } 3434 3435 static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport, 3436 struct net_device *l3_dev, u16 rif, 3437 bool create) 3438 { 3439 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 3440 bool lagged = mlxsw_sp_vport->lagged; 3441 char ritr_pl[MLXSW_REG_RITR_LEN]; 3442 u16 system_port; 3443 3444 mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif, 3445 l3_dev->mtu, l3_dev->dev_addr); 3446 3447 mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port); 3448 mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port, 3449 mlxsw_sp_vport_vid_get(mlxsw_sp_vport)); 3450 3451 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 3452 } 3453 3454 static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport); 3455 3456 static struct mlxsw_sp_fid * 3457 mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev) 3458 { 3459 struct mlxsw_sp_fid *f; 3460 3461 f = kzalloc(sizeof(*f), GFP_KERNEL); 3462 if (!f) 3463 return NULL; 3464 3465 f->leave = mlxsw_sp_vport_rif_sp_leave; 3466 f->ref_count = 0; 3467 f->dev = l3_dev; 3468 f->fid = fid; 3469 3470 return f; 3471 } 3472 3473 static struct mlxsw_sp_rif * 3474 mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f) 3475 { 3476 struct mlxsw_sp_rif *r; 3477 3478 r = kzalloc(sizeof(*r), GFP_KERNEL); 3479 if (!r) 3480 return NULL; 3481 3482 INIT_LIST_HEAD(&r->nexthop_list); 3483 INIT_LIST_HEAD(&r->neigh_list); 3484 ether_addr_copy(r->addr, l3_dev->dev_addr); 3485 r->mtu = l3_dev->mtu; 3486 r->ref_count = 1; 3487 r->dev = l3_dev; 3488 r->rif = rif; 3489 r->f = f; 3490 3491 return r; 3492 } 3493 3494 static struct mlxsw_sp_rif * 3495 mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport, 3496 struct net_device *l3_dev) 3497 { 3498 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 3499 struct mlxsw_sp_fid *f; 3500 struct mlxsw_sp_rif *r; 3501 u16 fid, rif; 3502 int err; 3503 3504 rif = mlxsw_sp_avail_rif_get(mlxsw_sp); 3505 if (rif == MLXSW_SP_INVALID_RIF) 3506 return ERR_PTR(-ERANGE); 3507 3508 err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true); 3509 if (err) 3510 return ERR_PTR(err); 3511 3512 fid = mlxsw_sp_rif_sp_to_fid(rif); 3513 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true); 3514 if (err) 3515 goto err_rif_fdb_op; 3516 3517 f = mlxsw_sp_rfid_alloc(fid, l3_dev); 3518 if (!f) { 3519 err = -ENOMEM; 3520 goto err_rfid_alloc; 3521 } 3522 3523 r = mlxsw_sp_rif_alloc(rif, l3_dev, f); 3524 if (!r) { 3525 err = -ENOMEM; 3526 goto err_rif_alloc; 3527 } 3528 3529 f->r = r; 3530 mlxsw_sp->rifs[rif] = r; 3531 3532 return r; 3533 3534 err_rif_alloc: 3535 kfree(f); 3536 err_rfid_alloc: 3537 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); 3538 err_rif_fdb_op: 3539 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); 3540 return ERR_PTR(err); 3541 } 3542 3543 static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport, 3544 struct mlxsw_sp_rif *r) 3545 { 3546 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 3547 struct net_device *l3_dev = r->dev; 3548 struct mlxsw_sp_fid *f = r->f; 3549 u16 fid = f->fid; 3550 u16 rif = r->rif; 3551 3552 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r); 3553 3554 mlxsw_sp->rifs[rif] = NULL; 3555 f->r = NULL; 3556 3557 kfree(r); 3558 3559 kfree(f); 3560 3561 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); 3562 3563 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); 3564 } 3565 3566 static int 
mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport, 3567 struct net_device *l3_dev) 3568 { 3569 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 3570 struct mlxsw_sp_rif *r; 3571 3572 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); 3573 if (!r) { 3574 r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev); 3575 if (IS_ERR(r)) 3576 return PTR_ERR(r); 3577 } 3578 3579 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f); 3580 r->f->ref_count++; 3581 3582 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid); 3583 3584 return 0; 3585 } 3586 3587 static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport) 3588 { 3589 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); 3590 3591 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid); 3592 3593 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); 3594 if (--f->ref_count == 0) 3595 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r); 3596 } 3597 3598 static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev, 3599 struct net_device *port_dev, 3600 unsigned long event, u16 vid) 3601 { 3602 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev); 3603 struct mlxsw_sp_port *mlxsw_sp_vport; 3604 3605 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); 3606 if (WARN_ON(!mlxsw_sp_vport)) 3607 return -EINVAL; 3608 3609 switch (event) { 3610 case NETDEV_UP: 3611 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev); 3612 case NETDEV_DOWN: 3613 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport); 3614 break; 3615 } 3616 3617 return 0; 3618 } 3619 3620 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev, 3621 unsigned long event) 3622 { 3623 if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev)) 3624 return 0; 3625 3626 return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1); 3627 } 3628 3629 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, 3630 struct net_device *lag_dev, 3631 unsigned long event, u16 vid) 3632 { 3633 struct net_device *port_dev; 3634 struct list_head *iter; 3635 int err; 3636 3637 netdev_for_each_lower_dev(lag_dev, port_dev, iter) { 3638 if (mlxsw_sp_port_dev_check(port_dev)) { 3639 err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev, 3640 event, vid); 3641 if (err) 3642 return err; 3643 } 3644 } 3645 3646 return 0; 3647 } 3648 3649 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev, 3650 unsigned long event) 3651 { 3652 if (netif_is_bridge_port(lag_dev)) 3653 return 0; 3654 3655 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1); 3656 } 3657 3658 static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp, 3659 struct net_device *l3_dev) 3660 { 3661 u16 fid; 3662 3663 if (is_vlan_dev(l3_dev)) 3664 fid = vlan_dev_vlan_id(l3_dev); 3665 else if (mlxsw_sp->master_bridge.dev == l3_dev) 3666 fid = 1; 3667 else 3668 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev); 3669 3670 return mlxsw_sp_fid_find(mlxsw_sp, fid); 3671 } 3672 3673 static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid) 3674 { 3675 return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID : 3676 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; 3677 } 3678 3679 static u16 mlxsw_sp_flood_table_index_get(u16 fid) 3680 { 3681 return mlxsw_sp_fid_is_vfid(fid) ? 
mlxsw_sp_fid_to_vfid(fid) : fid; 3682 } 3683 3684 static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid, 3685 bool set) 3686 { 3687 enum mlxsw_flood_table_type table_type; 3688 char *sftr_pl; 3689 u16 index; 3690 int err; 3691 3692 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); 3693 if (!sftr_pl) 3694 return -ENOMEM; 3695 3696 table_type = mlxsw_sp_flood_table_type_get(fid); 3697 index = mlxsw_sp_flood_table_index_get(fid); 3698 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type, 3699 1, MLXSW_PORT_ROUTER_PORT, set); 3700 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); 3701 3702 kfree(sftr_pl); 3703 return err; 3704 } 3705 3706 static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid) 3707 { 3708 if (mlxsw_sp_fid_is_vfid(fid)) 3709 return MLXSW_REG_RITR_FID_IF; 3710 else 3711 return MLXSW_REG_RITR_VLAN_IF; 3712 } 3713 3714 static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, 3715 struct net_device *l3_dev, 3716 u16 fid, u16 rif, 3717 bool create) 3718 { 3719 enum mlxsw_reg_ritr_if_type rif_type; 3720 char ritr_pl[MLXSW_REG_RITR_LEN]; 3721 3722 rif_type = mlxsw_sp_rif_type_get(fid); 3723 mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu, 3724 l3_dev->dev_addr); 3725 mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid); 3726 3727 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 3728 } 3729 3730 static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp, 3731 struct net_device *l3_dev, 3732 struct mlxsw_sp_fid *f) 3733 { 3734 struct mlxsw_sp_rif *r; 3735 u16 rif; 3736 int err; 3737 3738 rif = mlxsw_sp_avail_rif_get(mlxsw_sp); 3739 if (rif == MLXSW_SP_INVALID_RIF) 3740 return -ERANGE; 3741 3742 err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true); 3743 if (err) 3744 return err; 3745 3746 err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true); 3747 if (err) 3748 goto err_rif_bridge_op; 3749 3750 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true); 3751 if (err) 3752 goto err_rif_fdb_op; 3753 3754 r = mlxsw_sp_rif_alloc(rif, l3_dev, f); 3755 if (!r) { 3756 err = -ENOMEM; 3757 goto err_rif_alloc; 3758 } 3759 3760 f->r = r; 3761 mlxsw_sp->rifs[rif] = r; 3762 3763 netdev_dbg(l3_dev, "RIF=%d created\n", rif); 3764 3765 return 0; 3766 3767 err_rif_alloc: 3768 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); 3769 err_rif_fdb_op: 3770 mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); 3771 err_rif_bridge_op: 3772 mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false); 3773 return err; 3774 } 3775 3776 void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, 3777 struct mlxsw_sp_rif *r) 3778 { 3779 struct net_device *l3_dev = r->dev; 3780 struct mlxsw_sp_fid *f = r->f; 3781 u16 rif = r->rif; 3782 3783 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r); 3784 3785 mlxsw_sp->rifs[rif] = NULL; 3786 f->r = NULL; 3787 3788 kfree(r); 3789 3790 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); 3791 3792 mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); 3793 3794 mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false); 3795 3796 netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif); 3797 } 3798 3799 static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev, 3800 struct net_device *br_dev, 3801 unsigned long event) 3802 { 3803 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); 3804 struct mlxsw_sp_fid *f; 3805 3806 /* FID can either be an actual FID if the L3 device is the 3807 * VLAN-aware bridge or a VLAN device 
static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) &&
		 mlxsw_sp->master_bridge.dev == real_dev)
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
						      event);

	return 0;
}

static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(r, event))
		goto out;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_inetaddr_vlan_event(dev, event);

out:
	return notifier_from_errno(err);
}

static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!r)
		return 0;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	ether_addr_copy(r->addr, dev->dev_addr);
	r->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", r->rif);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
	return err;
}

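/* An FDB flush for a {port, FID} pair is skipped when the port is a
 * LAG member and another member of the same LAG is still mapped to
 * the FID, since the entries are keyed by LAG ID and remain in use by
 * the other members.
 */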
static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
					 u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
	else
		return test_bit(fid, lag_port->active_vlans);
}

static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
					   u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 lag_id = mlxsw_sp_port->lag_id;
	u64 max_lag_members;
	int i, count = 0;

	if (!mlxsw_sp_port->lagged)
		return true;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		struct mlxsw_sp_port *lag_port;

		lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (!lag_port || lag_port->local_port == local_port)
			continue;
		if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
			count++;
	}

	return !count;
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
		   mlxsw_sp_port->local_port, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
		   mlxsw_sp_port->lag_id, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
		return 0;

	if (mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}

static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *f, *tmp;

	list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
		if (--f->ref_count == 0)
			mlxsw_sp_fid_destroy(mlxsw_sp, f);
		else
			WARN_ON_ONCE(1);
}

static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0) {
		mlxsw_sp->master_bridge.dev = NULL;
		/* It's possible upper VLAN devices are still holding
		 * references to underlying FIDs. Drop the reference
		 * and release the resources if it was the last one.
		 * If it wasn't, then something bad happened.
		 */
		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
	}
}

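/* Only a single VLAN-aware bridge ("master bridge") is supported per
 * device, tracked by the reference count above. Joining it moves the
 * port into learning / flooding mode and removes the implicit PVID 1
 * VLAN interface; leaving reverses both. From user space, the join is
 * triggered by enslaving a port netdev (here the hypothetical sw1p1):
 *
 *   ip link add name br0 type bridge
 *   ip link set dev sw1p1 master br0
 */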
static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->mc_flood = 1;
	mlxsw_sp_port->mc_router = 0;
	mlxsw_sp_port->mc_disabled = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}

static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->mc_flood = 0;
	mlxsw_sp_port->mc_router = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}

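/* The helpers below wrap the two LAG registers: SLDR creates and
 * destroys a LAG and edits its distributor port list, while SLCOR
 * adds, removes, enables and disables a port in the LAG's collector.
 */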
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

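/* The PVID vPort (VID 1) must follow its port in and out of a LAG.
 * A RIF previously assigned to the vPort is no longer valid once the
 * LAG membership changes, so the vPort first leaves its FID.
 */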
static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If vPort is assigned a RIF, then leave it since it's no
	 * longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lag_id = lag_id;
	mlxsw_sp_vport->lagged = 1;
}

static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lagged = 0;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

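/* Distributor membership mirrors the tx_enabled state reported by
 * the bonding / team driver via NETDEV_CHANGELOWERSTATE, which is
 * handled by mlxsw_sp_port_lag_changed() below.
 */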
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}

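/* NETDEV_PRECHANGEUPPER is used to veto topologies the device cannot
 * offload before the core commits them; by the time
 * NETDEV_CHANGEUPPER arrives, the change was already validated and is
 * only reflected in the device.
 */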
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* A HW limitation forbids putting ports in multiple
		 * bridges.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							  upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	return 0;
}

static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (f && f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
	if (f && --f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp, f);
}

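/* VLAN devices on top of the master bridge are backed by FIDs.
 * Linking one takes a FID reference, creating the FID on first use.
 * Unlinking first destroys the FID's RIF, if any, then drops the
 * reference, destroying the FID itself when it was the last one.
 */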
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;
	if (br_dev != mlxsw_sp->master_bridge.dev)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev))
			break;
		if (info->linking) {
			err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
							       upper_dev);
			if (err)
				return err;
		} else {
			mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
		}
		break;
	}

	return 0;
}

static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}

static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}

static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}

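/* vFIDs back VLAN-unaware bridges. A vPort joining such a bridge is
 * mapped to the bridge's vFID via a {Port, VID} to FID mapping, with
 * the vFID created on first use and destroyed when the last vPort
 * leaves it.
 */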
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}

static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->mc_flood = 1;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->mc_disabled = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}

static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->mc_flood = 0;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->bridged = 0;
}

static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}

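/* Upper device events on a VLAN device are translated to events on
 * the matching vPort of each underlying port, so that the vPort joins
 * or leaves the VLAN-unaware bridge on the VLAN device's behalf.
 */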
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port that are members of the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};

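/* The notifier blocks are registered before the driver itself,
 * presumably so that no netdev, inetaddr or netevent notifications
 * are missed while ports are created during probe; module exit
 * unwinds in the opposite order.
 */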
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);