/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If the port is egress mirrored, the shared buffer size should be
	 * updated according to the MTU value.
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}
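/* Illustrative sizing example (an assumption for clarity, not taken from
 * hardware documentation): assuming the 96-byte cell size used by Spectrum,
 * an egress-mirrored port with MTU 1500 gets an SBIB buffer of
 * DIV_ROUND_UP(1500 * 5 / 2, 96) + 1 = 41 cells.
 */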
static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
			      vid_end, learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						learn_enable);
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}
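/* Worked example for the delay math above (illustrative; assumes the 96-byte
 * Spectrum cell size): with MTU 1500 and a PFC delay allowance of 32768 bit
 * times, mlxsw_sp_pfc_delay_get() returns
 * 2 * DIV_ROUND_UP(32768 / 8, 96) + DIV_ROUND_UP(1500, 96)
 * = 2 * 43 + 16 = 102 cells of lossless headroom.
 */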
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev,
					    int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id,
					   const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return -ENOMEM;

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err)
			goto err_port_vp_mode_trans;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err)
		goto err_port_add_vid;

	return 0;

err_port_add_vid:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return 0;

	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}

static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;

	to_port = mlxsw_sp->ports[mirror->to_local_port];
	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
}

static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  __be16 protocol,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tc_single_action(cls->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = cls->cookie;

	tcf_exts_to_list(cls->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *cls)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 cls->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
			     __be16 proto, struct tc_to_netdev *tc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

	switch (tc->type) {
	case TC_SETUP_MATCHALL:
		switch (tc->cls_mall->command) {
		case TC_CLSMATCHALL_REPLACE:
			return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
							      proto,
							      tc->cls_mall,
							      ingress);
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
						       tc->cls_mall);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
						       proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
						tc->cls_flower);
			return 0;
		case TC_CLSFLOWER_STATS:
			return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
						     tc->cls_flower);
		default:
			return -EOPNOTSUPP;
		}
	}

	return -EOPNOTSUPP;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)
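/* Resulting ethtool stats layout (for reference): 19 IEEE 802.3 counters,
 * then 8 per-priority counters for each of the 8 priorities, then 2 per-TC
 * counters for each of the 8 TCs, i.e. 19 + (8 + 2) * 8 = 99 entries in
 * total, matching MLXSW_SP_PORT_ETHTOOL_STATS_LEN.
 */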
net_device *dev, 1801 enum ethtool_phys_id_state state) 1802 { 1803 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1804 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1805 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 1806 bool active; 1807 1808 switch (state) { 1809 case ETHTOOL_ID_ACTIVE: 1810 active = true; 1811 break; 1812 case ETHTOOL_ID_INACTIVE: 1813 active = false; 1814 break; 1815 default: 1816 return -EOPNOTSUPP; 1817 } 1818 1819 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 1820 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 1821 } 1822 1823 static int 1824 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 1825 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 1826 { 1827 switch (grp) { 1828 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 1829 *p_hw_stats = mlxsw_sp_port_hw_stats; 1830 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 1831 break; 1832 case MLXSW_REG_PPCNT_PRIO_CNT: 1833 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 1834 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 1835 break; 1836 case MLXSW_REG_PPCNT_TC_CNT: 1837 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 1838 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 1839 break; 1840 default: 1841 WARN_ON(1); 1842 return -EOPNOTSUPP; 1843 } 1844 return 0; 1845 } 1846 1847 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 1848 enum mlxsw_reg_ppcnt_grp grp, int prio, 1849 u64 *data, int data_index) 1850 { 1851 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1852 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1853 struct mlxsw_sp_port_hw_stats *hw_stats; 1854 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1855 int i, len; 1856 int err; 1857 1858 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 1859 if (err) 1860 return; 1861 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 1862 for (i = 0; i < len; i++) { 1863 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 1864 if (!hw_stats[i].cells_bytes) 1865 continue; 1866 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 1867 data[data_index + i]); 1868 } 1869 } 1870 1871 static void mlxsw_sp_port_get_stats(struct net_device *dev, 1872 struct ethtool_stats *stats, u64 *data) 1873 { 1874 int i, data_index = 0; 1875 1876 /* IEEE 802.3 Counters */ 1877 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 1878 data, data_index); 1879 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 1880 1881 /* Per-Priority Counters */ 1882 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1883 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 1884 data, data_index); 1885 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 1886 } 1887 1888 /* Per-TC Counters */ 1889 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1890 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 1891 data, data_index); 1892 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 1893 } 1894 } 1895 1896 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 1897 { 1898 switch (sset) { 1899 case ETH_SS_STATS: 1900 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; 1901 default: 1902 return -EOPNOTSUPP; 1903 } 1904 } 1905 1906 struct mlxsw_sp_port_link_mode { 1907 enum ethtool_link_mode_bit_indices mask_ethtool; 1908 u32 mask; 1909 u32 speed; 1910 }; 1911 1912 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = { 1913 { 1914 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 1915 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 1916 .speed = SPEED_100, 1917 }, 1918 { 1919 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 1920 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 
1921 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 1922 .speed = SPEED_1000, 1923 }, 1924 { 1925 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 1926 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 1927 .speed = SPEED_10000, 1928 }, 1929 { 1930 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 1931 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 1932 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 1933 .speed = SPEED_10000, 1934 }, 1935 { 1936 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 1937 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 1938 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 1939 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 1940 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 1941 .speed = SPEED_10000, 1942 }, 1943 { 1944 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 1945 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 1946 .speed = SPEED_20000, 1947 }, 1948 { 1949 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 1950 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 1951 .speed = SPEED_40000, 1952 }, 1953 { 1954 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 1955 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 1956 .speed = SPEED_40000, 1957 }, 1958 { 1959 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 1960 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 1961 .speed = SPEED_40000, 1962 }, 1963 { 1964 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 1965 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 1966 .speed = SPEED_40000, 1967 }, 1968 { 1969 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, 1970 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 1971 .speed = SPEED_25000, 1972 }, 1973 { 1974 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, 1975 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 1976 .speed = SPEED_25000, 1977 }, 1978 { 1979 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 1980 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 1981 .speed = SPEED_25000, 1982 }, 1988 { 1989 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, 1990 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 1991 .speed = SPEED_50000, 1992 }, 1993 { 1994 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 1995 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 1996 .speed = SPEED_50000, 1997 }, 1998 { 1999 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2, 2000 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2001 .speed = SPEED_50000, 2002 }, 2003 { 2004 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2005 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, 2006 .speed = SPEED_56000, 2007 }, 2008 { 2009 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2010 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, 2011 .speed = SPEED_56000, 2012 }, 2013 { 2014 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2015 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, 2016 .speed = SPEED_56000, 2017 }, 2018 { 2019 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2020 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, 2021 .speed = SPEED_56000, 2022 }, 2023 { 2024 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, 2025 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2026 .speed = SPEED_100000, 2027 }, 2028 { 2029 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, 2030 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2031 .speed = SPEED_100000, 2032 }, 2033 { 2034 .mask =
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, 2035 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2036 .speed = SPEED_100000, 2037 }, 2038 { 2039 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 2040 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2041 .speed = SPEED_100000, 2042 }, 2043 }; 2044 2045 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode) 2046 2047 static void 2048 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto, 2049 struct ethtool_link_ksettings *cmd) 2050 { 2051 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2052 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2053 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2054 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2055 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2056 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2057 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2058 2059 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2060 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2061 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2062 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2063 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2064 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2065 } 2066 2067 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode) 2068 { 2069 int i; 2070 2071 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2072 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) 2073 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2074 mode); 2075 } 2076 } 2077 2078 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, 2079 struct ethtool_link_ksettings *cmd) 2080 { 2081 u32 speed = SPEED_UNKNOWN; 2082 u8 duplex = DUPLEX_UNKNOWN; 2083 int i; 2084 2085 if (!carrier_ok) 2086 goto out; 2087 2088 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2089 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { 2090 speed = mlxsw_sp_port_link_mode[i].speed; 2091 duplex = DUPLEX_FULL; 2092 break; 2093 } 2094 } 2095 out: 2096 cmd->base.speed = speed; 2097 cmd->base.duplex = duplex; 2098 } 2099 2100 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) 2101 { 2102 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2103 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2104 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2105 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2106 return PORT_FIBRE; 2107 2108 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2109 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2110 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) 2111 return PORT_DA; 2112 2113 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2114 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2115 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2116 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) 2117 return PORT_NONE; 2118 2119 return PORT_OTHER; 2120 } 2121 2122 static u32 2123 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd) 2124 { 2125 u32 ptys_proto = 0; 2126 int i; 2127 2128 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2129 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2130 cmd->link_modes.advertising)) 2131 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2132 } 2133 return ptys_proto; 2134 } 2135 2136 static u32 mlxsw_sp_to_ptys_speed(u32 speed) 2137 { 2138 u32 ptys_proto = 0; 2139 int i; 2140 2141 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2142 if (speed == mlxsw_sp_port_link_mode[i].speed) 2143 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2144 } 2145 return ptys_proto; 2146 } 2147 2148 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) 
2149 { 2150 u32 ptys_proto = 0; 2151 int i; 2152 2153 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2154 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed) 2155 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2156 } 2157 return ptys_proto; 2158 } 2159 2160 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap, 2161 struct ethtool_link_ksettings *cmd) 2162 { 2163 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 2164 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 2165 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 2166 2167 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd); 2168 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported); 2169 } 2170 2171 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg, 2172 struct ethtool_link_ksettings *cmd) 2173 { 2174 if (!autoneg) 2175 return; 2176 2177 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 2178 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising); 2179 } 2180 2181 static void 2182 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status, 2183 struct ethtool_link_ksettings *cmd) 2184 { 2185 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp) 2186 return; 2187 2188 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg); 2189 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising); 2190 } 2191 2192 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 2193 struct ethtool_link_ksettings *cmd) 2194 { 2195 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp; 2196 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2197 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2198 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2199 u8 autoneg_status; 2200 bool autoneg; 2201 int err; 2202 2203 autoneg = mlxsw_sp_port->link.autoneg; 2204 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 2205 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2206 if (err) 2207 return err; 2208 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, 2209 &eth_proto_oper); 2210 2211 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd); 2212 2213 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd); 2214 2215 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl); 2216 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl); 2217 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd); 2218 2219 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 2220 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper); 2221 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper, 2222 cmd); 2223 2224 return 0; 2225 } 2226 2227 static int 2228 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 2229 const struct ethtool_link_ksettings *cmd) 2230 { 2231 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2232 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2233 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2234 u32 eth_proto_cap, eth_proto_new; 2235 bool autoneg; 2236 int err; 2237 2238 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 2239 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2240 if (err) 2241 return err; 2242 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL); 2243 2244 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 2245 eth_proto_new = autoneg ?
2246 mlxsw_sp_to_ptys_advert_link(cmd) : 2247 mlxsw_sp_to_ptys_speed(cmd->base.speed); 2248 2249 eth_proto_new = eth_proto_new & eth_proto_cap; 2250 if (!eth_proto_new) { 2251 netdev_err(dev, "No supported speed requested\n"); 2252 return -EINVAL; 2253 } 2254 2255 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2256 eth_proto_new); 2257 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2258 if (err) 2259 return err; 2260 2261 mlxsw_sp_port->link.autoneg = autoneg; /* save even if the port is down */ 2262 2263 if (!netif_running(dev)) 2264 return 0; 2265 2266 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2267 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 2268 2269 return 0; 2270 } 2271 2272 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 2273 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 2274 .get_link = ethtool_op_get_link, 2275 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 2276 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 2277 .get_strings = mlxsw_sp_port_get_strings, 2278 .set_phys_id = mlxsw_sp_port_set_phys_id, 2279 .get_ethtool_stats = mlxsw_sp_port_get_stats, 2280 .get_sset_count = mlxsw_sp_port_get_sset_count, 2281 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 2282 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 2283 }; 2284 2285 static int 2286 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 2287 { 2288 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2289 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width; 2290 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2291 u32 eth_proto_admin; 2292 2293 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); 2294 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2295 eth_proto_admin); 2296 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2297 } 2298 2299 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 2300 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 2301 bool dwrr, u8 dwrr_weight) 2302 { 2303 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2304 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2305 2306 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2307 next_index); 2308 mlxsw_reg_qeec_de_set(qeec_pl, true); 2309 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 2310 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 2311 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2312 } 2313 2314 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 2315 enum mlxsw_reg_qeec_hr hr, u8 index, 2316 u8 next_index, u32 maxrate) 2317 { 2318 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2319 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2320 2321 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2322 next_index); 2323 mlxsw_reg_qeec_mase_set(qeec_pl, true); 2324 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 2325 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2326 } 2327 2328 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 2329 u8 switch_prio, u8 tclass) 2330 { 2331 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2332 char qtct_pl[MLXSW_REG_QTCT_LEN]; 2333 2334 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 2335 tclass); 2336 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 2337 } 2338 2339 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 2340 { 2341 int err, i; 2342 2343 /* Set up the elements hierarchy, so that each TC is linked to 2344 one subgroup, and all subgroups are members of the same
group. 2345 */ 2346 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2347 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 2348 0); 2349 if (err) 2350 return err; 2351 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2352 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2353 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 2354 0, false, 0); 2355 if (err) 2356 return err; 2357 } 2358 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2359 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2360 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 2361 false, 0); 2362 if (err) 2363 return err; 2364 } 2365 2366 /* Make sure the max shaper is disabled in all hierarcies that 2367 * support it. 2368 */ 2369 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2370 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 2371 MLXSW_REG_QEEC_MAS_DIS); 2372 if (err) 2373 return err; 2374 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2375 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2376 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 2377 i, 0, 2378 MLXSW_REG_QEEC_MAS_DIS); 2379 if (err) 2380 return err; 2381 } 2382 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2383 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2384 MLXSW_REG_QEEC_HIERARCY_TC, 2385 i, i, 2386 MLXSW_REG_QEEC_MAS_DIS); 2387 if (err) 2388 return err; 2389 } 2390 2391 /* Map all priorities to traffic class 0. */ 2392 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2393 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 2394 if (err) 2395 return err; 2396 } 2397 2398 return 0; 2399 } 2400 2401 static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port) 2402 { 2403 mlxsw_sp_port->pvid = 1; 2404 2405 return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1); 2406 } 2407 2408 static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port) 2409 { 2410 return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); 2411 } 2412 2413 static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2414 bool split, u8 module, u8 width, u8 lane) 2415 { 2416 struct mlxsw_sp_port *mlxsw_sp_port; 2417 struct net_device *dev; 2418 size_t bytes; 2419 int err; 2420 2421 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 2422 if (!dev) 2423 return -ENOMEM; 2424 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 2425 mlxsw_sp_port = netdev_priv(dev); 2426 mlxsw_sp_port->dev = dev; 2427 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 2428 mlxsw_sp_port->local_port = local_port; 2429 mlxsw_sp_port->split = split; 2430 mlxsw_sp_port->mapping.module = module; 2431 mlxsw_sp_port->mapping.width = width; 2432 mlxsw_sp_port->mapping.lane = lane; 2433 mlxsw_sp_port->link.autoneg = 1; 2434 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); 2435 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); 2436 if (!mlxsw_sp_port->active_vlans) { 2437 err = -ENOMEM; 2438 goto err_port_active_vlans_alloc; 2439 } 2440 mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL); 2441 if (!mlxsw_sp_port->untagged_vlans) { 2442 err = -ENOMEM; 2443 goto err_port_untagged_vlans_alloc; 2444 } 2445 INIT_LIST_HEAD(&mlxsw_sp_port->vports_list); 2446 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 2447 2448 mlxsw_sp_port->pcpu_stats = 2449 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 2450 if (!mlxsw_sp_port->pcpu_stats) { 2451 err = -ENOMEM; 2452 goto err_alloc_stats; 2453 } 2454 2455 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 2456 GFP_KERNEL); 2457 if (!mlxsw_sp_port->sample) { 2458 err = -ENOMEM; 2459 goto err_alloc_sample; 2460 } 2461 2462 mlxsw_sp_port->hw_stats.cache = 2463 
kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL); 2464 2465 if (!mlxsw_sp_port->hw_stats.cache) { 2466 err = -ENOMEM; 2467 goto err_alloc_hw_stats; 2468 } 2469 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw, 2470 &update_stats_cache); 2471 2472 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 2473 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 2474 2475 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 2476 if (err) { 2477 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 2478 mlxsw_sp_port->local_port); 2479 goto err_port_swid_set; 2480 } 2481 2482 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 2483 if (err) { 2484 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 2485 mlxsw_sp_port->local_port); 2486 goto err_dev_addr_init; 2487 } 2488 2489 netif_carrier_off(dev); 2490 2491 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 2492 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 2493 dev->hw_features |= NETIF_F_HW_TC; 2494 2495 dev->min_mtu = 0; 2496 dev->max_mtu = ETH_MAX_MTU; 2497 2498 /* Each packet needs to have a Tx header (metadata) on top of all other 2499 headers. 2500 */ 2501 dev->needed_headroom = MLXSW_TXHDR_LEN; 2502 2503 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 2504 if (err) { 2505 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 2506 mlxsw_sp_port->local_port); 2507 goto err_port_system_port_mapping_set; 2508 } 2509 2510 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 2511 if (err) { 2512 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 2513 mlxsw_sp_port->local_port); 2514 goto err_port_speed_by_width_set; 2515 } 2516 2517 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 2518 if (err) { 2519 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 2520 mlxsw_sp_port->local_port); 2521 goto err_port_mtu_set; 2522 } 2523 2524 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2525 if (err) 2526 goto err_port_admin_status_set; 2527 2528 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 2529 if (err) { 2530 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 2531 mlxsw_sp_port->local_port); 2532 goto err_port_buffers_init; 2533 } 2534 2535 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 2536 if (err) { 2537 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 2538 mlxsw_sp_port->local_port); 2539 goto err_port_ets_init; 2540 } 2541 2542 /* ETS and buffers must be initialized before DCB.
*/ 2543 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 2544 if (err) { 2545 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 2546 mlxsw_sp_port->local_port); 2547 goto err_port_dcb_init; 2548 } 2549 2550 err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port); 2551 if (err) { 2552 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n", 2553 mlxsw_sp_port->local_port); 2554 goto err_port_pvid_vport_create; 2555 } 2556 2557 mlxsw_sp_port_switchdev_init(mlxsw_sp_port); 2558 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 2559 err = register_netdev(dev); 2560 if (err) { 2561 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 2562 mlxsw_sp_port->local_port); 2563 goto err_register_netdev; 2564 } 2565 2566 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 2567 mlxsw_sp_port, dev, mlxsw_sp_port->split, 2568 module); 2569 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0); 2570 return 0; 2571 2572 err_register_netdev: 2573 mlxsw_sp->ports[local_port] = NULL; 2574 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 2575 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); 2576 err_port_pvid_vport_create: 2577 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 2578 err_port_dcb_init: 2579 err_port_ets_init: 2580 err_port_buffers_init: 2581 err_port_admin_status_set: 2582 err_port_mtu_set: 2583 err_port_speed_by_width_set: 2584 err_port_system_port_mapping_set: 2585 err_dev_addr_init: 2586 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 2587 err_port_swid_set: 2588 kfree(mlxsw_sp_port->hw_stats.cache); 2589 err_alloc_hw_stats: 2590 kfree(mlxsw_sp_port->sample); 2591 err_alloc_sample: 2592 free_percpu(mlxsw_sp_port->pcpu_stats); 2593 err_alloc_stats: 2594 kfree(mlxsw_sp_port->untagged_vlans); 2595 err_port_untagged_vlans_alloc: 2596 kfree(mlxsw_sp_port->active_vlans); 2597 err_port_active_vlans_alloc: 2598 free_netdev(dev); 2599 return err; 2600 } 2601 2602 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2603 bool split, u8 module, u8 width, u8 lane) 2604 { 2605 int err; 2606 2607 err = mlxsw_core_port_init(mlxsw_sp->core, local_port); 2608 if (err) { 2609 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 2610 local_port); 2611 return err; 2612 } 2613 err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, 2614 module, width, lane); 2615 if (err) 2616 goto err_port_create; 2617 return 0; 2618 2619 err_port_create: 2620 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 2621 return err; 2622 } 2623 2624 static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 2625 { 2626 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2627 2628 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw); 2629 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 2630 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 2631 mlxsw_sp->ports[local_port] = NULL; 2632 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 2633 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); 2634 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 2635 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 2636 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); 2637 kfree(mlxsw_sp_port->hw_stats.cache); 2638 kfree(mlxsw_sp_port->sample); 2639 free_percpu(mlxsw_sp_port->pcpu_stats); 2640 kfree(mlxsw_sp_port->untagged_vlans); 2641 kfree(mlxsw_sp_port->active_vlans); 2642 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list)); 2643 
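	/* Illustrative sketch (kept out of the build): the goto-unwind
	 * idiom used by __mlxsw_sp_port_create() above and mirrored by
	 * this teardown. Each failure jumps to the label just below the
	 * last step that succeeded, so resources are always released in
	 * reverse order of acquisition. All names here are hypothetical,
	 * not driver API.
	 */
#if 0
static int step_a(void) { return 0; }	/* hypothetical setup helpers */
static int step_b(void) { return 0; }
static int step_c(void) { return 0; }
static void undo_a(void) { }
static void undo_b(void) { }

static int example_create(void)
{
	int err;

	err = step_a();
	if (err)
		return err;	/* nothing to unwind yet */
	err = step_b();
	if (err)
		goto err_step_b;
	err = step_c();
	if (err)
		goto err_step_c;
	return 0;

err_step_c:
	undo_b();		/* undo in reverse order */
err_step_b:
	undo_a();
	return err;
}
#endif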
free_netdev(mlxsw_sp_port->dev); 2644 } 2645 2646 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 2647 { 2648 __mlxsw_sp_port_remove(mlxsw_sp, local_port); 2649 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 2650 } 2651 2652 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 2653 { 2654 return mlxsw_sp->ports[local_port] != NULL; 2655 } 2656 2657 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 2658 { 2659 int i; 2660 2661 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 2662 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2663 mlxsw_sp_port_remove(mlxsw_sp, i); 2664 kfree(mlxsw_sp->port_to_module); 2665 kfree(mlxsw_sp->ports); 2666 } 2667 2668 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 2669 { 2670 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2671 u8 module, width, lane; 2672 size_t alloc_size; 2673 int i; 2674 int err; 2675 2676 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 2677 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 2678 if (!mlxsw_sp->ports) 2679 return -ENOMEM; 2680 2681 mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL); 2682 if (!mlxsw_sp->port_to_module) { 2683 err = -ENOMEM; 2684 goto err_port_to_module_alloc; 2685 } 2686 2687 for (i = 1; i < max_ports; i++) { 2688 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 2689 &width, &lane); 2690 if (err) 2691 goto err_port_module_info_get; 2692 if (!width) 2693 continue; 2694 mlxsw_sp->port_to_module[i] = module; 2695 err = mlxsw_sp_port_create(mlxsw_sp, i, false, 2696 module, width, lane); 2697 if (err) 2698 goto err_port_create; 2699 } 2700 return 0; 2701 2702 err_port_create: 2703 err_port_module_info_get: 2704 for (i--; i >= 1; i--) 2705 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2706 mlxsw_sp_port_remove(mlxsw_sp, i); 2707 kfree(mlxsw_sp->port_to_module); 2708 err_port_to_module_alloc: 2709 kfree(mlxsw_sp->ports); 2710 return err; 2711 } 2712 2713 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 2714 { 2715 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 2716 2717 return local_port - offset; 2718 } 2719 2720 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 2721 u8 module, unsigned int count) 2722 { 2723 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 2724 int err, i; 2725 2726 for (i = 0; i < count; i++) { 2727 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module, 2728 width, i * width); 2729 if (err) 2730 goto err_port_module_map; 2731 } 2732 2733 for (i = 0; i < count; i++) { 2734 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0); 2735 if (err) 2736 goto err_port_swid_set; 2737 } 2738 2739 for (i = 0; i < count; i++) { 2740 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, 2741 module, width, i * width); 2742 if (err) 2743 goto err_port_create; 2744 } 2745 2746 return 0; 2747 2748 err_port_create: 2749 for (i--; i >= 0; i--) 2750 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 2751 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2752 i = count; 2753 err_port_swid_set: 2754 for (i--; i >= 0; i--) 2755 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 2756 MLXSW_PORT_SWID_DISABLED_PORT); 2757 i = count; 2758 err_port_module_map: 2759 for (i--; i >= 0; i--) 2760 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i); 2761 return err; 2762 } 2763 2764 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 2765 u8 base_port, unsigned int count) 2766 { 2767 u8 local_port, module, width = 
MLXSW_PORT_MODULE_MAX_WIDTH; 2768 int i; 2769 2770 /* Split by four means we need to re-create two ports, otherwise 2771 * only one. 2772 */ 2773 count = count / 2; 2774 2775 for (i = 0; i < count; i++) { 2776 local_port = base_port + i * 2; 2777 module = mlxsw_sp->port_to_module[local_port]; 2778 2779 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, 2780 0); 2781 } 2782 2783 for (i = 0; i < count; i++) 2784 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0); 2785 2786 for (i = 0; i < count; i++) { 2787 local_port = base_port + i * 2; 2788 module = mlxsw_sp->port_to_module[local_port]; 2789 2790 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 2791 width, 0); 2792 } 2793 } 2794 2795 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 2796 unsigned int count) 2797 { 2798 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2799 struct mlxsw_sp_port *mlxsw_sp_port; 2800 u8 module, cur_width, base_port; 2801 int i; 2802 int err; 2803 2804 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2805 if (!mlxsw_sp_port) { 2806 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2807 local_port); 2808 return -EINVAL; 2809 } 2810 2811 module = mlxsw_sp_port->mapping.module; 2812 cur_width = mlxsw_sp_port->mapping.width; 2813 2814 if (count != 2 && count != 4) { 2815 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 2816 return -EINVAL; 2817 } 2818 2819 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 2820 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 2821 return -EINVAL; 2822 } 2823 2824 /* Make sure we have enough slave (even) ports for the split. */ 2825 if (count == 2) { 2826 base_port = local_port; 2827 if (mlxsw_sp->ports[base_port + 1]) { 2828 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 2829 return -EINVAL; 2830 } 2831 } else { 2832 base_port = mlxsw_sp_cluster_base_port_get(local_port); 2833 if (mlxsw_sp->ports[base_port + 1] || 2834 mlxsw_sp->ports[base_port + 3]) { 2835 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 2836 return -EINVAL; 2837 } 2838 } 2839 2840 for (i = 0; i < count; i++) 2841 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 2842 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2843 2844 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); 2845 if (err) { 2846 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2847 goto err_port_split_create; 2848 } 2849 2850 return 0; 2851 2852 err_port_split_create: 2853 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 2854 return err; 2855 } 2856 2857 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) 2858 { 2859 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2860 struct mlxsw_sp_port *mlxsw_sp_port; 2861 u8 cur_width, base_port; 2862 unsigned int count; 2863 int i; 2864 2865 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2866 if (!mlxsw_sp_port) { 2867 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2868 local_port); 2869 return -EINVAL; 2870 } 2871 2872 if (!mlxsw_sp_port->split) { 2873 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); 2874 return -EINVAL; 2875 } 2876 2877 cur_width = mlxsw_sp_port->mapping.width; 2878 count = cur_width == 1 ? 4 : 2; 2879 2880 base_port = mlxsw_sp_cluster_base_port_get(local_port); 2881 2882 /* Determine which ports to remove. 
*/ 2883 if (count == 2 && local_port >= base_port + 2) 2884 base_port = base_port + 2; 2885 2886 for (i = 0; i < count; i++) 2887 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 2888 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2889 2890 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 2891 2892 return 0; 2893 } 2894 2895 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 2896 char *pude_pl, void *priv) 2897 { 2898 struct mlxsw_sp *mlxsw_sp = priv; 2899 struct mlxsw_sp_port *mlxsw_sp_port; 2900 enum mlxsw_reg_pude_oper_status status; 2901 u8 local_port; 2902 2903 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 2904 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2905 if (!mlxsw_sp_port) 2906 return; 2907 2908 status = mlxsw_reg_pude_oper_status_get(pude_pl); 2909 if (status == MLXSW_PORT_OPER_STATUS_UP) { 2910 netdev_info(mlxsw_sp_port->dev, "link up\n"); 2911 netif_carrier_on(mlxsw_sp_port->dev); 2912 } else { 2913 netdev_info(mlxsw_sp_port->dev, "link down\n"); 2914 netif_carrier_off(mlxsw_sp_port->dev); 2915 } 2916 } 2917 2918 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 2919 u8 local_port, void *priv) 2920 { 2921 struct mlxsw_sp *mlxsw_sp = priv; 2922 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2923 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 2924 2925 if (unlikely(!mlxsw_sp_port)) { 2926 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 2927 local_port); 2928 return; 2929 } 2930 2931 skb->dev = mlxsw_sp_port->dev; 2932 2933 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 2934 u64_stats_update_begin(&pcpu_stats->syncp); 2935 pcpu_stats->rx_packets++; 2936 pcpu_stats->rx_bytes += skb->len; 2937 u64_stats_update_end(&pcpu_stats->syncp); 2938 2939 skb->protocol = eth_type_trans(skb, skb->dev); 2940 netif_receive_skb(skb); 2941 } 2942 2943 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 2944 void *priv) 2945 { 2946 skb->offload_fwd_mark = 1; 2947 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2948 } 2949 2950 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 2951 void *priv) 2952 { 2953 struct mlxsw_sp *mlxsw_sp = priv; 2954 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2955 struct psample_group *psample_group; 2956 u32 size; 2957 2958 if (unlikely(!mlxsw_sp_port)) { 2959 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 2960 local_port); 2961 goto out; 2962 } 2963 if (unlikely(!mlxsw_sp_port->sample)) { 2964 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 2965 local_port); 2966 goto out; 2967 } 2968 2969 size = mlxsw_sp_port->sample->truncate ? 
2970 mlxsw_sp_port->sample->trunc_size : skb->len; 2971 2972 rcu_read_lock(); 2973 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 2974 if (!psample_group) 2975 goto out_unlock; 2976 psample_sample_packet(psample_group, skb, size, 2977 mlxsw_sp_port->dev->ifindex, 0, 2978 mlxsw_sp_port->sample->rate); 2979 out_unlock: 2980 rcu_read_unlock(); 2981 out: 2982 consume_skb(skb); 2983 } 2984 2985 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2986 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 2987 _is_ctrl, SP_##_trap_group, DISCARD) 2988 2989 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2990 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 2991 _is_ctrl, SP_##_trap_group, DISCARD) 2992 2993 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 2994 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 2995 2996 static const struct mlxsw_listener mlxsw_sp_listener[] = { 2997 /* Events */ 2998 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 2999 /* L2 traps */ 3000 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 3001 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 3002 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true), 3003 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 3004 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 3005 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 3006 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 3007 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 3008 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 3009 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 3010 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 3011 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), 3012 /* L3 traps */ 3013 MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3014 MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3015 MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3016 MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false), 3017 MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), 3018 MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), 3019 MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false), 3020 MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false), 3021 /* PKT Sample trap */ 3022 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 3023 false, SP_IP2ME, DISCARD) 3024 }; 3025 3026 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 3027 { 3028 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 3029 enum mlxsw_reg_qpcr_ir_units ir_units; 3030 int max_cpu_policers; 3031 bool is_bytes; 3032 u8 burst_size; 3033 u32 rate; 3034 int i, err; 3035 3036 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 3037 return -EIO; 3038 3039 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3040 3041 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 3042 for (i = 0; i < max_cpu_policers; i++) { 3043 is_bytes = false; 3044 switch (i) { 3045 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3046 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3047 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3048 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3049 rate = 128; 3050 burst_size = 7; 3051 break; 3052 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3053 rate = 16 * 1024; 3054 burst_size = 10; 3055 break; 3056 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4: 3057 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3058 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3059 case 
MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS: 3060 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3061 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3062 rate = 1024; 3063 burst_size = 7; 3064 break; 3065 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3066 is_bytes = true; 3067 rate = 4 * 1024; 3068 burst_size = 4; 3069 break; 3070 default: 3071 continue; 3072 } 3073 3074 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 3075 burst_size); 3076 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 3077 if (err) 3078 return err; 3079 } 3080 3081 return 0; 3082 } 3083 3084 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 3085 { 3086 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3087 enum mlxsw_reg_htgt_trap_group i; 3088 int max_cpu_policers; 3089 int max_trap_groups; 3090 u8 priority, tc; 3091 u16 policer_id; 3092 int err; 3093 3094 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 3095 return -EIO; 3096 3097 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 3098 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3099 3100 for (i = 0; i < max_trap_groups; i++) { 3101 policer_id = i; 3102 switch (i) { 3103 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3104 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3105 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3106 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3107 priority = 5; 3108 tc = 5; 3109 break; 3110 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4: 3111 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3112 priority = 4; 3113 tc = 4; 3114 break; 3115 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3116 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3117 priority = 3; 3118 tc = 3; 3119 break; 3120 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3121 priority = 2; 3122 tc = 2; 3123 break; 3124 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS: 3125 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3126 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3127 priority = 1; 3128 tc = 1; 3129 break; 3130 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 3131 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 3132 tc = MLXSW_REG_HTGT_DEFAULT_TC; 3133 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 3134 break; 3135 default: 3136 continue; 3137 } 3138 3139 if (max_cpu_policers <= policer_id && 3140 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 3141 return -EIO; 3142 3143 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 3144 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3145 if (err) 3146 return err; 3147 } 3148 3149 return 0; 3150 } 3151 3152 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 3153 { 3154 int i; 3155 int err; 3156 3157 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 3158 if (err) 3159 return err; 3160 3161 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 3162 if (err) 3163 return err; 3164 3165 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3166 err = mlxsw_core_trap_register(mlxsw_sp->core, 3167 &mlxsw_sp_listener[i], 3168 mlxsw_sp); 3169 if (err) 3170 goto err_listener_register; 3171 3172 } 3173 return 0; 3174 3175 err_listener_register: 3176 for (i--; i >= 0; i--) { 3177 mlxsw_core_trap_unregister(mlxsw_sp->core, 3178 &mlxsw_sp_listener[i], 3179 mlxsw_sp); 3180 } 3181 return err; 3182 } 3183 3184 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 3185 { 3186 int i; 3187 3188 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3189 mlxsw_core_trap_unregister(mlxsw_sp->core, 3190 &mlxsw_sp_listener[i], 3191 mlxsw_sp); 3192 } 3193 } 3194 3195 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core, 3196 enum 
mlxsw_reg_sfgc_type type, 3197 enum mlxsw_reg_sfgc_bridge_type bridge_type) 3198 { 3199 enum mlxsw_flood_table_type table_type; 3200 enum mlxsw_sp_flood_table flood_table; 3201 char sfgc_pl[MLXSW_REG_SFGC_LEN]; 3202 3203 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) 3204 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; 3205 else 3206 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; 3207 3208 switch (type) { 3209 case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST: 3210 flood_table = MLXSW_SP_FLOOD_TABLE_UC; 3211 break; 3212 case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4: 3213 flood_table = MLXSW_SP_FLOOD_TABLE_MC; 3214 break; 3215 default: 3216 flood_table = MLXSW_SP_FLOOD_TABLE_BC; 3217 } 3218 3219 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type, 3220 flood_table); 3221 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl); 3222 } 3223 3224 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) 3225 { 3226 int type, err; 3227 3228 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { 3229 if (type == MLXSW_REG_SFGC_TYPE_RESERVED) 3230 continue; 3231 3232 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, 3233 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID); 3234 if (err) 3235 return err; 3236 3237 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, 3238 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID); 3239 if (err) 3240 return err; 3241 } 3242 3243 return 0; 3244 } 3245 3246 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 3247 { 3248 char slcr_pl[MLXSW_REG_SLCR_LEN]; 3249 int err; 3250 3251 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 3252 MLXSW_REG_SLCR_LAG_HASH_DMAC | 3253 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 3254 MLXSW_REG_SLCR_LAG_HASH_VLANID | 3255 MLXSW_REG_SLCR_LAG_HASH_SIP | 3256 MLXSW_REG_SLCR_LAG_HASH_DIP | 3257 MLXSW_REG_SLCR_LAG_HASH_SPORT | 3258 MLXSW_REG_SLCR_LAG_HASH_DPORT | 3259 MLXSW_REG_SLCR_LAG_HASH_IPPROTO); 3260 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 3261 if (err) 3262 return err; 3263 3264 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 3265 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 3266 return -EIO; 3267 3268 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 3269 sizeof(struct mlxsw_sp_upper), 3270 GFP_KERNEL); 3271 if (!mlxsw_sp->lags) 3272 return -ENOMEM; 3273 3274 return 0; 3275 } 3276 3277 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 3278 { 3279 kfree(mlxsw_sp->lags); 3280 } 3281 3282 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 3283 { 3284 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3285 3286 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 3287 MLXSW_REG_HTGT_INVALID_POLICER, 3288 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 3289 MLXSW_REG_HTGT_DEFAULT_TC); 3290 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3291 } 3292 3293 static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create); 3294 3295 static int mlxsw_sp_dummy_fid_init(struct mlxsw_sp *mlxsw_sp) 3296 { 3297 return mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true); 3298 } 3299 3300 static void mlxsw_sp_dummy_fid_fini(struct mlxsw_sp *mlxsw_sp) 3301 { 3302 mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false); 3303 } 3304 3305 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 3306 const struct mlxsw_bus_info *mlxsw_bus_info) 3307 { 3308 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3309 int err; 3310 3311 mlxsw_sp->core = mlxsw_core; 3312 mlxsw_sp->bus_info = mlxsw_bus_info; 3313 INIT_LIST_HEAD(&mlxsw_sp->fids); 3314 
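	/* The initialization sequence below is ordered: each subsystem may
	 * rely only on the ones brought up before it (traps before flood
	 * tables, buffers before LAG and switchdev, the router before SPAN
	 * and the ACLs, ports last), and the error path at the bottom
	 * unwinds in exact reverse order.
	 */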
INIT_LIST_HEAD(&mlxsw_sp->vfids.list); 3315 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); 3316 3317 err = mlxsw_sp_base_mac_get(mlxsw_sp); 3318 if (err) { 3319 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 3320 return err; 3321 } 3322 3323 err = mlxsw_sp_traps_init(mlxsw_sp); 3324 if (err) { 3325 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 3326 return err; 3327 } 3328 3329 err = mlxsw_sp_flood_init(mlxsw_sp); 3330 if (err) { 3331 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n"); 3332 goto err_flood_init; 3333 } 3334 3335 err = mlxsw_sp_buffers_init(mlxsw_sp); 3336 if (err) { 3337 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 3338 goto err_buffers_init; 3339 } 3340 3341 err = mlxsw_sp_lag_init(mlxsw_sp); 3342 if (err) { 3343 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 3344 goto err_lag_init; 3345 } 3346 3347 err = mlxsw_sp_switchdev_init(mlxsw_sp); 3348 if (err) { 3349 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 3350 goto err_switchdev_init; 3351 } 3352 3353 err = mlxsw_sp_router_init(mlxsw_sp); 3354 if (err) { 3355 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 3356 goto err_router_init; 3357 } 3358 3359 err = mlxsw_sp_span_init(mlxsw_sp); 3360 if (err) { 3361 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 3362 goto err_span_init; 3363 } 3364 3365 err = mlxsw_sp_acl_init(mlxsw_sp); 3366 if (err) { 3367 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 3368 goto err_acl_init; 3369 } 3370 3371 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 3372 if (err) { 3373 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 3374 goto err_counter_pool_init; 3375 } 3376 3377 err = mlxsw_sp_dpipe_init(mlxsw_sp); 3378 if (err) { 3379 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 3380 goto err_dpipe_init; 3381 } 3382 3383 err = mlxsw_sp_dummy_fid_init(mlxsw_sp); 3384 if (err) { 3385 dev_err(mlxsw_sp->bus_info->dev, "Failed to init dummy FID\n"); 3386 goto err_dummy_fid_init; 3387 } 3388 3389 err = mlxsw_sp_ports_create(mlxsw_sp); 3390 if (err) { 3391 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 3392 goto err_ports_create; 3393 } 3394 3395 return 0; 3396 3397 err_ports_create: 3398 mlxsw_sp_dummy_fid_fini(mlxsw_sp); 3399 err_dummy_fid_init: 3400 mlxsw_sp_dpipe_fini(mlxsw_sp); 3401 err_dpipe_init: 3402 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3403 err_counter_pool_init: 3404 mlxsw_sp_acl_fini(mlxsw_sp); 3405 err_acl_init: 3406 mlxsw_sp_span_fini(mlxsw_sp); 3407 err_span_init: 3408 mlxsw_sp_router_fini(mlxsw_sp); 3409 err_router_init: 3410 mlxsw_sp_switchdev_fini(mlxsw_sp); 3411 err_switchdev_init: 3412 mlxsw_sp_lag_fini(mlxsw_sp); 3413 err_lag_init: 3414 mlxsw_sp_buffers_fini(mlxsw_sp); 3415 err_buffers_init: 3416 err_flood_init: 3417 mlxsw_sp_traps_fini(mlxsw_sp); 3418 return err; 3419 } 3420 3421 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3422 { 3423 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3424 3425 mlxsw_sp_ports_remove(mlxsw_sp); 3426 mlxsw_sp_dummy_fid_fini(mlxsw_sp); 3427 mlxsw_sp_dpipe_fini(mlxsw_sp); 3428 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3429 mlxsw_sp_acl_fini(mlxsw_sp); 3430 mlxsw_sp_span_fini(mlxsw_sp); 3431 mlxsw_sp_router_fini(mlxsw_sp); 3432 mlxsw_sp_switchdev_fini(mlxsw_sp); 3433 mlxsw_sp_lag_fini(mlxsw_sp); 3434 mlxsw_sp_buffers_fini(mlxsw_sp); 3435 mlxsw_sp_traps_fini(mlxsw_sp); 3436 WARN_ON(!list_empty(&mlxsw_sp->vfids.list)); 3437 
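	/* As with the vFID list above, any FID still on the list at this
	 * point indicates a leaked reference rather than a normal
	 * condition.
	 */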
WARN_ON(!list_empty(&mlxsw_sp->fids)); 3438 } 3439 3440 static struct mlxsw_config_profile mlxsw_sp_config_profile = { 3441 .used_max_vepa_channels = 1, 3442 .max_vepa_channels = 0, 3443 .used_max_mid = 1, 3444 .max_mid = MLXSW_SP_MID_MAX, 3445 .used_max_pgt = 1, 3446 .max_pgt = 0, 3447 .used_flood_tables = 1, 3448 .used_flood_mode = 1, 3449 .flood_mode = 3, 3450 .max_fid_offset_flood_tables = 3, 3451 .fid_offset_flood_table_size = VLAN_N_VID - 1, 3452 .max_fid_flood_tables = 3, 3453 .fid_flood_table_size = MLXSW_SP_VFID_MAX, 3454 .used_max_ib_mc = 1, 3455 .max_ib_mc = 0, 3456 .used_max_pkey = 1, 3457 .max_pkey = 0, 3458 .used_kvd_split_data = 1, 3459 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY, 3460 .kvd_hash_single_parts = 2, 3461 .kvd_hash_double_parts = 1, 3462 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3463 .swid_config = { 3464 { 3465 .used_type = 1, 3466 .type = MLXSW_PORT_SWID_TYPE_ETH, 3467 } 3468 }, 3469 .resource_query_enable = 1, 3470 }; 3471 3472 static struct mlxsw_driver mlxsw_sp_driver = { 3473 .kind = mlxsw_sp_driver_name, 3474 .priv_size = sizeof(struct mlxsw_sp), 3475 .init = mlxsw_sp_init, 3476 .fini = mlxsw_sp_fini, 3477 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3478 .port_split = mlxsw_sp_port_split, 3479 .port_unsplit = mlxsw_sp_port_unsplit, 3480 .sb_pool_get = mlxsw_sp_sb_pool_get, 3481 .sb_pool_set = mlxsw_sp_sb_pool_set, 3482 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3483 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3484 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3485 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3486 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3487 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3488 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3489 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3490 .txhdr_construct = mlxsw_sp_txhdr_construct, 3491 .txhdr_len = MLXSW_TXHDR_LEN, 3492 .profile = &mlxsw_sp_config_profile, 3493 }; 3494 3495 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 3496 { 3497 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 3498 } 3499 3500 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) 3501 { 3502 struct mlxsw_sp_port **p_mlxsw_sp_port = data; 3503 int ret = 0; 3504 3505 if (mlxsw_sp_port_dev_check(lower_dev)) { 3506 *p_mlxsw_sp_port = netdev_priv(lower_dev); 3507 ret = 1; 3508 } 3509 3510 return ret; 3511 } 3512 3513 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 3514 { 3515 struct mlxsw_sp_port *mlxsw_sp_port; 3516 3517 if (mlxsw_sp_port_dev_check(dev)) 3518 return netdev_priv(dev); 3519 3520 mlxsw_sp_port = NULL; 3521 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); 3522 3523 return mlxsw_sp_port; 3524 } 3525 3526 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 3527 { 3528 struct mlxsw_sp_port *mlxsw_sp_port; 3529 3530 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 3531 return mlxsw_sp_port ? 
mlxsw_sp_port->mlxsw_sp : NULL; 3532 } 3533 3534 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 3535 { 3536 struct mlxsw_sp_port *mlxsw_sp_port; 3537 3538 if (mlxsw_sp_port_dev_check(dev)) 3539 return netdev_priv(dev); 3540 3541 mlxsw_sp_port = NULL; 3542 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 3543 &mlxsw_sp_port); 3544 3545 return mlxsw_sp_port; 3546 } 3547 3548 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 3549 { 3550 struct mlxsw_sp_port *mlxsw_sp_port; 3551 3552 rcu_read_lock(); 3553 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 3554 if (mlxsw_sp_port) 3555 dev_hold(mlxsw_sp_port->dev); 3556 rcu_read_unlock(); 3557 return mlxsw_sp_port; 3558 } 3559 3560 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 3561 { 3562 dev_put(mlxsw_sp_port->dev); 3563 } 3564 3565 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port, 3566 u16 fid) 3567 { 3568 if (mlxsw_sp_fid_is_vfid(fid)) 3569 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid); 3570 else 3571 return test_bit(fid, lag_port->active_vlans); 3572 } 3573 3574 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port, 3575 u16 fid) 3576 { 3577 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3578 u8 local_port = mlxsw_sp_port->local_port; 3579 u16 lag_id = mlxsw_sp_port->lag_id; 3580 u64 max_lag_members; 3581 int i, count = 0; 3582 3583 if (!mlxsw_sp_port->lagged) 3584 return true; 3585 3586 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 3587 MAX_LAG_MEMBERS); 3588 for (i = 0; i < max_lag_members; i++) { 3589 struct mlxsw_sp_port *lag_port; 3590 3591 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i); 3592 if (!lag_port || lag_port->local_port == local_port) 3593 continue; 3594 if (mlxsw_sp_lag_port_fid_member(lag_port, fid)) 3595 count++; 3596 } 3597 3598 return !count; 3599 } 3600 3601 static int 3602 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port, 3603 u16 fid) 3604 { 3605 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3606 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 3607 3608 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID); 3609 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); 3610 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, 3611 mlxsw_sp_port->local_port); 3612 3613 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n", 3614 mlxsw_sp_port->local_port, fid); 3615 3616 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 3617 } 3618 3619 static int 3620 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port, 3621 u16 fid) 3622 { 3623 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3624 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 3625 3626 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID); 3627 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); 3628 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); 3629 3630 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n", 3631 mlxsw_sp_port->lag_id, fid); 3632 3633 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 3634 } 3635 3636 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) 3637 { 3638 if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid)) 3639 return 0; 3640 3641 if (mlxsw_sp_port->lagged) 3642 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, 3643 fid); 3644 else 3645 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid); 3646 } 3647 3648 
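/* Illustrative sketch (kept out of the build): the LAG decision made by
 * mlxsw_sp_port_fdb_should_flush() above, modeled in isolation. For a
 * lagged port, an FDB flush is only needed when no *other* LAG member
 * is still a member of the FID; otherwise the remaining members keep
 * the entries valid. Types and names below are hypothetical
 * simplifications, not driver API.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

struct member {
	bool valid;	/* slot populated in the LAG */
	bool in_fid;	/* member participates in the FID */
};

static bool should_flush(const struct member *m, int count, int self)
{
	int i, others = 0;

	for (i = 0; i < count; i++) {
		if (i == self || !m[i].valid)
			continue;
		if (m[i].in_fid)
			others++;
	}
	return others == 0;	/* last member using the FID -> flush */
}

int main(void)
{
	struct member lag[4] = {
		{ true, true }, { true, false }, { true, true }, { false, false },
	};

	/* Port 0 is not the last FID member: port 2 still uses it. */
	printf("flush? %d\n", (int)should_flush(lag, 4, 0));	/* 0 */

	/* Once port 2 leaves the FID, port 0 becomes the last member. */
	lag[2].in_fid = false;
	printf("flush? %d\n", (int)should_flush(lag, 4, 0));	/* 1 */
	return 0;
}
#endif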
static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp) 3649 { 3650 struct mlxsw_sp_fid *f, *tmp; 3651 3652 list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list) 3653 if (--f->ref_count == 0) 3654 mlxsw_sp_fid_destroy(mlxsw_sp, f); 3655 else 3656 WARN_ON_ONCE(1); 3657 } 3658 3659 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, 3660 struct net_device *br_dev) 3661 { 3662 return !mlxsw_sp->master_bridge.dev || 3663 mlxsw_sp->master_bridge.dev == br_dev; 3664 } 3665 3666 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, 3667 struct net_device *br_dev) 3668 { 3669 mlxsw_sp->master_bridge.dev = br_dev; 3670 mlxsw_sp->master_bridge.ref_count++; 3671 } 3672 3673 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp) 3674 { 3675 if (--mlxsw_sp->master_bridge.ref_count == 0) { 3676 mlxsw_sp->master_bridge.dev = NULL; 3677 /* It's possible upper VLAN devices are still holding 3678 * references to underlying FIDs. Drop the reference 3679 * and release the resources if it was the last one. 3680 * If it wasn't, then something bad happened. 3681 */ 3682 mlxsw_sp_master_bridge_gone_sync(mlxsw_sp); 3683 } 3684 } 3685 3686 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, 3687 struct net_device *br_dev) 3688 { 3689 struct net_device *dev = mlxsw_sp_port->dev; 3690 int err; 3691 3692 /* When a port is not bridged, untagged packets are tagged with 3693 * PVID=VID=1, thereby creating an implicit VLAN interface in 3694 * the device. Remove it and let the bridge code take care of its 3695 * own VLANs. 3696 */ 3697 err = mlxsw_sp_port_kill_vid(dev, 0, 1); 3698 if (err) 3699 return err; 3700 3701 mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev); 3702 3703 mlxsw_sp_port->learning = 1; 3704 mlxsw_sp_port->learning_sync = 1; 3705 mlxsw_sp_port->uc_flood = 1; 3706 mlxsw_sp_port->mc_flood = 1; 3707 mlxsw_sp_port->mc_router = 0; 3708 mlxsw_sp_port->mc_disabled = 1; 3709 mlxsw_sp_port->bridged = 1; 3710 3711 return 0; 3712 } 3713 3714 static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) 3715 { 3716 struct net_device *dev = mlxsw_sp_port->dev; 3717 3718 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); 3719 3720 mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp); 3721 3722 mlxsw_sp_port->learning = 0; 3723 mlxsw_sp_port->learning_sync = 0; 3724 mlxsw_sp_port->uc_flood = 0; 3725 mlxsw_sp_port->mc_flood = 0; 3726 mlxsw_sp_port->mc_router = 0; 3727 mlxsw_sp_port->bridged = 0; 3728 3729 /* Add an implicit VLAN interface in the device, so that untagged 3730 packets will be classified to the default vFID.

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}
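
/* A LAG device is only offloadable if a free LAG ID exists and the device
 * hashes egress traffic across its slaves; other TX types (e.g.
 * active-backup or round-robin) are vetoed in NETDEV_PRECHANGEUPPER.
 * For example, an 802.3ad bond reports the required hash TX type
 * (port names below are illustrative):
 *
 *   ip link add name bond0 type bond mode 802.3ad
 *   ip link set dev sw1p1 master bond0
 */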

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev, u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If the vPort is assigned a RIF, then leave it since it's no
	 * longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lag_id = lag_id;
	mlxsw_sp_vport->lagged = 1;
	mlxsw_sp_vport->dev = lag_dev;
}

static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->lagged = 0;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}
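
/* Distributor membership controls whether the LAG hashes egress traffic
 * to a port. It is toggled at runtime from NETDEV_CHANGELOWERSTATE
 * according to the tx_enabled state reported by the LAG driver (see
 * mlxsw_sp_port_lag_changed() below).
 */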

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}
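
/* Set the STP state of all possible VLANs on a port in one go. Its only
 * users here are the OVS join / leave helpers below, where the port
 * should simply forward on all VLANs.
 */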

static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
				     true, false);
	if (err)
		goto err_port_vlan_set;
	return 0;

err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
}
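
/* Notifier handling for physical ports: NETDEV_PRECHANGEUPPER is used to
 * veto configurations the device cannot offload by returning an error,
 * while NETDEV_CHANGEUPPER performs the actual join / leave once the
 * linking is a fact.
 */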

static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* A HW limitation forbids putting a port in multiple
		 * bridges.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							  upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	return 0;
}

static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (f && f->rif)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
	if (f && --f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp, f);
}

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev))
			return -EINVAL;
		if (is_vlan_dev(upper_dev) &&
		    br_dev != mlxsw_sp->master_bridge.dev)
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
								       upper_dev);
			else
				mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
								   upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
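
/* vFIDs are FIDs from a dedicated range beyond the VLAN FIDs (see
 * mlxsw_sp_vfid_to_fid()). A vFID backs a VLAN-unaware bridge that VLAN
 * uppers of ports are enslaved to; it is created on demand for the first
 * such upper and destroyed when its reference count drops to zero.
 */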

static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}

static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->rif)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}

static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}
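
/* Joining a vFID: find (or create) the vFID of the bridge, enable
 * flooding for it on the vPort and map the {Port, VID} pair to the FID,
 * so that traffic received with this VID is classified to it.
 */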

static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}

static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->mc_flood = 1;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->mc_disabled = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}

static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->mc_flood = 0;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->bridged = 0;
}

static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}
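
/* Events for VLAN uppers of ports ("vPorts") are keyed by VID. For
 * example (names are illustrative only):
 *
 *   ip link add link sw1p1 name sw1p1.100 type vlan id 100
 *   ip link set dev sw1p1.100 master br1
 *
 * would land here with vid == 100. The LAG variant below fans the event
 * out to all member ports.
 */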

static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port that are members of the same bridge.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
								 upper_dev);
			else
				mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}

static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}
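
/* Top-level netdev notifier handler. MTU / MAC changes are relevant to
 * router ports regardless of device type and are therefore checked
 * first, followed by VRF enslavement; afterwards the event is dispatched
 * according to the type of the device it concerns.
 */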

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);