/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "../mlxfw/mlxfw.h"

#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1530
#define MLXSW_FWREV_SUBMINOR 152

static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
	.major = MLXSW_FWREV_MAJOR,
	.minor = MLXSW_FWREV_MINOR,
	.subminor = MLXSW_FWREV_SUBMINOR
};

#define MLXSW_SP_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
	"." __stringify(MLXSW_FWREV_MINOR) \
	"." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
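/* Each MLXSW_ITEM32(tx, hdr, <field>, <offset>, <shift>, <width>) above
 * expands into mlxsw_tx_hdr_<field>_get() and mlxsw_tx_hdr_<field>_set()
 * accessors that read or write <width> bits at bit <shift> of the 32-bit
 * big-endian word at byte <offset> of the header buffer. A rough sketch
 * of what the generated setter does (illustration only, not the actual
 * macro expansion from item.h):
 *
 *	static inline void mlxsw_tx_hdr_version_set(char *buf, u32 val)
 *	{
 *		__be32 *p = (__be32 *) (buf + 0x00);	// byte offset
 *		u32 w = be32_to_cpu(*p);
 *
 *		w &= ~GENMASK(31, 28);			// 4 bits at shift 28
 *		w |= (val & 0xf) << 28;
 *		*p = cpu_to_be32(w);
 *	}
 *
 * mlxsw_sp_txhdr_construct() below uses these setters to build the
 * TX header that precedes every packet handed to the device.
 */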
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query	= mlxsw_sp_component_query,
	.fsm_lock		= mlxsw_sp_fsm_lock,
	.fsm_component_update	= mlxsw_sp_fsm_component_update,
	.fsm_block_download	= mlxsw_sp_fsm_block_download,
	.fsm_component_verify	= mlxsw_sp_fsm_component_verify,
	.fsm_activate		= mlxsw_sp_fsm_activate,
	.fsm_query_state	= mlxsw_sp_fsm_query_state,
	.fsm_cancel		= mlxsw_sp_fsm_cancel,
	.fsm_release		= mlxsw_sp_fsm_release
};

static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};

	return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}

static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
			       const struct mlxsw_fw_rev *b)
{
	if (a->major != b->major)
		return a->major > b->major;
	if (a->minor != b->minor)
		return a->minor > b->minor;
	return a->subminor >= b->subminor;
}
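/* mlxsw_sp_fw_rev_ge() is a lexicographic "greater or equal" over
 * (major, minor, subminor). For example, against the supported revision
 * 13.1530.152 defined above:
 *
 *	13.1530.152 -> true  (equal)
 *	13.1620.100 -> true  (newer minor wins)
 *	13.1530.100 -> false (older subminor)
 *	12.2000.999 -> false (older major wins)
 *
 * mlxsw_sp_fw_rev_validate() below relies on this to decide whether the
 * bundled firmware file needs to be flashed.
 */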
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct firmware *firmware;
	int err;

	if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
		 MLXSW_SP_FW_FILENAME);

	err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			MLXSW_SP_FW_FILENAME);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
	return err;
}

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
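/* Typical flow counter life cycle, as used by the ACL code elsewhere in
 * this driver (hedged sketch; error handling trimmed for brevity):
 *
 *	unsigned int counter_index;
 *	u64 packets, bytes;
 *
 *	mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
 *	// ... bind counter_index to a rule, let traffic flow ...
 *	mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index, &packets, &bytes);
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
 *
 * Note that mlxsw_sp_flow_counter_alloc() clears the hardware counter
 * before handing it out, so the first _get() reflects only new traffic.
 */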
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == local_port)
			return curr;
	}
	return NULL;
}
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
					      port->local_port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}
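/* Worked example for mlxsw_sp_span_mtu_to_buffsize() above, assuming the
 * Spectrum-1 cell size of 96 bytes (the actual cell size is queried from
 * the device and may differ): for mtu = 1500, the internal buffer
 * reserved for egress mirroring is
 *
 *	bytes_cells(1500 * 5 / 2) + 1 = DIV_ROUND_UP(3750, 96) + 1
 *				      = 40 + 1 = 41 cells
 *
 * i.e. roughly 2.5 MTUs of headroom, so mirrored copies can be absorbed
 * while the original frames are still in flight.
 */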
static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					u8 destination_port,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
					      destination_port);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	/* Derive the port MAC from the switch base MAC by adding the local
	 * port number to the last byte, e.g. base ...:00 and local port 5
	 * yield ...:05.
	 */
	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}
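/* PVID 0 is used below to mean "no PVID": untagged frames are then
 * dropped at ingress rather than classified to a VLAN. A hedged sketch
 * of the two paths taken by mlxsw_sp_port_pvid_set():
 *
 *	mlxsw_sp_port_pvid_set(port, 10);	// untagged -> VLAN 10
 *	mlxsw_sp_port_pvid_set(port, 0);	// drop untagged frames
 *
 * This mirrors "bridge vlan add ... pvid untagged" and its removal on
 * the software bridge side.
 */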
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}
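/* Worked example for the headroom math above (hedged; again assuming a
 * 96-byte cell). The 802.1Qbb 'delay' allowance is expressed in bit
 * times, so for delay = 32768 bits and mtu = 1500 bytes,
 * mlxsw_sp_pfc_delay_get() computes
 *
 *	cells(32768 / 8) = cells(4096)     = 43 cells
 *	2 * 43 + cells(1500) = 86 + 16     = 102 cells
 *
 * which is the extra lossless headroom reserved on top of the
 * 2 * cells(mtu) threshold from mlxsw_sp_pg_buf_threshold_get().
 */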
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev,
					    int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id,
					   const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}
}
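/* ndo_get_stats64 may be called in atomic context, while reading the
 * PPCNT register requires sleeping (a register round trip to firmware),
 * so the driver decouples the two with a periodically refreshed cache.
 * A sketch of the functions that follow:
 *
 *	update_stats_cache() (delayed work, every MLXSW_HW_STATS_UPDATE_TIME)
 *		-> mlxsw_sp_port_get_hw_stats()  -> periodic_hw_stats.stats
 *		-> mlxsw_sp_port_get_hw_xstats() -> periodic_hw_stats.xstats
 *	mlxsw_sp_port_get_stats64() -> memcpy() from the cache
 *
 * Readers may therefore see counters up to one refresh interval stale.
 */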
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}

static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return mlxsw_sp_port_vlan;

	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}

void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}
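/* Examples of the names produced above (hedged; exact numbers depend on
 * the front-panel wiring of the system): a non-split port on module 2 is
 * reported as "p3", while the same port split into two halves (width 2)
 * yields "p3s0" and "p3s1" for lanes 0-1 and 2-3 respectively. User space
 * (e.g. a udev rule) can consume phys_port_name to give the netdevs
 * predictable front-panel names.
 */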
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
				    span_type);
}

static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_one_action(f->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	tcf_exts_to_list(f->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}
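/* The matchall handlers above back user-space commands such as (hedged
 * example; interface names are illustrative):
 *
 *	tc qdisc add dev sw1p1 clsact
 *	tc filter add dev sw1p1 ingress matchall skip_sw \
 *		action mirred egress mirror dev sw1p2
 *
 * which mirrors all traffic ingressing sw1p1 to sw1p2 via a SPAN entry,
 * and
 *
 *	tc filter add dev sw1p1 ingress matchall skip_sw \
 *		action sample rate 100 group 1
 *
 * which streams one-in-a-hundred packets to the psample netlink group.
 */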
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	if (f->common.chain_index)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct tc_cls_flower_offload *f,
			     bool ingress)
{
	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				      void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	if (!tc_can_offload(mlxsw_sp_port->dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data,
						    ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc_block_cb_ig(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int mlxsw_sp_setup_tc_block_cb_eg(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_block_offload *f)
{
	tc_setup_cb_t *cb;

	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = mlxsw_sp_setup_tc_block_cb_ig;
	else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = mlxsw_sp_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
					     mlxsw_sp_port);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
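/* Call flow for the tc block plumbing above (hedged sketch): attaching a
 * clsact qdisc creates one block per direction, and the core then invokes
 * ndo_setup_tc(TC_SETUP_BLOCK). The bind registers the per-direction
 * callback, through which all later classifier updates arrive:
 *
 *	tc qdisc add dev sw1p1 clsact
 *	  -> mlxsw_sp_setup_tc(TC_SETUP_BLOCK)
 *	    -> mlxsw_sp_setup_tc_block()		TC_BLOCK_BIND
 *	      -> tcf_block_cb_register(cb_ig / cb_eg)
 *	tc filter add dev sw1p1 ingress flower ...
 *	  -> mlxsw_sp_setup_tc_block_cb_ig(TC_SETUP_CLSFLOWER)
 */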
1828 { 1829 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1830 1831 switch (type) { 1832 case TC_SETUP_BLOCK: 1833 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1834 case TC_SETUP_QDISC_RED: 1835 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1836 default: 1837 return -EOPNOTSUPP; 1838 } 1839 } 1840 1841 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1842 .ndo_open = mlxsw_sp_port_open, 1843 .ndo_stop = mlxsw_sp_port_stop, 1844 .ndo_start_xmit = mlxsw_sp_port_xmit, 1845 .ndo_setup_tc = mlxsw_sp_setup_tc, 1846 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1847 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1848 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1849 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1850 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1851 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1852 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1853 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1854 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name, 1855 }; 1856 1857 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 1858 struct ethtool_drvinfo *drvinfo) 1859 { 1860 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1861 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1862 1863 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver)); 1864 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 1865 sizeof(drvinfo->version)); 1866 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 1867 "%d.%d.%d", 1868 mlxsw_sp->bus_info->fw_rev.major, 1869 mlxsw_sp->bus_info->fw_rev.minor, 1870 mlxsw_sp->bus_info->fw_rev.subminor); 1871 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 1872 sizeof(drvinfo->bus_info)); 1873 } 1874 1875 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, 1876 struct ethtool_pauseparam *pause) 1877 { 1878 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1879 1880 pause->rx_pause = mlxsw_sp_port->link.rx_pause; 1881 pause->tx_pause = mlxsw_sp_port->link.tx_pause; 1882 } 1883 1884 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, 1885 struct ethtool_pauseparam *pause) 1886 { 1887 char pfcc_pl[MLXSW_REG_PFCC_LEN]; 1888 1889 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); 1890 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); 1891 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); 1892 1893 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), 1894 pfcc_pl); 1895 } 1896 1897 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, 1898 struct ethtool_pauseparam *pause) 1899 { 1900 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1901 bool pause_en = pause->tx_pause || pause->rx_pause; 1902 int err; 1903 1904 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { 1905 netdev_err(dev, "PFC already enabled on port\n"); 1906 return -EINVAL; 1907 } 1908 1909 if (pause->autoneg) { 1910 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); 1911 return -EINVAL; 1912 } 1913 1914 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1915 if (err) { 1916 netdev_err(dev, "Failed to configure port's headroom\n"); 1917 return err; 1918 } 1919 1920 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); 1921 if (err) { 1922 netdev_err(dev, "Failed to set PAUSE parameters\n"); 1923 goto err_port_pause_configure; 1924 } 1925 1926 mlxsw_sp_port->link.rx_pause = pause->rx_pause; 1927 mlxsw_sp_port->link.tx_pause = pause->tx_pause; 1928 1929 
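	/* Both the headroom and the PFCC register now reflect the
	 * requested PAUSE settings; cache them so get_pauseparam
	 * reports the active state.
	 */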
return 0; 1930 1931 err_port_pause_configure: 1932 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1933 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1934 return err; 1935 } 1936 1937 struct mlxsw_sp_port_hw_stats { 1938 char str[ETH_GSTRING_LEN]; 1939 u64 (*getter)(const char *payload); 1940 bool cells_bytes; 1941 }; 1942 1943 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 1944 { 1945 .str = "a_frames_transmitted_ok", 1946 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 1947 }, 1948 { 1949 .str = "a_frames_received_ok", 1950 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 1951 }, 1952 { 1953 .str = "a_frame_check_sequence_errors", 1954 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 1955 }, 1956 { 1957 .str = "a_alignment_errors", 1958 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 1959 }, 1960 { 1961 .str = "a_octets_transmitted_ok", 1962 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 1963 }, 1964 { 1965 .str = "a_octets_received_ok", 1966 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 1967 }, 1968 { 1969 .str = "a_multicast_frames_xmitted_ok", 1970 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 1971 }, 1972 { 1973 .str = "a_broadcast_frames_xmitted_ok", 1974 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 1975 }, 1976 { 1977 .str = "a_multicast_frames_received_ok", 1978 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 1979 }, 1980 { 1981 .str = "a_broadcast_frames_received_ok", 1982 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 1983 }, 1984 { 1985 .str = "a_in_range_length_errors", 1986 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 1987 }, 1988 { 1989 .str = "a_out_of_range_length_field", 1990 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 1991 }, 1992 { 1993 .str = "a_frame_too_long_errors", 1994 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 1995 }, 1996 { 1997 .str = "a_symbol_error_during_carrier", 1998 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 1999 }, 2000 { 2001 .str = "a_mac_control_frames_transmitted", 2002 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 2003 }, 2004 { 2005 .str = "a_mac_control_frames_received", 2006 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 2007 }, 2008 { 2009 .str = "a_unsupported_opcodes_received", 2010 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 2011 }, 2012 { 2013 .str = "a_pause_mac_ctrl_frames_received", 2014 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 2015 }, 2016 { 2017 .str = "a_pause_mac_ctrl_frames_xmitted", 2018 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 2019 }, 2020 }; 2021 2022 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 2023 2024 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 2025 { 2026 .str = "rx_octets_prio", 2027 .getter = mlxsw_reg_ppcnt_rx_octets_get, 2028 }, 2029 { 2030 .str = "rx_frames_prio", 2031 .getter = mlxsw_reg_ppcnt_rx_frames_get, 2032 }, 2033 { 2034 .str = "tx_octets_prio", 2035 .getter = mlxsw_reg_ppcnt_tx_octets_get, 2036 }, 2037 { 2038 .str = "tx_frames_prio", 2039 .getter = mlxsw_reg_ppcnt_tx_frames_get, 2040 }, 2041 { 2042 .str = "rx_pause_prio", 2043 .getter = mlxsw_reg_ppcnt_rx_pause_get, 2044 }, 2045 { 2046 .str = "rx_pause_duration_prio", 2047 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 2048 }, 2049 { 2050 .str = "tx_pause_prio", 2051 .getter = mlxsw_reg_ppcnt_tx_pause_get, 2052 }, 2053 { 
2054 .str = "tx_pause_duration_prio", 2055 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 2056 }, 2057 }; 2058 2059 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 2060 2061 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 2062 { 2063 .str = "tc_transmit_queue_tc", 2064 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 2065 .cells_bytes = true, 2066 }, 2067 { 2068 .str = "tc_no_buffer_discard_uc_tc", 2069 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 2070 }, 2071 }; 2072 2073 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 2074 2075 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 2076 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \ 2077 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \ 2078 IEEE_8021QAZ_MAX_TCS) 2079 2080 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 2081 { 2082 int i; 2083 2084 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 2085 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 2086 mlxsw_sp_port_hw_prio_stats[i].str, prio); 2087 *p += ETH_GSTRING_LEN; 2088 } 2089 } 2090 2091 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 2092 { 2093 int i; 2094 2095 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 2096 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 2097 mlxsw_sp_port_hw_tc_stats[i].str, tc); 2098 *p += ETH_GSTRING_LEN; 2099 } 2100 } 2101 2102 static void mlxsw_sp_port_get_strings(struct net_device *dev, 2103 u32 stringset, u8 *data) 2104 { 2105 u8 *p = data; 2106 int i; 2107 2108 switch (stringset) { 2109 case ETH_SS_STATS: 2110 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2111 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2112 ETH_GSTRING_LEN); 2113 p += ETH_GSTRING_LEN; 2114 } 2115 2116 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2117 mlxsw_sp_port_get_prio_strings(&p, i); 2118 2119 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2120 mlxsw_sp_port_get_tc_strings(&p, i); 2121 2122 break; 2123 } 2124 } 2125 2126 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2127 enum ethtool_phys_id_state state) 2128 { 2129 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2130 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2131 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2132 bool active; 2133 2134 switch (state) { 2135 case ETHTOOL_ID_ACTIVE: 2136 active = true; 2137 break; 2138 case ETHTOOL_ID_INACTIVE: 2139 active = false; 2140 break; 2141 default: 2142 return -EOPNOTSUPP; 2143 } 2144 2145 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2146 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2147 } 2148 2149 static int 2150 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2151 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2152 { 2153 switch (grp) { 2154 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2155 *p_hw_stats = mlxsw_sp_port_hw_stats; 2156 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2157 break; 2158 case MLXSW_REG_PPCNT_PRIO_CNT: 2159 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2160 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2161 break; 2162 case MLXSW_REG_PPCNT_TC_CNT: 2163 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2164 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2165 break; 2166 default: 2167 WARN_ON(1); 2168 return -EOPNOTSUPP; 2169 } 2170 return 0; 2171 } 2172 2173 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2174 enum mlxsw_reg_ppcnt_grp grp, int prio, 2175 u64 *data, int data_index) 2176 { 2177 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2178 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 
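	/* Resolve the counter group into its descriptor array, fetch one
	 * PPCNT snapshot and convert any cell-based counters to bytes.
	 * The ethtool data[] layout is: IEEE 802.3 counters first, then
	 * eight per-priority blocks, then eight per-TC blocks (see
	 * MLXSW_SP_PORT_ETHTOOL_STATS_LEN above).
	 */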
2179 struct mlxsw_sp_port_hw_stats *hw_stats; 2180 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2181 int i, len; 2182 int err; 2183 2184 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2185 if (err) 2186 return; 2187 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2188 for (i = 0; i < len; i++) { 2189 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2190 if (!hw_stats[i].cells_bytes) 2191 continue; 2192 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2193 data[data_index + i]); 2194 } 2195 } 2196 2197 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2198 struct ethtool_stats *stats, u64 *data) 2199 { 2200 int i, data_index = 0; 2201 2202 /* IEEE 802.3 Counters */ 2203 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2204 data, data_index); 2205 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2206 2207 /* Per-Priority Counters */ 2208 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2209 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2210 data, data_index); 2211 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2212 } 2213 2214 /* Per-TC Counters */ 2215 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2216 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2217 data, data_index); 2218 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2219 } 2220 } 2221 2222 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2223 { 2224 switch (sset) { 2225 case ETH_SS_STATS: 2226 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; 2227 default: 2228 return -EOPNOTSUPP; 2229 } 2230 } 2231 2232 struct mlxsw_sp_port_link_mode { 2233 enum ethtool_link_mode_bit_indices mask_ethtool; 2234 u32 mask; 2235 u32 speed; 2236 }; 2237 2238 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = { 2239 { 2240 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2241 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2242 .speed = SPEED_100, 2243 }, 2244 { 2245 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2246 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2247 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2248 .speed = SPEED_1000, 2249 }, 2250 { 2251 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2252 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2253 .speed = SPEED_10000, 2254 }, 2255 { 2256 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 2257 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 2258 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2259 .speed = SPEED_10000, 2260 }, 2261 { 2262 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2263 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2264 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2265 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 2266 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2267 .speed = SPEED_10000, 2268 }, 2269 { 2270 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 2271 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 2272 .speed = SPEED_20000, 2273 }, 2274 { 2275 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 2276 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2277 .speed = SPEED_40000, 2278 }, 2279 { 2280 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 2281 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2282 .speed = SPEED_40000, 2283 }, 2284 { 2285 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 2286 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2287 .speed = SPEED_40000, 2288 }, 2289 { 2290 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 2291 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2292 .speed = SPEED_40000, 2293 }, 2294 { 2295 .mask = 
	  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2296 		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2297 		.speed		= SPEED_25000,
2298 	},
2299 	{
2300 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2301 		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2302 		.speed		= SPEED_25000,
2303 	},
2304 	{
2305 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2306 		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2307 		.speed		= SPEED_25000,
2308 	},
2314 	{
2315 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2316 		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2317 		.speed		= SPEED_50000,
2318 	},
2319 	{
2320 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2321 		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2322 		.speed		= SPEED_50000,
2323 	},
2324 	{
2325 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2326 		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2327 		.speed		= SPEED_50000,
2328 	},
2329 	{
2330 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2331 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2332 		.speed		= SPEED_56000,
2333 	},
2334 	{
2335 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2336 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2337 		.speed		= SPEED_56000,
2338 	},
2339 	{
2340 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2341 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2342 		.speed		= SPEED_56000,
2343 	},
2344 	{
2345 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2346 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2347 		.speed		= SPEED_56000,
2348 	},
2349 	{
2350 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2351 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2352 		.speed		= SPEED_100000,
2353 	},
2354 	{
2355 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2356 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2357 		.speed		= SPEED_100000,
2358 	},
2359 	{
2360 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2361 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2362 		.speed		= SPEED_100000,
2363 	},
2364 	{
2365 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2366 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2367 		.speed		= SPEED_100000,
2368 	},
2369 };
2370
2371 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2372
2373 static void
2374 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2375 				  struct ethtool_link_ksettings *cmd)
2376 {
2377 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2378 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2379 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2380 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2381 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2382 			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2383 		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2384
2385 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2386 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2387 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2388 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2389 			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
2390 		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2391 }
2392
2393 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
2394 {
2395 	int i;
2396
2397 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2398 		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
2399 			__set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2400 				  mode);
2401 	}
2402 }
2403
2404 static void
mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, 2405 struct ethtool_link_ksettings *cmd) 2406 { 2407 u32 speed = SPEED_UNKNOWN; 2408 u8 duplex = DUPLEX_UNKNOWN; 2409 int i; 2410 2411 if (!carrier_ok) 2412 goto out; 2413 2414 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2415 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { 2416 speed = mlxsw_sp_port_link_mode[i].speed; 2417 duplex = DUPLEX_FULL; 2418 break; 2419 } 2420 } 2421 out: 2422 cmd->base.speed = speed; 2423 cmd->base.duplex = duplex; 2424 } 2425 2426 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) 2427 { 2428 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2429 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2430 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2431 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2432 return PORT_FIBRE; 2433 2434 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2435 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2436 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) 2437 return PORT_DA; 2438 2439 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2440 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2441 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2442 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) 2443 return PORT_NONE; 2444 2445 return PORT_OTHER; 2446 } 2447 2448 static u32 2449 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd) 2450 { 2451 u32 ptys_proto = 0; 2452 int i; 2453 2454 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2455 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2456 cmd->link_modes.advertising)) 2457 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2458 } 2459 return ptys_proto; 2460 } 2461 2462 static u32 mlxsw_sp_to_ptys_speed(u32 speed) 2463 { 2464 u32 ptys_proto = 0; 2465 int i; 2466 2467 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2468 if (speed == mlxsw_sp_port_link_mode[i].speed) 2469 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2470 } 2471 return ptys_proto; 2472 } 2473 2474 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) 2475 { 2476 u32 ptys_proto = 0; 2477 int i; 2478 2479 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2480 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed) 2481 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2482 } 2483 return ptys_proto; 2484 } 2485 2486 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap, 2487 struct ethtool_link_ksettings *cmd) 2488 { 2489 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 2490 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 2491 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 2492 2493 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd); 2494 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported); 2495 } 2496 2497 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg, 2498 struct ethtool_link_ksettings *cmd) 2499 { 2500 if (!autoneg) 2501 return; 2502 2503 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 2504 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising); 2505 } 2506 2507 static void 2508 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status, 2509 struct ethtool_link_ksettings *cmd) 2510 { 2511 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp) 2512 return; 2513 2514 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg); 2515 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising); 2516 } 2517 2518 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 
2519 					    struct ethtool_link_ksettings *cmd)
2520 {
2521 	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2522 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2523 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2524 	char ptys_pl[MLXSW_REG_PTYS_LEN];
2525 	u8 autoneg_status;
2526 	bool autoneg;
2527 	int err;
2528
2529 	autoneg = mlxsw_sp_port->link.autoneg;
2530 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2531 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2532 	if (err)
2533 		return err;
2534 	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2535 				  &eth_proto_oper);
2536
2537 	mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2538
2539 	mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2540
2541 	eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2542 	autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2543 	mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2544
2545 	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2546 	cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2547 	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2548 					cmd);
2549
2550 	return 0;
2551 }
2552
2553 static int
2554 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2555 				 const struct ethtool_link_ksettings *cmd)
2556 {
2557 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2558 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2559 	char ptys_pl[MLXSW_REG_PTYS_LEN];
2560 	u32 eth_proto_cap, eth_proto_new;
2561 	bool autoneg;
2562 	int err;
2563
2564 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2565 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2566 	if (err)
2567 		return err;
2568 	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2569
2570 	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2571 	eth_proto_new = autoneg ?
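	/* Autoneg enabled: advertise exactly the requested link modes.
	 * Autoneg disabled: enable every mode matching the forced speed.
	 */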
2572 		mlxsw_sp_to_ptys_advert_link(cmd) :
2573 		mlxsw_sp_to_ptys_speed(cmd->base.speed);
2574
2575 	eth_proto_new = eth_proto_new & eth_proto_cap;
2576 	if (!eth_proto_new) {
2577 		netdev_err(dev, "No supported speed requested\n");
2578 		return -EINVAL;
2579 	}
2580
2581 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2582 				eth_proto_new);
2583 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2584 	if (err)
2585 		return err;
2586
2587 	mlxsw_sp_port->link.autoneg = autoneg;
2588
2589 	if (!netif_running(dev))
2590 		return 0;
2591
2592 	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2593 	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2594
2595 	return 0;
2596 }
2597
2598 static int mlxsw_sp_flash_device(struct net_device *dev,
2599 				 struct ethtool_flash *flash)
2600 {
2601 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2602 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2603 	const struct firmware *firmware;
2604 	int err;
2605
2606 	if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
2607 		return -EOPNOTSUPP;
2608
2609 	dev_hold(dev);
2610 	rtnl_unlock();
2611
2612 	err = request_firmware_direct(&firmware, flash->data, &dev->dev);
2613 	if (err)
2614 		goto out;
2615 	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
2616 	release_firmware(firmware);
2617 out:
2618 	rtnl_lock();
2619 	dev_put(dev);
2620 	return err;
2621 }
2622
2623 #define MLXSW_SP_I2C_ADDR_LOW 0x50
2624 #define MLXSW_SP_I2C_ADDR_HIGH 0x51
2625 #define MLXSW_SP_EEPROM_PAGE_LENGTH 256
2626
2627 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
2628 					u16 offset, u16 size, void *data,
2629 					unsigned int *p_read_size)
2630 {
2631 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2632 	char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
2633 	char mcia_pl[MLXSW_REG_MCIA_LEN];
2634 	u16 i2c_addr;
2635 	int status;
2636 	int err;
2637
2638 	size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
2639
2640 	if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
2641 	    offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
2642 		/* Cross-page read; read until offset 256 in the low page */
2643 		size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset;
2644
2645 	i2c_addr = MLXSW_SP_I2C_ADDR_LOW;
2646 	if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) {
2647 		i2c_addr = MLXSW_SP_I2C_ADDR_HIGH;
2648 		offset -= MLXSW_SP_EEPROM_PAGE_LENGTH;
2649 	}
2650
2651 	mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
2652 			    0, 0, offset, size, i2c_addr);
2653
2654 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
2655 	if (err)
2656 		return err;
2657
2658 	status = mlxsw_reg_mcia_status_get(mcia_pl);
2659 	if (status)
2660 		return -EIO;
2661
2662 	mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
2663 	memcpy(data, eeprom_tmp, size);
2664 	*p_read_size = size;
2665
2666 	return 0;
2667 }
2668
2669 enum mlxsw_sp_eeprom_module_info_rev_id {
2670 	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC	= 0x00,
2671 	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436		= 0x01,
2672 	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636		= 0x03,
2673 };
2674
2675 enum mlxsw_sp_eeprom_module_info_id {
2676 	MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP		= 0x03,
2677 	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP		= 0x0C,
2678 	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS	= 0x0D,
2679 	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28		= 0x11,
2680 };
2681
2682 enum mlxsw_sp_eeprom_module_info {
2683 	MLXSW_SP_EEPROM_MODULE_INFO_ID,
2684 	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
2685 	MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2686 };
2687
2688 static int mlxsw_sp_get_module_info(struct net_device *netdev,
2689 				    struct ethtool_modinfo *modinfo)
2690 {
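	/* The first two EEPROM bytes identify the transceiver type and
	 * revision; map them to the SFF specification the dump should be
	 * parsed with: SFF-8436 for QSFP and older QSFP+ revisions,
	 * SFF-8636 for QSFP28 and newer QSFP+ revisions, SFF-8472 for SFP.
	 * For example, `ethtool -m sw1p1` (the port name is a placeholder)
	 * exercises this path.
	 */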
2691 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2692 u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE]; 2693 u8 module_rev_id, module_id; 2694 unsigned int read_size; 2695 int err; 2696 2697 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0, 2698 MLXSW_SP_EEPROM_MODULE_INFO_SIZE, 2699 module_info, &read_size); 2700 if (err) 2701 return err; 2702 2703 if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE) 2704 return -EIO; 2705 2706 module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID]; 2707 module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID]; 2708 2709 switch (module_id) { 2710 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP: 2711 modinfo->type = ETH_MODULE_SFF_8436; 2712 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2713 break; 2714 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS: 2715 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28: 2716 if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 || 2717 module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) { 2718 modinfo->type = ETH_MODULE_SFF_8636; 2719 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2720 } else { 2721 modinfo->type = ETH_MODULE_SFF_8436; 2722 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2723 } 2724 break; 2725 case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP: 2726 modinfo->type = ETH_MODULE_SFF_8472; 2727 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2728 break; 2729 default: 2730 return -EINVAL; 2731 } 2732 2733 return 0; 2734 } 2735 2736 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 2737 struct ethtool_eeprom *ee, 2738 u8 *data) 2739 { 2740 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2741 int offset = ee->offset; 2742 unsigned int read_size; 2743 int i = 0; 2744 int err; 2745 2746 if (!ee->len) 2747 return -EINVAL; 2748 2749 memset(data, 0, ee->len); 2750 2751 while (i < ee->len) { 2752 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset, 2753 ee->len - i, data + i, 2754 &read_size); 2755 if (err) { 2756 netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n"); 2757 return err; 2758 } 2759 2760 i += read_size; 2761 offset += read_size; 2762 } 2763 2764 return 0; 2765 } 2766 2767 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 2768 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 2769 .get_link = ethtool_op_get_link, 2770 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 2771 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 2772 .get_strings = mlxsw_sp_port_get_strings, 2773 .set_phys_id = mlxsw_sp_port_set_phys_id, 2774 .get_ethtool_stats = mlxsw_sp_port_get_stats, 2775 .get_sset_count = mlxsw_sp_port_get_sset_count, 2776 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 2777 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 2778 .flash_device = mlxsw_sp_flash_device, 2779 .get_module_info = mlxsw_sp_get_module_info, 2780 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 2781 }; 2782 2783 static int 2784 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 2785 { 2786 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2787 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width; 2788 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2789 u32 eth_proto_admin; 2790 2791 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); 2792 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2793 eth_proto_admin); 2794 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2795 } 2796 2797 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 2798 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 2799 bool dwrr, u8 dwrr_weight) 
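/* Configure one scheduling element in the QEEC hierarchy (port ->
 * group -> subgroup -> TC). index selects the element within its level
 * and next_index links it to its parent element; when dwrr is set the
 * element shares bandwidth according to dwrr_weight, otherwise it is
 * scheduled by strict priority.
 */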
2800 {
2801 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2802 	char qeec_pl[MLXSW_REG_QEEC_LEN];
2803
2804 	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2805 			    next_index);
2806 	mlxsw_reg_qeec_de_set(qeec_pl, true);
2807 	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2808 	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2809 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2810 }
2811
2812 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2813 				  enum mlxsw_reg_qeec_hr hr, u8 index,
2814 				  u8 next_index, u32 maxrate)
2815 {
2816 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2817 	char qeec_pl[MLXSW_REG_QEEC_LEN];
2818
2819 	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2820 			    next_index);
2821 	mlxsw_reg_qeec_mase_set(qeec_pl, true);
2822 	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2823 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2824 }
2825
2826 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2827 			      u8 switch_prio, u8 tclass)
2828 {
2829 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2830 	char qtct_pl[MLXSW_REG_QTCT_LEN];
2831
2832 	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2833 			    tclass);
2834 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2835 }
2836
2837 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2838 {
2839 	int err, i;
2840
2841 	/* Set up the elements hierarchy, so that each TC is linked to
2842 	 * one subgroup, and all subgroups are members of the same group.
2843 	 */
2844 	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2845 				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2846 				    0);
2847 	if (err)
2848 		return err;
2849 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2850 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2851 					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2852 					    0, false, 0);
2853 		if (err)
2854 			return err;
2855 	}
2856 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2857 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2858 					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2859 					    false, 0);
2860 		if (err)
2861 			return err;
2862 	}
2863
2864 	/* Make sure the max shaper is disabled in all hierarchies that
2865 	 * support it.
2866 	 */
2867 	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2868 					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2869 					    MLXSW_REG_QEEC_MAS_DIS);
2870 	if (err)
2871 		return err;
2872 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2873 		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2874 						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2875 						    i, 0,
2876 						    MLXSW_REG_QEEC_MAS_DIS);
2877 		if (err)
2878 			return err;
2879 	}
2880 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2881 		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2882 						    MLXSW_REG_QEEC_HIERARCY_TC,
2883 						    i, i,
2884 						    MLXSW_REG_QEEC_MAS_DIS);
2885 		if (err)
2886 			return err;
2887 	}
2888
2889 	/* Map all priorities to traffic class 0.
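	 * This is only the initial default; DCB/ETS configuration from
	 * user space can later remap switch priorities to other traffic
	 * classes through the same mlxsw_sp_port_prio_tc_set() helper.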
	 */
2890 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2891 		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2892 		if (err)
2893 			return err;
2894 	}
2895
2896 	return 0;
2897 }
2898
2899 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2900 				bool split, u8 module, u8 width, u8 lane)
2901 {
2902 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2903 	struct mlxsw_sp_port *mlxsw_sp_port;
2904 	struct net_device *dev;
2905 	int err;
2906
2907 	err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2908 	if (err) {
2909 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2910 			local_port);
2911 		return err;
2912 	}
2913
2914 	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2915 	if (!dev) {
2916 		err = -ENOMEM;
2917 		goto err_alloc_etherdev;
2918 	}
2919 	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
2920 	mlxsw_sp_port = netdev_priv(dev);
2921 	mlxsw_sp_port->dev = dev;
2922 	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2923 	mlxsw_sp_port->local_port = local_port;
2924 	mlxsw_sp_port->pvid = 1;
2925 	mlxsw_sp_port->split = split;
2926 	mlxsw_sp_port->mapping.module = module;
2927 	mlxsw_sp_port->mapping.width = width;
2928 	mlxsw_sp_port->mapping.lane = lane;
2929 	mlxsw_sp_port->link.autoneg = 1;
2930 	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
2931 	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
2932
2933 	mlxsw_sp_port->pcpu_stats =
2934 		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2935 	if (!mlxsw_sp_port->pcpu_stats) {
2936 		err = -ENOMEM;
2937 		goto err_alloc_stats;
2938 	}
2939
2940 	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2941 					GFP_KERNEL);
2942 	if (!mlxsw_sp_port->sample) {
2943 		err = -ENOMEM;
2944 		goto err_alloc_sample;
2945 	}
2946
2947 	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
2948 			  &update_stats_cache);
2949
2950 	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2951 	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2952
2953 	err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
2954 	if (err) {
2955 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
2956 			mlxsw_sp_port->local_port);
2957 		goto err_port_module_map;
2958 	}
2959
2960 	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2961 	if (err) {
2962 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2963 			mlxsw_sp_port->local_port);
2964 		goto err_port_swid_set;
2965 	}
2966
2967 	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2968 	if (err) {
2969 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2970 			mlxsw_sp_port->local_port);
2971 		goto err_dev_addr_init;
2972 	}
2973
2974 	netif_carrier_off(dev);
2975
2976 	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
2977 			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2978 	dev->hw_features |= NETIF_F_HW_TC;
2979
2980 	dev->min_mtu = 0;
2981 	dev->max_mtu = ETH_MAX_MTU;
2982
2983 	/* Each packet needs to have a Tx header (metadata) on top of all
2984 	 * other headers.
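	 * Reserving MLXSW_TXHDR_LEN bytes of headroom lets the xmit path
	 * push that header without reallocating the skb on the fast path.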
2985 */ 2986 dev->needed_headroom = MLXSW_TXHDR_LEN; 2987 2988 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 2989 if (err) { 2990 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 2991 mlxsw_sp_port->local_port); 2992 goto err_port_system_port_mapping_set; 2993 } 2994 2995 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 2996 if (err) { 2997 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 2998 mlxsw_sp_port->local_port); 2999 goto err_port_speed_by_width_set; 3000 } 3001 3002 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 3003 if (err) { 3004 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 3005 mlxsw_sp_port->local_port); 3006 goto err_port_mtu_set; 3007 } 3008 3009 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3010 if (err) 3011 goto err_port_admin_status_set; 3012 3013 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 3014 if (err) { 3015 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 3016 mlxsw_sp_port->local_port); 3017 goto err_port_buffers_init; 3018 } 3019 3020 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 3021 if (err) { 3022 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 3023 mlxsw_sp_port->local_port); 3024 goto err_port_ets_init; 3025 } 3026 3027 /* ETS and buffers must be initialized before DCB. */ 3028 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 3029 if (err) { 3030 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 3031 mlxsw_sp_port->local_port); 3032 goto err_port_dcb_init; 3033 } 3034 3035 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 3036 if (err) { 3037 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 3038 mlxsw_sp_port->local_port); 3039 goto err_port_fids_init; 3040 } 3041 3042 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); 3043 if (IS_ERR(mlxsw_sp_port_vlan)) { 3044 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 3045 mlxsw_sp_port->local_port); 3046 err = PTR_ERR(mlxsw_sp_port_vlan); 3047 goto err_port_vlan_get; 3048 } 3049 3050 mlxsw_sp_port_switchdev_init(mlxsw_sp_port); 3051 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 3052 err = register_netdev(dev); 3053 if (err) { 3054 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 3055 mlxsw_sp_port->local_port); 3056 goto err_register_netdev; 3057 } 3058 3059 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 3060 mlxsw_sp_port, dev, mlxsw_sp_port->split, 3061 module); 3062 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 3063 return 0; 3064 3065 err_register_netdev: 3066 mlxsw_sp->ports[local_port] = NULL; 3067 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 3068 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); 3069 err_port_vlan_get: 3070 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3071 err_port_fids_init: 3072 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3073 err_port_dcb_init: 3074 err_port_ets_init: 3075 err_port_buffers_init: 3076 err_port_admin_status_set: 3077 err_port_mtu_set: 3078 err_port_speed_by_width_set: 3079 err_port_system_port_mapping_set: 3080 err_dev_addr_init: 3081 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3082 err_port_swid_set: 3083 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3084 err_port_module_map: 3085 kfree(mlxsw_sp_port->sample); 3086 err_alloc_sample: 3087 free_percpu(mlxsw_sp_port->pcpu_stats); 3088 err_alloc_stats: 3089 free_netdev(dev); 3090 err_alloc_etherdev: 3091 
mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3092 return err; 3093 } 3094 3095 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3096 { 3097 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3098 3099 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 3100 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3101 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3102 mlxsw_sp->ports[local_port] = NULL; 3103 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 3104 mlxsw_sp_port_vlan_flush(mlxsw_sp_port); 3105 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3106 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3107 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3108 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3109 kfree(mlxsw_sp_port->sample); 3110 free_percpu(mlxsw_sp_port->pcpu_stats); 3111 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3112 free_netdev(mlxsw_sp_port->dev); 3113 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3114 } 3115 3116 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3117 { 3118 return mlxsw_sp->ports[local_port] != NULL; 3119 } 3120 3121 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 3122 { 3123 int i; 3124 3125 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 3126 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3127 mlxsw_sp_port_remove(mlxsw_sp, i); 3128 kfree(mlxsw_sp->port_to_module); 3129 kfree(mlxsw_sp->ports); 3130 } 3131 3132 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3133 { 3134 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3135 u8 module, width, lane; 3136 size_t alloc_size; 3137 int i; 3138 int err; 3139 3140 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 3141 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 3142 if (!mlxsw_sp->ports) 3143 return -ENOMEM; 3144 3145 mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int), 3146 GFP_KERNEL); 3147 if (!mlxsw_sp->port_to_module) { 3148 err = -ENOMEM; 3149 goto err_port_to_module_alloc; 3150 } 3151 3152 for (i = 1; i < max_ports; i++) { 3153 /* Mark as invalid */ 3154 mlxsw_sp->port_to_module[i] = -1; 3155 3156 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 3157 &width, &lane); 3158 if (err) 3159 goto err_port_module_info_get; 3160 if (!width) 3161 continue; 3162 mlxsw_sp->port_to_module[i] = module; 3163 err = mlxsw_sp_port_create(mlxsw_sp, i, false, 3164 module, width, lane); 3165 if (err) 3166 goto err_port_create; 3167 } 3168 return 0; 3169 3170 err_port_create: 3171 err_port_module_info_get: 3172 for (i--; i >= 1; i--) 3173 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3174 mlxsw_sp_port_remove(mlxsw_sp, i); 3175 kfree(mlxsw_sp->port_to_module); 3176 err_port_to_module_alloc: 3177 kfree(mlxsw_sp->ports); 3178 return err; 3179 } 3180 3181 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 3182 { 3183 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 3184 3185 return local_port - offset; 3186 } 3187 3188 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 3189 u8 module, unsigned int count) 3190 { 3191 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 3192 int err, i; 3193 3194 for (i = 0; i < count; i++) { 3195 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, 3196 module, width, i * width); 3197 if (err) 3198 goto err_port_create; 3199 } 3200 3201 return 0; 3202 3203 err_port_create: 3204 for (i--; i >= 0; i--) 3205 if (mlxsw_sp_port_created(mlxsw_sp, 
base_port + i)) 3206 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3207 return err; 3208 } 3209 3210 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 3211 u8 base_port, unsigned int count) 3212 { 3213 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 3214 int i; 3215 3216 /* Split by four means we need to re-create two ports, otherwise 3217 * only one. 3218 */ 3219 count = count / 2; 3220 3221 for (i = 0; i < count; i++) { 3222 local_port = base_port + i * 2; 3223 if (mlxsw_sp->port_to_module[local_port] < 0) 3224 continue; 3225 module = mlxsw_sp->port_to_module[local_port]; 3226 3227 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 3228 width, 0); 3229 } 3230 } 3231 3232 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 3233 unsigned int count) 3234 { 3235 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3236 struct mlxsw_sp_port *mlxsw_sp_port; 3237 u8 module, cur_width, base_port; 3238 int i; 3239 int err; 3240 3241 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3242 if (!mlxsw_sp_port) { 3243 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3244 local_port); 3245 return -EINVAL; 3246 } 3247 3248 module = mlxsw_sp_port->mapping.module; 3249 cur_width = mlxsw_sp_port->mapping.width; 3250 3251 if (count != 2 && count != 4) { 3252 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 3253 return -EINVAL; 3254 } 3255 3256 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 3257 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 3258 return -EINVAL; 3259 } 3260 3261 /* Make sure we have enough slave (even) ports for the split. */ 3262 if (count == 2) { 3263 base_port = local_port; 3264 if (mlxsw_sp->ports[base_port + 1]) { 3265 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3266 return -EINVAL; 3267 } 3268 } else { 3269 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3270 if (mlxsw_sp->ports[base_port + 1] || 3271 mlxsw_sp->ports[base_port + 3]) { 3272 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3273 return -EINVAL; 3274 } 3275 } 3276 3277 for (i = 0; i < count; i++) 3278 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3279 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3280 3281 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); 3282 if (err) { 3283 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 3284 goto err_port_split_create; 3285 } 3286 3287 return 0; 3288 3289 err_port_split_create: 3290 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3291 return err; 3292 } 3293 3294 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) 3295 { 3296 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3297 struct mlxsw_sp_port *mlxsw_sp_port; 3298 u8 cur_width, base_port; 3299 unsigned int count; 3300 int i; 3301 3302 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3303 if (!mlxsw_sp_port) { 3304 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3305 local_port); 3306 return -EINVAL; 3307 } 3308 3309 if (!mlxsw_sp_port->split) { 3310 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); 3311 return -EINVAL; 3312 } 3313 3314 cur_width = mlxsw_sp_port->mapping.width; 3315 count = cur_width == 1 ? 4 : 2; 3316 3317 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3318 3319 /* Determine which ports to remove. 
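	 * A split by two occupies only half of the four-port cluster, so
	 * pick the pair (base_port or base_port + 2) that contains
	 * local_port; a split by four always starts at the cluster base.
	 * (Triggered from user space via, e.g.,
	 * `devlink port unsplit pci/0000:03:00.0/1`, where the devlink
	 * handle is a placeholder.)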
*/ 3320 if (count == 2 && local_port >= base_port + 2) 3321 base_port = base_port + 2; 3322 3323 for (i = 0; i < count; i++) 3324 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3325 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3326 3327 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3328 3329 return 0; 3330 } 3331 3332 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 3333 char *pude_pl, void *priv) 3334 { 3335 struct mlxsw_sp *mlxsw_sp = priv; 3336 struct mlxsw_sp_port *mlxsw_sp_port; 3337 enum mlxsw_reg_pude_oper_status status; 3338 u8 local_port; 3339 3340 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 3341 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3342 if (!mlxsw_sp_port) 3343 return; 3344 3345 status = mlxsw_reg_pude_oper_status_get(pude_pl); 3346 if (status == MLXSW_PORT_OPER_STATUS_UP) { 3347 netdev_info(mlxsw_sp_port->dev, "link up\n"); 3348 netif_carrier_on(mlxsw_sp_port->dev); 3349 } else { 3350 netdev_info(mlxsw_sp_port->dev, "link down\n"); 3351 netif_carrier_off(mlxsw_sp_port->dev); 3352 } 3353 } 3354 3355 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 3356 u8 local_port, void *priv) 3357 { 3358 struct mlxsw_sp *mlxsw_sp = priv; 3359 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3360 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 3361 3362 if (unlikely(!mlxsw_sp_port)) { 3363 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 3364 local_port); 3365 return; 3366 } 3367 3368 skb->dev = mlxsw_sp_port->dev; 3369 3370 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 3371 u64_stats_update_begin(&pcpu_stats->syncp); 3372 pcpu_stats->rx_packets++; 3373 pcpu_stats->rx_bytes += skb->len; 3374 u64_stats_update_end(&pcpu_stats->syncp); 3375 3376 skb->protocol = eth_type_trans(skb, skb->dev); 3377 netif_receive_skb(skb); 3378 } 3379 3380 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 3381 void *priv) 3382 { 3383 skb->offload_fwd_mark = 1; 3384 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3385 } 3386 3387 static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb, 3388 u8 local_port, void *priv) 3389 { 3390 skb->offload_mr_fwd_mark = 1; 3391 skb->offload_fwd_mark = 1; 3392 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3393 } 3394 3395 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 3396 void *priv) 3397 { 3398 struct mlxsw_sp *mlxsw_sp = priv; 3399 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3400 struct psample_group *psample_group; 3401 u32 size; 3402 3403 if (unlikely(!mlxsw_sp_port)) { 3404 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 3405 local_port); 3406 goto out; 3407 } 3408 if (unlikely(!mlxsw_sp_port->sample)) { 3409 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 3410 local_port); 3411 goto out; 3412 } 3413 3414 size = mlxsw_sp_port->sample->truncate ? 
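	       /* If the sample action requested truncation, cap the length
		* reported to psample at trunc_size; the skb itself is not
		* trimmed here.
		*/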
3415 mlxsw_sp_port->sample->trunc_size : skb->len; 3416 3417 rcu_read_lock(); 3418 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 3419 if (!psample_group) 3420 goto out_unlock; 3421 psample_sample_packet(psample_group, skb, size, 3422 mlxsw_sp_port->dev->ifindex, 0, 3423 mlxsw_sp_port->sample->rate); 3424 out_unlock: 3425 rcu_read_unlock(); 3426 out: 3427 consume_skb(skb); 3428 } 3429 3430 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3431 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 3432 _is_ctrl, SP_##_trap_group, DISCARD) 3433 3434 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3435 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 3436 _is_ctrl, SP_##_trap_group, DISCARD) 3437 3438 #define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3439 MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action, \ 3440 _is_ctrl, SP_##_trap_group, DISCARD) 3441 3442 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 3443 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 3444 3445 static const struct mlxsw_listener mlxsw_sp_listener[] = { 3446 /* Events */ 3447 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 3448 /* L2 traps */ 3449 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 3450 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 3451 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true), 3452 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 3453 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 3454 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 3455 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 3456 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 3457 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 3458 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 3459 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 3460 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), 3461 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, 3462 false), 3463 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 3464 false), 3465 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, 3466 false), 3467 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 3468 false), 3469 /* L3 traps */ 3470 MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3471 MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3472 MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3473 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), 3474 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 3475 false), 3476 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), 3477 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 3478 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), 3479 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, 3480 false), 3481 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), 3482 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), 3483 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), 3484 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), 3485 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), 3486 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), 3487 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 3488 false), 3489 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, 
TRAP_TO_CPU, IPV6_ND, 3490 false), 3491 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 3492 false), 3493 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 3494 false), 3495 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), 3496 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 3497 false), 3498 MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false), 3499 MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false), 3500 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), 3501 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), 3502 MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3503 /* PKT Sample trap */ 3504 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 3505 false, SP_IP2ME, DISCARD), 3506 /* ACL trap */ 3507 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), 3508 /* Multicast Router Traps */ 3509 MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), 3510 MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), 3511 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 3512 MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 3513 }; 3514 3515 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 3516 { 3517 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 3518 enum mlxsw_reg_qpcr_ir_units ir_units; 3519 int max_cpu_policers; 3520 bool is_bytes; 3521 u8 burst_size; 3522 u32 rate; 3523 int i, err; 3524 3525 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 3526 return -EIO; 3527 3528 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3529 3530 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 3531 for (i = 0; i < max_cpu_policers; i++) { 3532 is_bytes = false; 3533 switch (i) { 3534 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3535 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3536 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3537 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3538 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 3539 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 3540 rate = 128; 3541 burst_size = 7; 3542 break; 3543 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3544 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 3545 rate = 16 * 1024; 3546 burst_size = 10; 3547 break; 3548 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 3549 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3550 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3551 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 3552 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3553 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3554 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 3555 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 3556 rate = 1024; 3557 burst_size = 7; 3558 break; 3559 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3560 is_bytes = true; 3561 rate = 4 * 1024; 3562 burst_size = 4; 3563 break; 3564 default: 3565 continue; 3566 } 3567 3568 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 3569 burst_size); 3570 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 3571 if (err) 3572 return err; 3573 } 3574 3575 return 0; 3576 } 3577 3578 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 3579 { 3580 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3581 enum mlxsw_reg_htgt_trap_group i; 3582 int max_cpu_policers; 3583 int max_trap_groups; 3584 u8 priority, tc; 3585 u16 policer_id; 3586 int err; 3587 3588 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 3589 return -EIO; 3590 3591 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 3592 max_cpu_policers = 
MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3593 3594 for (i = 0; i < max_trap_groups; i++) { 3595 policer_id = i; 3596 switch (i) { 3597 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3598 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3599 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3600 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3601 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 3602 priority = 5; 3603 tc = 5; 3604 break; 3605 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 3606 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3607 priority = 4; 3608 tc = 4; 3609 break; 3610 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3611 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3612 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 3613 priority = 3; 3614 tc = 3; 3615 break; 3616 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3617 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 3618 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 3619 priority = 2; 3620 tc = 2; 3621 break; 3622 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 3623 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3624 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3625 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 3626 priority = 1; 3627 tc = 1; 3628 break; 3629 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 3630 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 3631 tc = MLXSW_REG_HTGT_DEFAULT_TC; 3632 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 3633 break; 3634 default: 3635 continue; 3636 } 3637 3638 if (max_cpu_policers <= policer_id && 3639 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 3640 return -EIO; 3641 3642 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 3643 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3644 if (err) 3645 return err; 3646 } 3647 3648 return 0; 3649 } 3650 3651 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 3652 { 3653 int i; 3654 int err; 3655 3656 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 3657 if (err) 3658 return err; 3659 3660 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 3661 if (err) 3662 return err; 3663 3664 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3665 err = mlxsw_core_trap_register(mlxsw_sp->core, 3666 &mlxsw_sp_listener[i], 3667 mlxsw_sp); 3668 if (err) 3669 goto err_listener_register; 3670 3671 } 3672 return 0; 3673 3674 err_listener_register: 3675 for (i--; i >= 0; i--) { 3676 mlxsw_core_trap_unregister(mlxsw_sp->core, 3677 &mlxsw_sp_listener[i], 3678 mlxsw_sp); 3679 } 3680 return err; 3681 } 3682 3683 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 3684 { 3685 int i; 3686 3687 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3688 mlxsw_core_trap_unregister(mlxsw_sp->core, 3689 &mlxsw_sp_listener[i], 3690 mlxsw_sp); 3691 } 3692 } 3693 3694 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 3695 { 3696 char slcr_pl[MLXSW_REG_SLCR_LEN]; 3697 int err; 3698 3699 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 3700 MLXSW_REG_SLCR_LAG_HASH_DMAC | 3701 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 3702 MLXSW_REG_SLCR_LAG_HASH_VLANID | 3703 MLXSW_REG_SLCR_LAG_HASH_SIP | 3704 MLXSW_REG_SLCR_LAG_HASH_DIP | 3705 MLXSW_REG_SLCR_LAG_HASH_SPORT | 3706 MLXSW_REG_SLCR_LAG_HASH_DPORT | 3707 MLXSW_REG_SLCR_LAG_HASH_IPPROTO); 3708 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 3709 if (err) 3710 return err; 3711 3712 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 3713 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 3714 return -EIO; 3715 3716 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 3717 sizeof(struct mlxsw_sp_upper), 3718 GFP_KERNEL); 3719 if (!mlxsw_sp->lags) 
3720 return -ENOMEM; 3721 3722 return 0; 3723 } 3724 3725 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 3726 { 3727 kfree(mlxsw_sp->lags); 3728 } 3729 3730 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 3731 { 3732 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3733 3734 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 3735 MLXSW_REG_HTGT_INVALID_POLICER, 3736 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 3737 MLXSW_REG_HTGT_DEFAULT_TC); 3738 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3739 } 3740 3741 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 3742 unsigned long event, void *ptr); 3743 3744 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 3745 const struct mlxsw_bus_info *mlxsw_bus_info) 3746 { 3747 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3748 int err; 3749 3750 mlxsw_sp->core = mlxsw_core; 3751 mlxsw_sp->bus_info = mlxsw_bus_info; 3752 3753 err = mlxsw_sp_fw_rev_validate(mlxsw_sp); 3754 if (err) { 3755 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 3756 return err; 3757 } 3758 3759 err = mlxsw_sp_base_mac_get(mlxsw_sp); 3760 if (err) { 3761 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 3762 return err; 3763 } 3764 3765 err = mlxsw_sp_kvdl_init(mlxsw_sp); 3766 if (err) { 3767 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 3768 return err; 3769 } 3770 3771 err = mlxsw_sp_fids_init(mlxsw_sp); 3772 if (err) { 3773 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 3774 goto err_fids_init; 3775 } 3776 3777 err = mlxsw_sp_traps_init(mlxsw_sp); 3778 if (err) { 3779 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 3780 goto err_traps_init; 3781 } 3782 3783 err = mlxsw_sp_buffers_init(mlxsw_sp); 3784 if (err) { 3785 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 3786 goto err_buffers_init; 3787 } 3788 3789 err = mlxsw_sp_lag_init(mlxsw_sp); 3790 if (err) { 3791 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 3792 goto err_lag_init; 3793 } 3794 3795 err = mlxsw_sp_switchdev_init(mlxsw_sp); 3796 if (err) { 3797 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 3798 goto err_switchdev_init; 3799 } 3800 3801 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 3802 if (err) { 3803 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 3804 goto err_counter_pool_init; 3805 } 3806 3807 err = mlxsw_sp_afa_init(mlxsw_sp); 3808 if (err) { 3809 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 3810 goto err_afa_init; 3811 } 3812 3813 err = mlxsw_sp_router_init(mlxsw_sp); 3814 if (err) { 3815 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 3816 goto err_router_init; 3817 } 3818 3819 /* Initialize netdevice notifier after router is initialized, so that 3820 * the event handler can use router structures. 
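 * The teardown paths keep this ordering as well: the notifier is
 * unregistered before mlxsw_sp_router_fini() on both the error path
 * below and in mlxsw_sp_fini().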
3821 */ 3822 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 3823 err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb); 3824 if (err) { 3825 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 3826 goto err_netdev_notifier; 3827 } 3828 3829 err = mlxsw_sp_span_init(mlxsw_sp); 3830 if (err) { 3831 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 3832 goto err_span_init; 3833 } 3834 3835 err = mlxsw_sp_acl_init(mlxsw_sp); 3836 if (err) { 3837 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 3838 goto err_acl_init; 3839 } 3840 3841 err = mlxsw_sp_dpipe_init(mlxsw_sp); 3842 if (err) { 3843 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 3844 goto err_dpipe_init; 3845 } 3846 3847 err = mlxsw_sp_ports_create(mlxsw_sp); 3848 if (err) { 3849 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 3850 goto err_ports_create; 3851 } 3852 3853 return 0; 3854 3855 err_ports_create: 3856 mlxsw_sp_dpipe_fini(mlxsw_sp); 3857 err_dpipe_init: 3858 mlxsw_sp_acl_fini(mlxsw_sp); 3859 err_acl_init: 3860 mlxsw_sp_span_fini(mlxsw_sp); 3861 err_span_init: 3862 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 3863 err_netdev_notifier: 3864 mlxsw_sp_router_fini(mlxsw_sp); 3865 err_router_init: 3866 mlxsw_sp_afa_fini(mlxsw_sp); 3867 err_afa_init: 3868 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3869 err_counter_pool_init: 3870 mlxsw_sp_switchdev_fini(mlxsw_sp); 3871 err_switchdev_init: 3872 mlxsw_sp_lag_fini(mlxsw_sp); 3873 err_lag_init: 3874 mlxsw_sp_buffers_fini(mlxsw_sp); 3875 err_buffers_init: 3876 mlxsw_sp_traps_fini(mlxsw_sp); 3877 err_traps_init: 3878 mlxsw_sp_fids_fini(mlxsw_sp); 3879 err_fids_init: 3880 mlxsw_sp_kvdl_fini(mlxsw_sp); 3881 return err; 3882 } 3883 3884 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3885 { 3886 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3887 3888 mlxsw_sp_ports_remove(mlxsw_sp); 3889 mlxsw_sp_dpipe_fini(mlxsw_sp); 3890 mlxsw_sp_acl_fini(mlxsw_sp); 3891 mlxsw_sp_span_fini(mlxsw_sp); 3892 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 3893 mlxsw_sp_router_fini(mlxsw_sp); 3894 mlxsw_sp_afa_fini(mlxsw_sp); 3895 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3896 mlxsw_sp_switchdev_fini(mlxsw_sp); 3897 mlxsw_sp_lag_fini(mlxsw_sp); 3898 mlxsw_sp_buffers_fini(mlxsw_sp); 3899 mlxsw_sp_traps_fini(mlxsw_sp); 3900 mlxsw_sp_fids_fini(mlxsw_sp); 3901 mlxsw_sp_kvdl_fini(mlxsw_sp); 3902 } 3903 3904 static const struct mlxsw_config_profile mlxsw_sp_config_profile = { 3905 .used_max_vepa_channels = 1, 3906 .max_vepa_channels = 0, 3907 .used_max_mid = 1, 3908 .max_mid = MLXSW_SP_MID_MAX, 3909 .used_max_pgt = 1, 3910 .max_pgt = 0, 3911 .used_flood_tables = 1, 3912 .used_flood_mode = 1, 3913 .flood_mode = 3, 3914 .max_fid_offset_flood_tables = 3, 3915 .fid_offset_flood_table_size = VLAN_N_VID - 1, 3916 .max_fid_flood_tables = 3, 3917 .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX, 3918 .used_max_ib_mc = 1, 3919 .max_ib_mc = 0, 3920 .used_max_pkey = 1, 3921 .max_pkey = 0, 3922 .used_kvd_split_data = 1, 3923 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY, 3924 .kvd_hash_single_parts = 59, 3925 .kvd_hash_double_parts = 41, 3926 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3927 .swid_config = { 3928 { 3929 .used_type = 1, 3930 .type = MLXSW_PORT_SWID_TYPE_ETH, 3931 } 3932 }, 3933 .resource_query_enable = 1, 3934 }; 3935 3936 static struct mlxsw_driver mlxsw_sp_driver = { 3937 .kind = mlxsw_sp_driver_name, 3938 .priv_size = sizeof(struct mlxsw_sp), 3939 .init = 
mlxsw_sp_init, 3940 .fini = mlxsw_sp_fini, 3941 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3942 .port_split = mlxsw_sp_port_split, 3943 .port_unsplit = mlxsw_sp_port_unsplit, 3944 .sb_pool_get = mlxsw_sp_sb_pool_get, 3945 .sb_pool_set = mlxsw_sp_sb_pool_set, 3946 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3947 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3948 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3949 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3950 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3951 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3952 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3953 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3954 .txhdr_construct = mlxsw_sp_txhdr_construct, 3955 .txhdr_len = MLXSW_TXHDR_LEN, 3956 .profile = &mlxsw_sp_config_profile, 3957 }; 3958 3959 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 3960 { 3961 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 3962 } 3963 3964 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) 3965 { 3966 struct mlxsw_sp_port **p_mlxsw_sp_port = data; 3967 int ret = 0; 3968 3969 if (mlxsw_sp_port_dev_check(lower_dev)) { 3970 *p_mlxsw_sp_port = netdev_priv(lower_dev); 3971 ret = 1; 3972 } 3973 3974 return ret; 3975 } 3976 3977 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 3978 { 3979 struct mlxsw_sp_port *mlxsw_sp_port; 3980 3981 if (mlxsw_sp_port_dev_check(dev)) 3982 return netdev_priv(dev); 3983 3984 mlxsw_sp_port = NULL; 3985 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); 3986 3987 return mlxsw_sp_port; 3988 } 3989 3990 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 3991 { 3992 struct mlxsw_sp_port *mlxsw_sp_port; 3993 3994 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 3995 return mlxsw_sp_port ? 
mlxsw_sp_port->mlxsw_sp : NULL; 3996 } 3997 3998 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 3999 { 4000 struct mlxsw_sp_port *mlxsw_sp_port; 4001 4002 if (mlxsw_sp_port_dev_check(dev)) 4003 return netdev_priv(dev); 4004 4005 mlxsw_sp_port = NULL; 4006 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 4007 &mlxsw_sp_port); 4008 4009 return mlxsw_sp_port; 4010 } 4011 4012 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 4013 { 4014 struct mlxsw_sp_port *mlxsw_sp_port; 4015 4016 rcu_read_lock(); 4017 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 4018 if (mlxsw_sp_port) 4019 dev_hold(mlxsw_sp_port->dev); 4020 rcu_read_unlock(); 4021 return mlxsw_sp_port; 4022 } 4023 4024 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 4025 { 4026 dev_put(mlxsw_sp_port->dev); 4027 } 4028 4029 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4030 { 4031 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4032 4033 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 4034 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4035 } 4036 4037 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4038 { 4039 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4040 4041 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 4042 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4043 } 4044 4045 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4046 u16 lag_id, u8 port_index) 4047 { 4048 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4049 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4050 4051 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 4052 lag_id, port_index); 4053 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4054 } 4055 4056 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4057 u16 lag_id) 4058 { 4059 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4060 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4061 4062 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 4063 lag_id); 4064 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4065 } 4066 4067 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 4068 u16 lag_id) 4069 { 4070 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4071 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4072 4073 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 4074 lag_id); 4075 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4076 } 4077 4078 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 4079 u16 lag_id) 4080 { 4081 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4082 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4083 4084 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 4085 lag_id); 4086 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4087 } 4088 4089 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4090 struct net_device *lag_dev, 4091 u16 *p_lag_id) 4092 { 4093 struct mlxsw_sp_upper *lag; 4094 int free_lag_id = -1; 4095 u64 max_lag; 4096 int i; 4097 4098 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); 4099 for (i = 0; i < max_lag; i++) { 4100 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 4101 if (lag->ref_count) { 4102 if (lag->dev == lag_dev) { 4103 *p_lag_id = i; 4104 return 0; 4105 } 4106 } else if (free_lag_id < 0) { 4107 free_lag_id = i; 4108 } 4109 } 4110 if (free_lag_id < 0) 4111 return -EBUSY; 4112 *p_lag_id = 
free_lag_id; 4113 return 0; 4114 } 4115 4116 static bool 4117 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 4118 struct net_device *lag_dev, 4119 struct netdev_lag_upper_info *lag_upper_info, 4120 struct netlink_ext_ack *extack) 4121 { 4122 u16 lag_id; 4123 4124 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 4125 NL_SET_ERR_MSG(extack, 4126 "spectrum: Exceeded number of supported LAG devices"); 4127 return false; 4128 } 4129 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 4130 NL_SET_ERR_MSG(extack, 4131 "spectrum: LAG device using unsupported Tx type"); 4132 return false; 4133 } 4134 return true; 4135 } 4136 4137 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4138 u16 lag_id, u8 *p_port_index) 4139 { 4140 u64 max_lag_members; 4141 int i; 4142 4143 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 4144 MAX_LAG_MEMBERS); 4145 for (i = 0; i < max_lag_members; i++) { 4146 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 4147 *p_port_index = i; 4148 return 0; 4149 } 4150 } 4151 return -EBUSY; 4152 } 4153 4154 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 4155 struct net_device *lag_dev) 4156 { 4157 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4158 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 4159 struct mlxsw_sp_upper *lag; 4160 u16 lag_id; 4161 u8 port_index; 4162 int err; 4163 4164 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 4165 if (err) 4166 return err; 4167 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4168 if (!lag->ref_count) { 4169 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 4170 if (err) 4171 return err; 4172 lag->dev = lag_dev; 4173 } 4174 4175 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 4176 if (err) 4177 return err; 4178 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 4179 if (err) 4180 goto err_col_port_add; 4181 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id); 4182 if (err) 4183 goto err_col_port_enable; 4184 4185 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 4186 mlxsw_sp_port->local_port); 4187 mlxsw_sp_port->lag_id = lag_id; 4188 mlxsw_sp_port->lagged = 1; 4189 lag->ref_count++; 4190 4191 /* Port is no longer usable as a router interface */ 4192 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1); 4193 if (mlxsw_sp_port_vlan->fid) 4194 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 4195 4196 return 0; 4197 4198 err_col_port_enable: 4199 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4200 err_col_port_add: 4201 if (!lag->ref_count) 4202 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4203 return err; 4204 } 4205 4206 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 4207 struct net_device *lag_dev) 4208 { 4209 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4210 u16 lag_id = mlxsw_sp_port->lag_id; 4211 struct mlxsw_sp_upper *lag; 4212 4213 if (!mlxsw_sp_port->lagged) 4214 return; 4215 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4216 WARN_ON(lag->ref_count == 0); 4217 4218 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); 4219 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4220 4221 /* Any VLANs configured on the port are no longer valid */ 4222 mlxsw_sp_port_vlan_flush(mlxsw_sp_port); 4223 4224 if (lag->ref_count == 1) 4225 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4226 4227 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4228 mlxsw_sp_port->local_port); 4229 mlxsw_sp_port->lagged = 0; 4230 lag->ref_count--; 4231 4232 
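/* The VLAN flush above removed the port's default VID 1 as well;
 * re-create it so the port is usable as a stand-alone switch port
 * again.
 */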
mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); 4233 /* Make sure untagged frames are allowed to ingress */ 4234 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); 4235 } 4236 4237 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4238 u16 lag_id) 4239 { 4240 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4241 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4242 4243 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 4244 mlxsw_sp_port->local_port); 4245 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4246 } 4247 4248 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4249 u16 lag_id) 4250 { 4251 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4252 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4253 4254 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 4255 mlxsw_sp_port->local_port); 4256 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4257 } 4258 4259 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port, 4260 bool lag_tx_enabled) 4261 { 4262 if (lag_tx_enabled) 4263 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, 4264 mlxsw_sp_port->lag_id); 4265 else 4266 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 4267 mlxsw_sp_port->lag_id); 4268 } 4269 4270 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 4271 struct netdev_lag_lower_state_info *info) 4272 { 4273 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); 4274 } 4275 4276 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 4277 bool enable) 4278 { 4279 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4280 enum mlxsw_reg_spms_state spms_state; 4281 char *spms_pl; 4282 u16 vid; 4283 int err; 4284 4285 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : 4286 MLXSW_REG_SPMS_STATE_DISCARDING; 4287 4288 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 4289 if (!spms_pl) 4290 return -ENOMEM; 4291 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 4292 4293 for (vid = 0; vid < VLAN_N_VID; vid++) 4294 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 4295 4296 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 4297 kfree(spms_pl); 4298 return err; 4299 } 4300 4301 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4302 { 4303 u16 vid = 1; 4304 int err; 4305 4306 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 4307 if (err) 4308 return err; 4309 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 4310 if (err) 4311 goto err_port_stp_set; 4312 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, 4313 true, false); 4314 if (err) 4315 goto err_port_vlan_set; 4316 4317 for (; vid <= VLAN_N_VID - 1; vid++) { 4318 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4319 vid, false); 4320 if (err) 4321 goto err_vid_learning_set; 4322 } 4323 4324 return 0; 4325 4326 err_vid_learning_set: 4327 for (vid--; vid >= 1; vid--) 4328 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 4329 err_port_vlan_set: 4330 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4331 err_port_stp_set: 4332 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4333 return err; 4334 } 4335 4336 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4337 { 4338 u16 vid; 4339 4340 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 4341 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4342 vid, true); 4343 4344 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, 4345 false, false); 4346 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4347 
mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4348 } 4349 4350 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 4351 struct net_device *dev, 4352 unsigned long event, void *ptr) 4353 { 4354 struct netdev_notifier_changeupper_info *info; 4355 struct mlxsw_sp_port *mlxsw_sp_port; 4356 struct netlink_ext_ack *extack; 4357 struct net_device *upper_dev; 4358 struct mlxsw_sp *mlxsw_sp; 4359 int err = 0; 4360 4361 mlxsw_sp_port = netdev_priv(dev); 4362 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4363 info = ptr; 4364 extack = netdev_notifier_info_to_extack(&info->info); 4365 4366 /* Unsupported topologies are rejected at PRECHANGEUPPER time, before the upper device is actually linked. */ switch (event) { 4367 case NETDEV_PRECHANGEUPPER: 4368 upper_dev = info->upper_dev; 4369 if (!is_vlan_dev(upper_dev) && 4370 !netif_is_lag_master(upper_dev) && 4371 !netif_is_bridge_master(upper_dev) && 4372 !netif_is_ovs_master(upper_dev)) { 4373 NL_SET_ERR_MSG(extack, 4374 "spectrum: Unknown upper device type"); 4375 return -EINVAL; 4376 } 4377 if (!info->linking) 4378 break; 4379 if (netdev_has_any_upper_dev(upper_dev)) { 4380 NL_SET_ERR_MSG(extack, 4381 "spectrum: Enslaving a port to a device that already has an upper device is not supported"); 4382 return -EINVAL; 4383 } 4384 if (netif_is_lag_master(upper_dev) && 4385 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 4386 info->upper_info, extack)) 4387 return -EINVAL; 4388 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 4389 NL_SET_ERR_MSG(extack, 4390 "spectrum: Master device is a LAG master and this device has a VLAN"); 4391 return -EINVAL; 4392 } 4393 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 4394 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 4395 NL_SET_ERR_MSG(extack, 4396 "spectrum: Cannot put a VLAN on a LAG port"); 4397 return -EINVAL; 4398 } 4399 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 4400 NL_SET_ERR_MSG(extack, 4401 "spectrum: Master device is an OVS master and this device has a VLAN"); 4402 return -EINVAL; 4403 } 4404 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 4405 NL_SET_ERR_MSG(extack, 4406 "spectrum: Cannot put a VLAN on an OVS port"); 4407 return -EINVAL; 4408 } 4409 break; 4410 case NETDEV_CHANGEUPPER: 4411 upper_dev = info->upper_dev; 4412 if (netif_is_bridge_master(upper_dev)) { 4413 if (info->linking) 4414 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4415 lower_dev, 4416 upper_dev, 4417 extack); 4418 else 4419 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4420 lower_dev, 4421 upper_dev); 4422 } else if (netif_is_lag_master(upper_dev)) { 4423 if (info->linking) 4424 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 4425 upper_dev); 4426 else 4427 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 4428 upper_dev); 4429 } else if (netif_is_ovs_master(upper_dev)) { 4430 if (info->linking) 4431 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 4432 else 4433 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 4434 } 4435 break; 4436 } 4437 4438 return err; 4439 } 4440 4441 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 4442 unsigned long event, void *ptr) 4443 { 4444 struct netdev_notifier_changelowerstate_info *info; 4445 struct mlxsw_sp_port *mlxsw_sp_port; 4446 int err; 4447 4448 mlxsw_sp_port = netdev_priv(dev); 4449 info = ptr; 4450 4451 /* Lower state changes cannot be vetoed, so a failure to sync the LAG Tx state is only logged. */ switch (event) { 4452 case NETDEV_CHANGELOWERSTATE: 4453 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 4454 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 4455 info->lower_state_info); 4456 if (err) 4457 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 4458 } 4459 break; 4460 } 4461 4462 return 0;
4463 } 4464 4465 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 4466 struct net_device *port_dev, 4467 unsigned long event, void *ptr) 4468 { 4469 switch (event) { 4470 case NETDEV_PRECHANGEUPPER: 4471 case NETDEV_CHANGEUPPER: 4472 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 4473 event, ptr); 4474 case NETDEV_CHANGELOWERSTATE: 4475 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 4476 ptr); 4477 } 4478 4479 return 0; 4480 } 4481 4482 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 4483 unsigned long event, void *ptr) 4484 { 4485 struct net_device *dev; 4486 struct list_head *iter; 4487 int ret; 4488 4489 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4490 if (mlxsw_sp_port_dev_check(dev)) { 4491 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 4492 ptr); 4493 if (ret) 4494 return ret; 4495 } 4496 } 4497 4498 return 0; 4499 } 4500 4501 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 4502 struct net_device *dev, 4503 unsigned long event, void *ptr, 4504 u16 vid) 4505 { 4506 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 4507 struct netdev_notifier_changeupper_info *info = ptr; 4508 struct netlink_ext_ack *extack; 4509 struct net_device *upper_dev; 4510 int err = 0; 4511 4512 extack = netdev_notifier_info_to_extack(&info->info); 4513 4514 switch (event) { 4515 case NETDEV_PRECHANGEUPPER: 4516 upper_dev = info->upper_dev; 4517 if (!netif_is_bridge_master(upper_dev)) { 4518 NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers"); 4519 return -EINVAL; 4520 } 4521 if (!info->linking) 4522 break; 4523 if (netdev_has_any_upper_dev(upper_dev)) { 4524 NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported"); 4525 return -EINVAL; 4526 } 4527 break; 4528 case NETDEV_CHANGEUPPER: 4529 upper_dev = info->upper_dev; 4530 if (netif_is_bridge_master(upper_dev)) { 4531 if (info->linking) 4532 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4533 vlan_dev, 4534 upper_dev, 4535 extack); 4536 else 4537 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4538 vlan_dev, 4539 upper_dev); 4540 } else { 4541 err = -EINVAL; 4542 WARN_ON(1); 4543 } 4544 break; 4545 } 4546 4547 return err; 4548 } 4549 4550 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 4551 struct net_device *lag_dev, 4552 unsigned long event, 4553 void *ptr, u16 vid) 4554 { 4555 struct net_device *dev; 4556 struct list_head *iter; 4557 int ret; 4558 4559 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4560 if (mlxsw_sp_port_dev_check(dev)) { 4561 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 4562 event, ptr, 4563 vid); 4564 if (ret) 4565 return ret; 4566 } 4567 } 4568 4569 return 0; 4570 } 4571 4572 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 4573 unsigned long event, void *ptr) 4574 { 4575 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 4576 u16 vid = vlan_dev_vlan_id(vlan_dev); 4577 4578 if (mlxsw_sp_port_dev_check(real_dev)) 4579 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 4580 event, ptr, vid); 4581 else if (netif_is_lag_master(real_dev)) 4582 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 4583 real_dev, event, 4584 ptr, vid); 4585 4586 return 0; 4587 } 4588 4589 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) 4590 { 4591 struct netdev_notifier_changeupper_info *info = ptr; 4592 4593 if (event != NETDEV_PRECHANGEUPPER && event != 
NETDEV_CHANGEUPPER) 4594 return false; 4595 return netif_is_l3_master(info->upper_dev); 4596 } 4597 4598 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 4599 unsigned long event, void *ptr) 4600 { 4601 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4602 struct mlxsw_sp *mlxsw_sp; 4603 int err = 0; 4604 4605 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 4606 if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) 4607 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, 4608 event, ptr); 4609 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) 4610 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, 4611 event, ptr); 4612 else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU) 4613 err = mlxsw_sp_netdevice_router_port_event(dev); 4614 else if (mlxsw_sp_is_vrf_event(event, ptr)) 4615 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); 4616 else if (mlxsw_sp_port_dev_check(dev)) 4617 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); 4618 else if (netif_is_lag_master(dev)) 4619 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 4620 else if (is_vlan_dev(dev)) 4621 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 4622 4623 return notifier_from_errno(err); 4624 } 4625 4626 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { 4627 .notifier_call = mlxsw_sp_inetaddr_valid_event, 4628 }; 4629 4630 static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = { 4631 .notifier_call = mlxsw_sp_inetaddr_event, 4632 }; 4633 4634 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { 4635 .notifier_call = mlxsw_sp_inet6addr_valid_event, 4636 }; 4637 4638 static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = { 4639 .notifier_call = mlxsw_sp_inet6addr_event, 4640 }; 4641 4642 static const struct pci_device_id mlxsw_sp_pci_id_table[] = { 4643 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 4644 {0, }, 4645 }; 4646 4647 static struct pci_driver mlxsw_sp_pci_driver = { 4648 .name = mlxsw_sp_driver_name, 4649 .id_table = mlxsw_sp_pci_id_table, 4650 }; 4651 4652 static int __init mlxsw_sp_module_init(void) 4653 { 4654 int err; 4655 4656 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 4657 register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 4658 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 4659 register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); 4660 4661 err = mlxsw_core_driver_register(&mlxsw_sp_driver); 4662 if (err) 4663 goto err_core_driver_register; 4664 4665 err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver); 4666 if (err) 4667 goto err_pci_driver_register; 4668 4669 return 0; 4670 4671 err_pci_driver_register: 4672 mlxsw_core_driver_unregister(&mlxsw_sp_driver); 4673 err_core_driver_register: 4674 unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); 4675 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 4676 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 4677 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 4678 return err; 4679 } 4680 4681 static void __exit mlxsw_sp_module_exit(void) 4682 { 4683 mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver); 4684 mlxsw_core_driver_unregister(&mlxsw_sp_driver); 4685 unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); 4686 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 4687 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 4688 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 4689 } 4690 4691 
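/* Module plumbing: the IPv4/IPv6 address notifiers are registered
 * before the core and PCI drivers, and both the init error path and
 * module exit unwind the registrations in reverse order.
 */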
module_init(mlxsw_sp_module_init); 4692 module_exit(mlxsw_sp_module_exit); 4693 4694 MODULE_LICENSE("Dual BSD/GPL"); 4695 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 4696 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 4697 MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table); 4698 MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME); 4699
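/* Note: MODULE_FIRMWARE() above only advertises the expected .mfa2
 * image to userspace tooling. The revision check itself is done by
 * mlxsw_sp_fw_rev_validate(), called from mlxsw_sp_init(), which
 * reports "Could not upgrade firmware" on failure.
 */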