/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "../mlxfw/mlxfw.h"

#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1530
#define MLXSW_FWREV_SUBMINOR 152

static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
	.major = MLXSW_FWREV_MAJOR,
	.minor = MLXSW_FWREV_MINOR,
	.subminor = MLXSW_FWREV_SUBMINOR
};

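/* With the revision above, this expands to
 * "mellanox/mlxsw_spectrum-13.1530.152.mfa2".
 */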
#define MLXSW_SP_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
	"." __stringify(MLXSW_FWREV_MINOR) \
	"." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

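/* The MLXSW_ITEM32() macros below (see item.h) generate
 * mlxsw_tx_hdr_<field>_set() helpers for the given byte offset, bit shift
 * and width; mlxsw_sp_txhdr_construct() uses them to assemble the Tx header
 * prepended to every transmitted frame.
 */
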
/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

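/* The callbacks below are driven by the common mlxfw code to flash firmware
 * over the MCQI/MCC/MCDA registers: lock the update handle, update and
 * download each component, verify it, then activate the new image and
 * release the handle.
 */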
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query = mlxsw_sp_component_query,
	.fsm_lock = mlxsw_sp_fsm_lock,
	.fsm_component_update = mlxsw_sp_fsm_component_update,
	.fsm_block_download = mlxsw_sp_fsm_block_download,
	.fsm_component_verify = mlxsw_sp_fsm_component_verify,
	.fsm_activate = mlxsw_sp_fsm_activate,
	.fsm_query_state = mlxsw_sp_fsm_query_state,
	.fsm_cancel = mlxsw_sp_fsm_cancel,
	.fsm_release = mlxsw_sp_fsm_release
};

static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};

	return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}

static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
			       const struct mlxsw_fw_rev *b)
{
	if (a->major != b->major)
		return a->major > b->major;
	if (a->minor != b->minor)
		return a->minor > b->minor;
	return a->subminor >= b->subminor;
}

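/* Flash the bundled firmware file if the running revision is older than the
 * revision this driver was validated against.
 */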
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct firmware *firmware;
	int err;

	if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
		 MLXSW_SP_FW_FILENAME);

	err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			MLXSW_SP_FW_FILENAME);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
	return err;
}

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

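/* Everything queued through ndo_start_xmit() is sent as a control packet
 * destined to a specific local port, so the header always carries
 * TYPE_CONTROL and the port number instead of going through the forwarding
 * pipeline (see the tx_hdr_port_mid comment above).
 */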
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == local_port)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
					      port->local_port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

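/* Egress-mirrored traffic needs extra shared buffer space on the mirrored
 * port: 2.5 times the MTU worth of cells, plus one cell (presumably covering
 * rounding slack).
 */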
static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

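/* Bind 'from' as an inspected port to the SPAN entry whose analyzer port is
 * 'to'; on failure, drop the reference taken by mlxsw_sp_span_entry_get().
 */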
static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					u8 destination_port,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
					      destination_port);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

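/* Derive the port MAC address from the switch base MAC by adding the local
 * port number to its last byte.
 */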
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

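/* PVID 0 means "no PVID": untagged frames are no longer accepted, rather
 * than being classified into a VLAN.
 */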
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

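/* Transmit path: ensure headroom for the Tx header, pad the frame to the
 * Ethernet minimum, prepend the header and hand the skb to the core. On
 * error the skb is dropped and only the tx_dropped counter is bumped.
 */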
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

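/* Size the port's priority group (PG) headroom buffers: every PG that has at
 * least one priority mapped to it gets a 2xMTU threshold, plus a PFC/PAUSE
 * delay allowance when flow control is enabled; PGs without flow control are
 * configured as lossy.
 */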
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

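/* Fold the per-CPU software counters; the u64_stats retry loop keeps the
 * 64-bit reads consistent on 32-bit kernels as well.
 */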
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
					int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}
}

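/* Periodically refresh the cached HW stats while the carrier is up; this
 * cache is what ndo_get_stats64 below hands back.
 */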
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}

static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

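/* get/put pair for per-port VLAN entries: get returns an existing entry or
 * creates one; put unlinks the VLAN from its bridge port or router interface
 * before destroying it.
 */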
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return mlxsw_sp_port_vlan;

	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}

void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}

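/* Front-panel naming: "p<module+1>" for regular ports,
 * "p<module+1>s<subport>" for split ports.
 */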
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}

static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;

	to_dev = tcf_mirred_dev(a);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
				    span_type);
}

static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

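/* Only a single action per matchall rule can be offloaded: a mirred mirror
 * or a sample action, and only when matching on all protocols.
 */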
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_one_action(f->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	tcf_exts_to_list(f->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	if (f->common.chain_index)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct tc_cls_flower_offload *f,
			     bool ingress)
{
	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				      void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	if (!tc_can_offload(mlxsw_sp_port->dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data,
						    ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc_block_cb_ig(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int mlxsw_sp_setup_tc_block_cb_eg(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, false);
}

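/* tc block binding: ingress and egress are distinguished by registering a
 * different callback around the shared handler above.
 */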
1825 { 1826 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1827 1828 switch (type) { 1829 case TC_SETUP_BLOCK: 1830 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1831 case TC_SETUP_QDISC_RED: 1832 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1833 default: 1834 return -EOPNOTSUPP; 1835 } 1836 } 1837 1838 1839 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) 1840 { 1841 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1842 1843 if (!enable && (mlxsw_sp_port->acl_rule_count || 1844 !list_empty(&mlxsw_sp_port->mall_tc_list))) { 1845 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); 1846 return -EINVAL; 1847 } 1848 return 0; 1849 } 1850 1851 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable); 1852 1853 static int mlxsw_sp_handle_feature(struct net_device *dev, 1854 netdev_features_t wanted_features, 1855 netdev_features_t feature, 1856 mlxsw_sp_feature_handler feature_handler) 1857 { 1858 netdev_features_t changes = wanted_features ^ dev->features; 1859 bool enable = !!(wanted_features & feature); 1860 int err; 1861 1862 if (!(changes & feature)) 1863 return 0; 1864 1865 err = feature_handler(dev, enable); 1866 if (err) { 1867 netdev_err(dev, "%s feature %pNF failed, err %d\n", 1868 enable ? "Enable" : "Disable", &feature, err); 1869 return err; 1870 } 1871 1872 if (enable) 1873 dev->features |= feature; 1874 else 1875 dev->features &= ~feature; 1876 1877 return 0; 1878 } 1879 static int mlxsw_sp_set_features(struct net_device *dev, 1880 netdev_features_t features) 1881 { 1882 return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC, 1883 mlxsw_sp_feature_hw_tc); 1884 } 1885 1886 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1887 .ndo_open = mlxsw_sp_port_open, 1888 .ndo_stop = mlxsw_sp_port_stop, 1889 .ndo_start_xmit = mlxsw_sp_port_xmit, 1890 .ndo_setup_tc = mlxsw_sp_setup_tc, 1891 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1892 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1893 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1894 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1895 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1896 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1897 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1898 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1899 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name, 1900 .ndo_set_features = mlxsw_sp_set_features, 1901 }; 1902 1903 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 1904 struct ethtool_drvinfo *drvinfo) 1905 { 1906 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1907 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1908 1909 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver)); 1910 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 1911 sizeof(drvinfo->version)); 1912 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 1913 "%d.%d.%d", 1914 mlxsw_sp->bus_info->fw_rev.major, 1915 mlxsw_sp->bus_info->fw_rev.minor, 1916 mlxsw_sp->bus_info->fw_rev.subminor); 1917 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 1918 sizeof(drvinfo->bus_info)); 1919 } 1920 1921 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, 1922 struct ethtool_pauseparam *pause) 1923 { 1924 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1925 1926 pause->rx_pause = mlxsw_sp_port->link.rx_pause; 1927 pause->tx_pause = mlxsw_sp_port->link.tx_pause; 1928 } 1929 1930 static int 
mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, 1931 struct ethtool_pauseparam *pause) 1932 { 1933 char pfcc_pl[MLXSW_REG_PFCC_LEN]; 1934 1935 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); 1936 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); 1937 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); 1938 1939 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), 1940 pfcc_pl); 1941 } 1942 1943 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, 1944 struct ethtool_pauseparam *pause) 1945 { 1946 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1947 bool pause_en = pause->tx_pause || pause->rx_pause; 1948 int err; 1949 1950 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { 1951 netdev_err(dev, "PFC already enabled on port\n"); 1952 return -EINVAL; 1953 } 1954 1955 if (pause->autoneg) { 1956 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); 1957 return -EINVAL; 1958 } 1959 1960 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1961 if (err) { 1962 netdev_err(dev, "Failed to configure port's headroom\n"); 1963 return err; 1964 } 1965 1966 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); 1967 if (err) { 1968 netdev_err(dev, "Failed to set PAUSE parameters\n"); 1969 goto err_port_pause_configure; 1970 } 1971 1972 mlxsw_sp_port->link.rx_pause = pause->rx_pause; 1973 mlxsw_sp_port->link.tx_pause = pause->tx_pause; 1974 1975 return 0; 1976 1977 err_port_pause_configure: 1978 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1979 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1980 return err; 1981 } 1982 1983 struct mlxsw_sp_port_hw_stats { 1984 char str[ETH_GSTRING_LEN]; 1985 u64 (*getter)(const char *payload); 1986 bool cells_bytes; 1987 }; 1988 1989 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 1990 { 1991 .str = "a_frames_transmitted_ok", 1992 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 1993 }, 1994 { 1995 .str = "a_frames_received_ok", 1996 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 1997 }, 1998 { 1999 .str = "a_frame_check_sequence_errors", 2000 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 2001 }, 2002 { 2003 .str = "a_alignment_errors", 2004 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 2005 }, 2006 { 2007 .str = "a_octets_transmitted_ok", 2008 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 2009 }, 2010 { 2011 .str = "a_octets_received_ok", 2012 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 2013 }, 2014 { 2015 .str = "a_multicast_frames_xmitted_ok", 2016 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 2017 }, 2018 { 2019 .str = "a_broadcast_frames_xmitted_ok", 2020 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 2021 }, 2022 { 2023 .str = "a_multicast_frames_received_ok", 2024 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 2025 }, 2026 { 2027 .str = "a_broadcast_frames_received_ok", 2028 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 2029 }, 2030 { 2031 .str = "a_in_range_length_errors", 2032 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 2033 }, 2034 { 2035 .str = "a_out_of_range_length_field", 2036 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 2037 }, 2038 { 2039 .str = "a_frame_too_long_errors", 2040 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 2041 }, 2042 { 2043 .str = "a_symbol_error_during_carrier", 2044 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 2045 }, 2046 { 2047 .str = 
"a_mac_control_frames_transmitted", 2048 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 2049 }, 2050 { 2051 .str = "a_mac_control_frames_received", 2052 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 2053 }, 2054 { 2055 .str = "a_unsupported_opcodes_received", 2056 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 2057 }, 2058 { 2059 .str = "a_pause_mac_ctrl_frames_received", 2060 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 2061 }, 2062 { 2063 .str = "a_pause_mac_ctrl_frames_xmitted", 2064 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 2065 }, 2066 }; 2067 2068 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 2069 2070 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 2071 { 2072 .str = "rx_octets_prio", 2073 .getter = mlxsw_reg_ppcnt_rx_octets_get, 2074 }, 2075 { 2076 .str = "rx_frames_prio", 2077 .getter = mlxsw_reg_ppcnt_rx_frames_get, 2078 }, 2079 { 2080 .str = "tx_octets_prio", 2081 .getter = mlxsw_reg_ppcnt_tx_octets_get, 2082 }, 2083 { 2084 .str = "tx_frames_prio", 2085 .getter = mlxsw_reg_ppcnt_tx_frames_get, 2086 }, 2087 { 2088 .str = "rx_pause_prio", 2089 .getter = mlxsw_reg_ppcnt_rx_pause_get, 2090 }, 2091 { 2092 .str = "rx_pause_duration_prio", 2093 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 2094 }, 2095 { 2096 .str = "tx_pause_prio", 2097 .getter = mlxsw_reg_ppcnt_tx_pause_get, 2098 }, 2099 { 2100 .str = "tx_pause_duration_prio", 2101 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 2102 }, 2103 }; 2104 2105 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 2106 2107 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 2108 { 2109 .str = "tc_transmit_queue_tc", 2110 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 2111 .cells_bytes = true, 2112 }, 2113 { 2114 .str = "tc_no_buffer_discard_uc_tc", 2115 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 2116 }, 2117 }; 2118 2119 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 2120 2121 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 2122 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \ 2123 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \ 2124 IEEE_8021QAZ_MAX_TCS) 2125 2126 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 2127 { 2128 int i; 2129 2130 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 2131 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 2132 mlxsw_sp_port_hw_prio_stats[i].str, prio); 2133 *p += ETH_GSTRING_LEN; 2134 } 2135 } 2136 2137 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 2138 { 2139 int i; 2140 2141 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 2142 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 2143 mlxsw_sp_port_hw_tc_stats[i].str, tc); 2144 *p += ETH_GSTRING_LEN; 2145 } 2146 } 2147 2148 static void mlxsw_sp_port_get_strings(struct net_device *dev, 2149 u32 stringset, u8 *data) 2150 { 2151 u8 *p = data; 2152 int i; 2153 2154 switch (stringset) { 2155 case ETH_SS_STATS: 2156 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2157 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2158 ETH_GSTRING_LEN); 2159 p += ETH_GSTRING_LEN; 2160 } 2161 2162 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2163 mlxsw_sp_port_get_prio_strings(&p, i); 2164 2165 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2166 mlxsw_sp_port_get_tc_strings(&p, i); 2167 2168 break; 2169 } 2170 } 2171 2172 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2173 enum ethtool_phys_id_state state) 2174 { 2175 struct 
mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2176 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2177 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2178 bool active; 2179 2180 switch (state) { 2181 case ETHTOOL_ID_ACTIVE: 2182 active = true; 2183 break; 2184 case ETHTOOL_ID_INACTIVE: 2185 active = false; 2186 break; 2187 default: 2188 return -EOPNOTSUPP; 2189 } 2190 2191 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2192 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2193 } 2194 2195 static int 2196 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2197 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2198 { 2199 switch (grp) { 2200 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2201 *p_hw_stats = mlxsw_sp_port_hw_stats; 2202 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2203 break; 2204 case MLXSW_REG_PPCNT_PRIO_CNT: 2205 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2206 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2207 break; 2208 case MLXSW_REG_PPCNT_TC_CNT: 2209 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2210 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2211 break; 2212 default: 2213 WARN_ON(1); 2214 return -EOPNOTSUPP; 2215 } 2216 return 0; 2217 } 2218 2219 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2220 enum mlxsw_reg_ppcnt_grp grp, int prio, 2221 u64 *data, int data_index) 2222 { 2223 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2224 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2225 struct mlxsw_sp_port_hw_stats *hw_stats; 2226 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2227 int i, len; 2228 int err; 2229 2230 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2231 if (err) 2232 return; 2233 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2234 for (i = 0; i < len; i++) { 2235 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2236 if (!hw_stats[i].cells_bytes) 2237 continue; 2238 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2239 data[data_index + i]); 2240 } 2241 } 2242 2243 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2244 struct ethtool_stats *stats, u64 *data) 2245 { 2246 int i, data_index = 0; 2247 2248 /* IEEE 802.3 Counters */ 2249 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2250 data, data_index); 2251 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2252 2253 /* Per-Priority Counters */ 2254 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2255 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2256 data, data_index); 2257 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2258 } 2259 2260 /* Per-TC Counters */ 2261 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2262 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2263 data, data_index); 2264 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2265 } 2266 } 2267 2268 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2269 { 2270 switch (sset) { 2271 case ETH_SS_STATS: 2272 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; 2273 default: 2274 return -EOPNOTSUPP; 2275 } 2276 } 2277 2278 struct mlxsw_sp_port_link_mode { 2279 enum ethtool_link_mode_bit_indices mask_ethtool; 2280 u32 mask; 2281 u32 speed; 2282 }; 2283 2284 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = { 2285 { 2286 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2287 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2288 .speed = SPEED_100, 2289 }, 2290 { 2291 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2292 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2293 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2294 .speed = 
SPEED_1000,
2295 },
2296 {
2297 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
2298 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2299 .speed = SPEED_10000,
2300 },
2301 {
2302 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2303 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
2304 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2305 .speed = SPEED_10000,
2306 },
2307 {
2308 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2309 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2310 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2311 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
2312 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2313 .speed = SPEED_10000,
2314 },
2315 {
2316 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
2317 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2318 .speed = SPEED_20000,
2319 },
2320 {
2321 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
2322 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2323 .speed = SPEED_40000,
2324 },
2325 {
2326 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
2327 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2328 .speed = SPEED_40000,
2329 },
2330 {
2331 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
2332 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2333 .speed = SPEED_40000,
2334 },
2335 {
2336 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
2337 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2338 .speed = SPEED_40000,
2339 },
2340 {
2341 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2342 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2343 .speed = SPEED_25000,
2344 },
2345 {
2346 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2347 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2348 .speed = SPEED_25000,
2349 },
2350 {
2351 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2352 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2353 .speed = SPEED_25000,
2354 },
2360 {
2361 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2362 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2363 .speed = SPEED_50000,
2364 },
2365 {
2366 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2367 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2368 .speed = SPEED_50000,
2369 },
2370 {
2371 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2372 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2373 .speed = SPEED_50000,
2374 },
2375 {
2376 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2377 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2378 .speed = SPEED_56000,
2379 },
2380 {
2381 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2382 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2383 .speed = SPEED_56000,
2384 },
2385 {
2386 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2387 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2388 .speed = SPEED_56000,
2389 },
2390 {
2391 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2392 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2393 .speed = SPEED_56000,
2394 },
2395 {
2396 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2397 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2398 .speed = SPEED_100000,
2399 },
2400 {
2401 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2402 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2403 .speed = SPEED_100000,
2404 },
2405 {
2406 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2407 .mask_ethtool =
ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2408 .speed = SPEED_100000, 2409 }, 2410 { 2411 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 2412 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2413 .speed = SPEED_100000, 2414 }, 2415 }; 2416 2417 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode) 2418 2419 static void 2420 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto, 2421 struct ethtool_link_ksettings *cmd) 2422 { 2423 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2424 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2425 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2426 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2427 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2428 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2429 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2430 2431 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2432 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2433 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2434 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2435 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2436 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2437 } 2438 2439 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode) 2440 { 2441 int i; 2442 2443 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2444 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) 2445 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2446 mode); 2447 } 2448 } 2449 2450 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, 2451 struct ethtool_link_ksettings *cmd) 2452 { 2453 u32 speed = SPEED_UNKNOWN; 2454 u8 duplex = DUPLEX_UNKNOWN; 2455 int i; 2456 2457 if (!carrier_ok) 2458 goto out; 2459 2460 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2461 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { 2462 speed = mlxsw_sp_port_link_mode[i].speed; 2463 duplex = DUPLEX_FULL; 2464 break; 2465 } 2466 } 2467 out: 2468 cmd->base.speed = speed; 2469 cmd->base.duplex = duplex; 2470 } 2471 2472 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) 2473 { 2474 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2475 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2476 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2477 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2478 return PORT_FIBRE; 2479 2480 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2481 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2482 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) 2483 return PORT_DA; 2484 2485 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2486 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2487 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2488 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) 2489 return PORT_NONE; 2490 2491 return PORT_OTHER; 2492 } 2493 2494 static u32 2495 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd) 2496 { 2497 u32 ptys_proto = 0; 2498 int i; 2499 2500 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2501 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2502 cmd->link_modes.advertising)) 2503 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2504 } 2505 return ptys_proto; 2506 } 2507 2508 static u32 mlxsw_sp_to_ptys_speed(u32 speed) 2509 { 2510 u32 ptys_proto = 0; 2511 int i; 2512 2513 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2514 if (speed == mlxsw_sp_port_link_mode[i].speed) 2515 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2516 } 2517 return ptys_proto; 2518 } 2519 2520 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) 2521 { 2522 u32 ptys_proto = 0; 2523 int i; 2524 2525 for (i = 
0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2526 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2527 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2528 }
2529 return ptys_proto;
2530 }
2531
2532 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2533 struct ethtool_link_ksettings *cmd)
2534 {
2535 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2536 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2537 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2538
2539 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2540 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2541 }
2542
2543 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2544 struct ethtool_link_ksettings *cmd)
2545 {
2546 if (!autoneg)
2547 return;
2548
2549 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2550 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2551 }
2552
2553 static void
2554 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2555 struct ethtool_link_ksettings *cmd)
2556 {
2557 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2558 return;
2559
2560 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2561 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2562 }
2563
2564 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2565 struct ethtool_link_ksettings *cmd)
2566 {
2567 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2568 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2569 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2570 char ptys_pl[MLXSW_REG_PTYS_LEN];
2571 u8 autoneg_status;
2572 bool autoneg;
2573 int err;
2574
2575 autoneg = mlxsw_sp_port->link.autoneg;
2576 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2577 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2578 if (err)
2579 return err;
2580 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2581 &eth_proto_oper);
2582
2583 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2584
2585 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2586
2587 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2588 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2589 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2590
2591 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2592 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2593 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2594 cmd);
2595
2596 return 0;
2597 }
2598
2599 static int
2600 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2601 const struct ethtool_link_ksettings *cmd)
2602 {
2603 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2604 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2605 char ptys_pl[MLXSW_REG_PTYS_LEN];
2606 u32 eth_proto_cap, eth_proto_new;
2607 bool autoneg;
2608 int err;
2609
2610 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2611 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2612 if (err)
2613 return err;
2614 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2615
2616 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2617 eth_proto_new = autoneg ?
2618 mlxsw_sp_to_ptys_advert_link(cmd) : 2619 mlxsw_sp_to_ptys_speed(cmd->base.speed); 2620 2621 eth_proto_new = eth_proto_new & eth_proto_cap; 2622 if (!eth_proto_new) { 2623 netdev_err(dev, "No supported speed requested\n"); 2624 return -EINVAL; 2625 } 2626 2627 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2628 eth_proto_new); 2629 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2630 if (err) 2631 return err; 2632 2633 if (!netif_running(dev)) 2634 return 0; 2635 2636 mlxsw_sp_port->link.autoneg = autoneg; 2637 2638 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2639 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 2640 2641 return 0; 2642 } 2643 2644 static int mlxsw_sp_flash_device(struct net_device *dev, 2645 struct ethtool_flash *flash) 2646 { 2647 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2648 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2649 const struct firmware *firmware; 2650 int err; 2651 2652 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) 2653 return -EOPNOTSUPP; 2654 2655 dev_hold(dev); 2656 rtnl_unlock(); 2657 2658 err = request_firmware_direct(&firmware, flash->data, &dev->dev); 2659 if (err) 2660 goto out; 2661 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware); 2662 release_firmware(firmware); 2663 out: 2664 rtnl_lock(); 2665 dev_put(dev); 2666 return err; 2667 } 2668 2669 #define MLXSW_SP_I2C_ADDR_LOW 0x50 2670 #define MLXSW_SP_I2C_ADDR_HIGH 0x51 2671 #define MLXSW_SP_EEPROM_PAGE_LENGTH 256 2672 2673 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port, 2674 u16 offset, u16 size, void *data, 2675 unsigned int *p_read_size) 2676 { 2677 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2678 char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE]; 2679 char mcia_pl[MLXSW_REG_MCIA_LEN]; 2680 u16 i2c_addr; 2681 int status; 2682 int err; 2683 2684 size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE); 2685 2686 if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH && 2687 offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH) 2688 /* Cross pages read, read until offset 256 in low page */ 2689 size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset; 2690 2691 i2c_addr = MLXSW_SP_I2C_ADDR_LOW; 2692 if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) { 2693 i2c_addr = MLXSW_SP_I2C_ADDR_HIGH; 2694 offset -= MLXSW_SP_EEPROM_PAGE_LENGTH; 2695 } 2696 2697 mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module, 2698 0, 0, offset, size, i2c_addr); 2699 2700 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl); 2701 if (err) 2702 return err; 2703 2704 status = mlxsw_reg_mcia_status_get(mcia_pl); 2705 if (status) 2706 return -EIO; 2707 2708 mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp); 2709 memcpy(data, eeprom_tmp, size); 2710 *p_read_size = size; 2711 2712 return 0; 2713 } 2714 2715 enum mlxsw_sp_eeprom_module_info_rev_id { 2716 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00, 2717 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01, 2718 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03, 2719 }; 2720 2721 enum mlxsw_sp_eeprom_module_info_id { 2722 MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03, 2723 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C, 2724 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D, 2725 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11, 2726 }; 2727 2728 enum mlxsw_sp_eeprom_module_info { 2729 MLXSW_SP_EEPROM_MODULE_INFO_ID, 2730 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID, 2731 MLXSW_SP_EEPROM_MODULE_INFO_SIZE, 2732 }; 2733 2734 static int mlxsw_sp_get_module_info(struct net_device *netdev, 2735 struct ethtool_modinfo *modinfo) 2736 { 
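	/* Read the first two identification bytes of the module EEPROM and
	 * map the module ID to the matching SFF specification (SFF-8436,
	 * SFF-8636 or SFF-8472), so ethtool knows which format and how much
	 * EEPROM data to expect.
	 */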
2737 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2738 u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE]; 2739 u8 module_rev_id, module_id; 2740 unsigned int read_size; 2741 int err; 2742 2743 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0, 2744 MLXSW_SP_EEPROM_MODULE_INFO_SIZE, 2745 module_info, &read_size); 2746 if (err) 2747 return err; 2748 2749 if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE) 2750 return -EIO; 2751 2752 module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID]; 2753 module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID]; 2754 2755 switch (module_id) { 2756 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP: 2757 modinfo->type = ETH_MODULE_SFF_8436; 2758 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2759 break; 2760 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS: 2761 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28: 2762 if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 || 2763 module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) { 2764 modinfo->type = ETH_MODULE_SFF_8636; 2765 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2766 } else { 2767 modinfo->type = ETH_MODULE_SFF_8436; 2768 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2769 } 2770 break; 2771 case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP: 2772 modinfo->type = ETH_MODULE_SFF_8472; 2773 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2774 break; 2775 default: 2776 return -EINVAL; 2777 } 2778 2779 return 0; 2780 } 2781 2782 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 2783 struct ethtool_eeprom *ee, 2784 u8 *data) 2785 { 2786 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2787 int offset = ee->offset; 2788 unsigned int read_size; 2789 int i = 0; 2790 int err; 2791 2792 if (!ee->len) 2793 return -EINVAL; 2794 2795 memset(data, 0, ee->len); 2796 2797 while (i < ee->len) { 2798 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset, 2799 ee->len - i, data + i, 2800 &read_size); 2801 if (err) { 2802 netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n"); 2803 return err; 2804 } 2805 2806 i += read_size; 2807 offset += read_size; 2808 } 2809 2810 return 0; 2811 } 2812 2813 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 2814 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 2815 .get_link = ethtool_op_get_link, 2816 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 2817 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 2818 .get_strings = mlxsw_sp_port_get_strings, 2819 .set_phys_id = mlxsw_sp_port_set_phys_id, 2820 .get_ethtool_stats = mlxsw_sp_port_get_stats, 2821 .get_sset_count = mlxsw_sp_port_get_sset_count, 2822 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 2823 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 2824 .flash_device = mlxsw_sp_flash_device, 2825 .get_module_info = mlxsw_sp_get_module_info, 2826 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 2827 }; 2828 2829 static int 2830 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 2831 { 2832 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2833 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width; 2834 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2835 u32 eth_proto_admin; 2836 2837 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); 2838 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2839 eth_proto_admin); 2840 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2841 } 2842 2843 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 2844 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 2845 bool dwrr, u8 dwrr_weight) 
2846 {
2847 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2848 char qeec_pl[MLXSW_REG_QEEC_LEN];
2849
2850 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2851 next_index);
2852 mlxsw_reg_qeec_de_set(qeec_pl, true);
2853 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2854 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2855 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2856 }
2857
2858 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2859 enum mlxsw_reg_qeec_hr hr, u8 index,
2860 u8 next_index, u32 maxrate)
2861 {
2862 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2863 char qeec_pl[MLXSW_REG_QEEC_LEN];
2864
2865 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2866 next_index);
2867 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2868 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2869 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2870 }
2871
2872 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2873 u8 switch_prio, u8 tclass)
2874 {
2875 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2876 char qtct_pl[MLXSW_REG_QTCT_LEN];
2877
2878 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2879 tclass);
2880 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2881 }
2882
2883 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2884 {
2885 int err, i;
2886
2887 /* Set up the elements hierarchy, so that each TC is linked to
2888 * one subgroup, which are all members of the same group.
2889 */
2890 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2891 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2892 0);
2893 if (err)
2894 return err;
2895 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2896 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2897 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2898 0, false, 0);
2899 if (err)
2900 return err;
2901 }
2902 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2903 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2904 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2905 false, 0);
2906 if (err)
2907 return err;
2908 }
2909
2910 /* Make sure the max shaper is disabled in all hierarchies that
2911 * support it.
2912 */
2913 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2914 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2915 MLXSW_REG_QEEC_MAS_DIS);
2916 if (err)
2917 return err;
2918 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2919 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2920 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2921 i, 0,
2922 MLXSW_REG_QEEC_MAS_DIS);
2923 if (err)
2924 return err;
2925 }
2926 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2927 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2928 MLXSW_REG_QEEC_HIERARCY_TC,
2929 i, i,
2930 MLXSW_REG_QEEC_MAS_DIS);
2931 if (err)
2932 return err;
2933 }
2934
2935 /* Map all priorities to traffic class 0.
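 * This is just the initial mapping; DCB ETS configuration can later
 * re-map priorities to other TCs through mlxsw_sp_port_prio_tc_set().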
*/
2936 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2937 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2938 if (err)
2939 return err;
2940 }
2941
2942 return 0;
2943 }
2944
2945 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2946 bool split, u8 module, u8 width, u8 lane)
2947 {
2948 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2949 struct mlxsw_sp_port *mlxsw_sp_port;
2950 struct net_device *dev;
2951 int err;
2952
2953 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2954 if (err) {
2955 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2956 local_port);
2957 return err;
2958 }
2959
2960 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2961 if (!dev) {
2962 err = -ENOMEM;
2963 goto err_alloc_etherdev;
2964 }
2965 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
2966 mlxsw_sp_port = netdev_priv(dev);
2967 mlxsw_sp_port->dev = dev;
2968 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2969 mlxsw_sp_port->local_port = local_port;
2970 mlxsw_sp_port->pvid = 1;
2971 mlxsw_sp_port->split = split;
2972 mlxsw_sp_port->mapping.module = module;
2973 mlxsw_sp_port->mapping.width = width;
2974 mlxsw_sp_port->mapping.lane = lane;
2975 mlxsw_sp_port->link.autoneg = 1;
2976 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
2977 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
2978
2979 mlxsw_sp_port->pcpu_stats =
2980 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2981 if (!mlxsw_sp_port->pcpu_stats) {
2982 err = -ENOMEM;
2983 goto err_alloc_stats;
2984 }
2985
2986 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2987 GFP_KERNEL);
2988 if (!mlxsw_sp_port->sample) {
2989 err = -ENOMEM;
2990 goto err_alloc_sample;
2991 }
2992
2993 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
2994 &update_stats_cache);
2995
2996 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2997 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2998
2999 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
3000 if (err) {
3001 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
3002 mlxsw_sp_port->local_port);
3003 goto err_port_module_map;
3004 }
3005
3006 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
3007 if (err) {
3008 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
3009 mlxsw_sp_port->local_port);
3010 goto err_port_swid_set;
3011 }
3012
3013 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
3014 if (err) {
3015 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
3016 mlxsw_sp_port->local_port);
3017 goto err_dev_addr_init;
3018 }
3019
3020 netif_carrier_off(dev);
3021
3022 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
3023 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
3024 dev->hw_features |= NETIF_F_HW_TC;
3025
3026 dev->min_mtu = 0;
3027 dev->max_mtu = ETH_MAX_MTU;
3028
3029 /* Each packet needs to have a Tx header (metadata) on top of all other
3030 * headers.
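 * Reserving the headroom up front lets the xmit path push the Tx header
 * without having to reallocate the skb.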
3031 */ 3032 dev->needed_headroom = MLXSW_TXHDR_LEN; 3033 3034 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 3035 if (err) { 3036 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 3037 mlxsw_sp_port->local_port); 3038 goto err_port_system_port_mapping_set; 3039 } 3040 3041 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 3042 if (err) { 3043 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 3044 mlxsw_sp_port->local_port); 3045 goto err_port_speed_by_width_set; 3046 } 3047 3048 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 3049 if (err) { 3050 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 3051 mlxsw_sp_port->local_port); 3052 goto err_port_mtu_set; 3053 } 3054 3055 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3056 if (err) 3057 goto err_port_admin_status_set; 3058 3059 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 3060 if (err) { 3061 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 3062 mlxsw_sp_port->local_port); 3063 goto err_port_buffers_init; 3064 } 3065 3066 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 3067 if (err) { 3068 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 3069 mlxsw_sp_port->local_port); 3070 goto err_port_ets_init; 3071 } 3072 3073 /* ETS and buffers must be initialized before DCB. */ 3074 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 3075 if (err) { 3076 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 3077 mlxsw_sp_port->local_port); 3078 goto err_port_dcb_init; 3079 } 3080 3081 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 3082 if (err) { 3083 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 3084 mlxsw_sp_port->local_port); 3085 goto err_port_fids_init; 3086 } 3087 3088 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); 3089 if (IS_ERR(mlxsw_sp_port_vlan)) { 3090 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 3091 mlxsw_sp_port->local_port); 3092 err = PTR_ERR(mlxsw_sp_port_vlan); 3093 goto err_port_vlan_get; 3094 } 3095 3096 mlxsw_sp_port_switchdev_init(mlxsw_sp_port); 3097 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 3098 err = register_netdev(dev); 3099 if (err) { 3100 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 3101 mlxsw_sp_port->local_port); 3102 goto err_register_netdev; 3103 } 3104 3105 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 3106 mlxsw_sp_port, dev, mlxsw_sp_port->split, 3107 module); 3108 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 3109 return 0; 3110 3111 err_register_netdev: 3112 mlxsw_sp->ports[local_port] = NULL; 3113 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 3114 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); 3115 err_port_vlan_get: 3116 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3117 err_port_fids_init: 3118 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3119 err_port_dcb_init: 3120 err_port_ets_init: 3121 err_port_buffers_init: 3122 err_port_admin_status_set: 3123 err_port_mtu_set: 3124 err_port_speed_by_width_set: 3125 err_port_system_port_mapping_set: 3126 err_dev_addr_init: 3127 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3128 err_port_swid_set: 3129 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3130 err_port_module_map: 3131 kfree(mlxsw_sp_port->sample); 3132 err_alloc_sample: 3133 free_percpu(mlxsw_sp_port->pcpu_stats); 3134 err_alloc_stats: 3135 free_netdev(dev); 3136 err_alloc_etherdev: 3137 
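	/* Error unwind: the labels fall through in reverse order of
	 * initialization, so each failure point tears down only what was
	 * already set up.
	 */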
mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3138 return err; 3139 } 3140 3141 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3142 { 3143 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3144 3145 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 3146 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3147 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3148 mlxsw_sp->ports[local_port] = NULL; 3149 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 3150 mlxsw_sp_port_vlan_flush(mlxsw_sp_port); 3151 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3152 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3153 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3154 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3155 kfree(mlxsw_sp_port->sample); 3156 free_percpu(mlxsw_sp_port->pcpu_stats); 3157 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3158 free_netdev(mlxsw_sp_port->dev); 3159 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3160 } 3161 3162 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3163 { 3164 return mlxsw_sp->ports[local_port] != NULL; 3165 } 3166 3167 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 3168 { 3169 int i; 3170 3171 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 3172 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3173 mlxsw_sp_port_remove(mlxsw_sp, i); 3174 kfree(mlxsw_sp->port_to_module); 3175 kfree(mlxsw_sp->ports); 3176 } 3177 3178 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3179 { 3180 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3181 u8 module, width, lane; 3182 size_t alloc_size; 3183 int i; 3184 int err; 3185 3186 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 3187 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 3188 if (!mlxsw_sp->ports) 3189 return -ENOMEM; 3190 3191 mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int), 3192 GFP_KERNEL); 3193 if (!mlxsw_sp->port_to_module) { 3194 err = -ENOMEM; 3195 goto err_port_to_module_alloc; 3196 } 3197 3198 for (i = 1; i < max_ports; i++) { 3199 /* Mark as invalid */ 3200 mlxsw_sp->port_to_module[i] = -1; 3201 3202 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 3203 &width, &lane); 3204 if (err) 3205 goto err_port_module_info_get; 3206 if (!width) 3207 continue; 3208 mlxsw_sp->port_to_module[i] = module; 3209 err = mlxsw_sp_port_create(mlxsw_sp, i, false, 3210 module, width, lane); 3211 if (err) 3212 goto err_port_create; 3213 } 3214 return 0; 3215 3216 err_port_create: 3217 err_port_module_info_get: 3218 for (i--; i >= 1; i--) 3219 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3220 mlxsw_sp_port_remove(mlxsw_sp, i); 3221 kfree(mlxsw_sp->port_to_module); 3222 err_port_to_module_alloc: 3223 kfree(mlxsw_sp->ports); 3224 return err; 3225 } 3226 3227 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 3228 { 3229 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 3230 3231 return local_port - offset; 3232 } 3233 3234 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 3235 u8 module, unsigned int count) 3236 { 3237 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 3238 int err, i; 3239 3240 for (i = 0; i < count; i++) { 3241 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, 3242 module, width, i * width); 3243 if (err) 3244 goto err_port_create; 3245 } 3246 3247 return 0; 3248 3249 err_port_create: 3250 for (i--; i >= 0; i--) 3251 if (mlxsw_sp_port_created(mlxsw_sp, 
base_port + i)) 3252 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3253 return err; 3254 } 3255 3256 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 3257 u8 base_port, unsigned int count) 3258 { 3259 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 3260 int i; 3261 3262 /* Split by four means we need to re-create two ports, otherwise 3263 * only one. 3264 */ 3265 count = count / 2; 3266 3267 for (i = 0; i < count; i++) { 3268 local_port = base_port + i * 2; 3269 if (mlxsw_sp->port_to_module[local_port] < 0) 3270 continue; 3271 module = mlxsw_sp->port_to_module[local_port]; 3272 3273 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 3274 width, 0); 3275 } 3276 } 3277 3278 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 3279 unsigned int count) 3280 { 3281 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3282 struct mlxsw_sp_port *mlxsw_sp_port; 3283 u8 module, cur_width, base_port; 3284 int i; 3285 int err; 3286 3287 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3288 if (!mlxsw_sp_port) { 3289 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3290 local_port); 3291 return -EINVAL; 3292 } 3293 3294 module = mlxsw_sp_port->mapping.module; 3295 cur_width = mlxsw_sp_port->mapping.width; 3296 3297 if (count != 2 && count != 4) { 3298 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 3299 return -EINVAL; 3300 } 3301 3302 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 3303 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 3304 return -EINVAL; 3305 } 3306 3307 /* Make sure we have enough slave (even) ports for the split. */ 3308 if (count == 2) { 3309 base_port = local_port; 3310 if (mlxsw_sp->ports[base_port + 1]) { 3311 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3312 return -EINVAL; 3313 } 3314 } else { 3315 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3316 if (mlxsw_sp->ports[base_port + 1] || 3317 mlxsw_sp->ports[base_port + 3]) { 3318 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3319 return -EINVAL; 3320 } 3321 } 3322 3323 for (i = 0; i < count; i++) 3324 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3325 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3326 3327 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); 3328 if (err) { 3329 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 3330 goto err_port_split_create; 3331 } 3332 3333 return 0; 3334 3335 err_port_split_create: 3336 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3337 return err; 3338 } 3339 3340 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) 3341 { 3342 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3343 struct mlxsw_sp_port *mlxsw_sp_port; 3344 u8 cur_width, base_port; 3345 unsigned int count; 3346 int i; 3347 3348 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3349 if (!mlxsw_sp_port) { 3350 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3351 local_port); 3352 return -EINVAL; 3353 } 3354 3355 if (!mlxsw_sp_port->split) { 3356 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); 3357 return -EINVAL; 3358 } 3359 3360 cur_width = mlxsw_sp_port->mapping.width; 3361 count = cur_width == 1 ? 4 : 2; 3362 3363 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3364 3365 /* Determine which ports to remove. 
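 * For a two-way split the pair lives in either the lower or the upper
 * half of the cluster, so move base_port up when local_port is in the
 * upper half.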
*/ 3366 if (count == 2 && local_port >= base_port + 2) 3367 base_port = base_port + 2; 3368 3369 for (i = 0; i < count; i++) 3370 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3371 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3372 3373 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3374 3375 return 0; 3376 } 3377 3378 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 3379 char *pude_pl, void *priv) 3380 { 3381 struct mlxsw_sp *mlxsw_sp = priv; 3382 struct mlxsw_sp_port *mlxsw_sp_port; 3383 enum mlxsw_reg_pude_oper_status status; 3384 u8 local_port; 3385 3386 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 3387 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3388 if (!mlxsw_sp_port) 3389 return; 3390 3391 status = mlxsw_reg_pude_oper_status_get(pude_pl); 3392 if (status == MLXSW_PORT_OPER_STATUS_UP) { 3393 netdev_info(mlxsw_sp_port->dev, "link up\n"); 3394 netif_carrier_on(mlxsw_sp_port->dev); 3395 } else { 3396 netdev_info(mlxsw_sp_port->dev, "link down\n"); 3397 netif_carrier_off(mlxsw_sp_port->dev); 3398 } 3399 } 3400 3401 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 3402 u8 local_port, void *priv) 3403 { 3404 struct mlxsw_sp *mlxsw_sp = priv; 3405 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3406 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 3407 3408 if (unlikely(!mlxsw_sp_port)) { 3409 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 3410 local_port); 3411 return; 3412 } 3413 3414 skb->dev = mlxsw_sp_port->dev; 3415 3416 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 3417 u64_stats_update_begin(&pcpu_stats->syncp); 3418 pcpu_stats->rx_packets++; 3419 pcpu_stats->rx_bytes += skb->len; 3420 u64_stats_update_end(&pcpu_stats->syncp); 3421 3422 skb->protocol = eth_type_trans(skb, skb->dev); 3423 netif_receive_skb(skb); 3424 } 3425 3426 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 3427 void *priv) 3428 { 3429 skb->offload_fwd_mark = 1; 3430 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3431 } 3432 3433 static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb, 3434 u8 local_port, void *priv) 3435 { 3436 skb->offload_mr_fwd_mark = 1; 3437 skb->offload_fwd_mark = 1; 3438 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3439 } 3440 3441 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 3442 void *priv) 3443 { 3444 struct mlxsw_sp *mlxsw_sp = priv; 3445 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3446 struct psample_group *psample_group; 3447 u32 size; 3448 3449 if (unlikely(!mlxsw_sp_port)) { 3450 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 3451 local_port); 3452 goto out; 3453 } 3454 if (unlikely(!mlxsw_sp_port->sample)) { 3455 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 3456 local_port); 3457 goto out; 3458 } 3459 3460 size = mlxsw_sp_port->sample->truncate ? 
3461 mlxsw_sp_port->sample->trunc_size : skb->len; 3462 3463 rcu_read_lock(); 3464 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 3465 if (!psample_group) 3466 goto out_unlock; 3467 psample_sample_packet(psample_group, skb, size, 3468 mlxsw_sp_port->dev->ifindex, 0, 3469 mlxsw_sp_port->sample->rate); 3470 out_unlock: 3471 rcu_read_unlock(); 3472 out: 3473 consume_skb(skb); 3474 } 3475 3476 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3477 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 3478 _is_ctrl, SP_##_trap_group, DISCARD) 3479 3480 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3481 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 3482 _is_ctrl, SP_##_trap_group, DISCARD) 3483 3484 #define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3485 MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action, \ 3486 _is_ctrl, SP_##_trap_group, DISCARD) 3487 3488 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 3489 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 3490 3491 static const struct mlxsw_listener mlxsw_sp_listener[] = { 3492 /* Events */ 3493 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 3494 /* L2 traps */ 3495 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 3496 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 3497 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true), 3498 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 3499 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 3500 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 3501 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 3502 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 3503 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 3504 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 3505 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 3506 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), 3507 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, 3508 false), 3509 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 3510 false), 3511 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, 3512 false), 3513 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 3514 false), 3515 /* L3 traps */ 3516 MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3517 MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3518 MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3519 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), 3520 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 3521 false), 3522 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), 3523 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 3524 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), 3525 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, 3526 false), 3527 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), 3528 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), 3529 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), 3530 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), 3531 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), 3532 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), 3533 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 3534 false), 3535 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, 
TRAP_TO_CPU, IPV6_ND, 3536 false), 3537 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 3538 false), 3539 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 3540 false), 3541 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), 3542 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 3543 false), 3544 MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false), 3545 MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false), 3546 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), 3547 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), 3548 MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3549 /* PKT Sample trap */ 3550 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 3551 false, SP_IP2ME, DISCARD), 3552 /* ACL trap */ 3553 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), 3554 /* Multicast Router Traps */ 3555 MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), 3556 MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), 3557 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 3558 MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 3559 }; 3560 3561 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 3562 { 3563 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 3564 enum mlxsw_reg_qpcr_ir_units ir_units; 3565 int max_cpu_policers; 3566 bool is_bytes; 3567 u8 burst_size; 3568 u32 rate; 3569 int i, err; 3570 3571 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 3572 return -EIO; 3573 3574 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3575 3576 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 3577 for (i = 0; i < max_cpu_policers; i++) { 3578 is_bytes = false; 3579 switch (i) { 3580 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3581 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3582 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3583 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3584 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 3585 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 3586 rate = 128; 3587 burst_size = 7; 3588 break; 3589 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3590 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 3591 rate = 16 * 1024; 3592 burst_size = 10; 3593 break; 3594 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 3595 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3596 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3597 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 3598 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3599 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3600 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 3601 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 3602 rate = 1024; 3603 burst_size = 7; 3604 break; 3605 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3606 is_bytes = true; 3607 rate = 4 * 1024; 3608 burst_size = 4; 3609 break; 3610 default: 3611 continue; 3612 } 3613 3614 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 3615 burst_size); 3616 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 3617 if (err) 3618 return err; 3619 } 3620 3621 return 0; 3622 } 3623 3624 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 3625 { 3626 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3627 enum mlxsw_reg_htgt_trap_group i; 3628 int max_cpu_policers; 3629 int max_trap_groups; 3630 u8 priority, tc; 3631 u16 policer_id; 3632 int err; 3633 3634 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 3635 return -EIO; 3636 3637 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 3638 max_cpu_policers = 
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
        char qpcr_pl[MLXSW_REG_QPCR_LEN];
        enum mlxsw_reg_qpcr_ir_units ir_units;
        int max_cpu_policers;
        bool is_bytes;
        u8 burst_size;
        u32 rate;
        int i, err;

        if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
                return -EIO;

        max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

        ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
        for (i = 0; i < max_cpu_policers; i++) {
                is_bytes = false;
                switch (i) {
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
                        rate = 128;
                        burst_size = 7;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
                        rate = 16 * 1024;
                        burst_size = 10;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
                        rate = 1024;
                        burst_size = 7;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
                        is_bytes = true;
                        rate = 4 * 1024;
                        burst_size = 4;
                        break;
                default:
                        continue;
                }

                mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
                                    burst_size);
                err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
                if (err)
                        return err;
        }

        return 0;
}
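/* Bind each trap group to a CPU traffic class, a priority and the
 * policer configured above. Time-critical control protocols (STP, LACP,
 * LLDP, OSPF, PIM) get the highest priority, while exceptions and
 * remote routes get the lowest. The event group is deliberately left
 * without a policer.
 */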
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
        char htgt_pl[MLXSW_REG_HTGT_LEN];
        enum mlxsw_reg_htgt_trap_group i;
        int max_cpu_policers;
        int max_trap_groups;
        u8 priority, tc;
        u16 policer_id;
        int err;

        if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
                return -EIO;

        max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
        max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

        for (i = 0; i < max_trap_groups; i++) {
                policer_id = i;
                switch (i) {
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
                        priority = 5;
                        tc = 5;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
                        priority = 4;
                        tc = 4;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
                        priority = 3;
                        tc = 3;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
                        priority = 2;
                        tc = 2;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
                        priority = 1;
                        tc = 1;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
                        priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
                        tc = MLXSW_REG_HTGT_DEFAULT_TC;
                        policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
                        break;
                default:
                        continue;
                }

                if (max_cpu_policers <= policer_id &&
                    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
                        return -EIO;

                mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
                err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
                if (err)
                        return err;
        }

        return 0;
}
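/* Register all listeners from mlxsw_sp_listener[] with the core,
 * unwinding in reverse order on failure so that a failed init leaves no
 * listeners behind.
 */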
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
        int i;
        int err;

        err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
        if (err)
                return err;

        err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
        if (err)
                return err;

        for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
                err = mlxsw_core_trap_register(mlxsw_sp->core,
                                               &mlxsw_sp_listener[i],
                                               mlxsw_sp);
                if (err)
                        goto err_listener_register;
        }

        return 0;

err_listener_register:
        for (i--; i >= 0; i--) {
                mlxsw_core_trap_unregister(mlxsw_sp->core,
                                           &mlxsw_sp_listener[i],
                                           mlxsw_sp);
        }
        return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
                mlxsw_core_trap_unregister(mlxsw_sp->core,
                                           &mlxsw_sp_listener[i],
                                           mlxsw_sp);
        }
}
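/* Set up LAG hashing (SLCR) over L2, L3 and L4 header fields and
 * allocate the array used to track the upper device and reference count
 * of each LAG ID. Fails with -EIO if the device does not advertise its
 * LAG limits.
 */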
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
        char slcr_pl[MLXSW_REG_SLCR_LEN];
        int err;

        mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
                                     MLXSW_REG_SLCR_LAG_HASH_DMAC |
                                     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
                                     MLXSW_REG_SLCR_LAG_HASH_VLANID |
                                     MLXSW_REG_SLCR_LAG_HASH_SIP |
                                     MLXSW_REG_SLCR_LAG_HASH_DIP |
                                     MLXSW_REG_SLCR_LAG_HASH_SPORT |
                                     MLXSW_REG_SLCR_LAG_HASH_DPORT |
                                     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
        if (err)
                return err;

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
            !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
                return -EIO;

        mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
                                 sizeof(struct mlxsw_sp_upper),
                                 GFP_KERNEL);
        if (!mlxsw_sp->lags)
                return -ENOMEM;

        return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
        kfree(mlxsw_sp->lags);
}

static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
        char htgt_pl[MLXSW_REG_HTGT_LEN];

        mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
                            MLXSW_REG_HTGT_INVALID_POLICER,
                            MLXSW_REG_HTGT_DEFAULT_PRIORITY,
                            MLXSW_REG_HTGT_DEFAULT_TC);
        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
                                    unsigned long event, void *ptr);
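/* Top-level init flow of a Spectrum instance. The order below is
 * significant; in particular, the netdevice notifier is registered only
 * after the router is initialized, since the notifier callback uses
 * router structures. The error path unwinds completed stages in reverse
 * order.
 */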
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
                         const struct mlxsw_bus_info *mlxsw_bus_info)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        int err;

        mlxsw_sp->core = mlxsw_core;
        mlxsw_sp->bus_info = mlxsw_bus_info;

        err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
                return err;
        }

        err = mlxsw_sp_base_mac_get(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
                return err;
        }

        err = mlxsw_sp_kvdl_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
                return err;
        }

        err = mlxsw_sp_fids_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
                goto err_fids_init;
        }

        err = mlxsw_sp_traps_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
                goto err_traps_init;
        }

        err = mlxsw_sp_buffers_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
                goto err_buffers_init;
        }

        err = mlxsw_sp_lag_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
                goto err_lag_init;
        }

        err = mlxsw_sp_switchdev_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
                goto err_switchdev_init;
        }

        err = mlxsw_sp_counter_pool_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
                goto err_counter_pool_init;
        }

        err = mlxsw_sp_afa_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
                goto err_afa_init;
        }

        err = mlxsw_sp_router_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
                goto err_router_init;
        }

        /* Initialize netdevice notifier after router is initialized, so that
         * the event handler can use router structures.
         */
        mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
        err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
                goto err_netdev_notifier;
        }

        err = mlxsw_sp_span_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
                goto err_span_init;
        }

        err = mlxsw_sp_acl_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
                goto err_acl_init;
        }

        err = mlxsw_sp_dpipe_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
                goto err_dpipe_init;
        }

        err = mlxsw_sp_ports_create(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
                goto err_ports_create;
        }

        return 0;

err_ports_create:
        mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
        mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
        mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
        unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
        mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
        mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
        mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
        mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
        mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
        mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
        mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
        mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
        mlxsw_sp_kvdl_fini(mlxsw_sp);
        return err;
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

        mlxsw_sp_ports_remove(mlxsw_sp);
        mlxsw_sp_dpipe_fini(mlxsw_sp);
        mlxsw_sp_acl_fini(mlxsw_sp);
        mlxsw_sp_span_fini(mlxsw_sp);
        unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
        mlxsw_sp_router_fini(mlxsw_sp);
        mlxsw_sp_afa_fini(mlxsw_sp);
        mlxsw_sp_counter_pool_fini(mlxsw_sp);
        mlxsw_sp_switchdev_fini(mlxsw_sp);
        mlxsw_sp_lag_fini(mlxsw_sp);
        mlxsw_sp_buffers_fini(mlxsw_sp);
        mlxsw_sp_traps_fini(mlxsw_sp);
        mlxsw_sp_fids_fini(mlxsw_sp);
        mlxsw_sp_kvdl_fini(mlxsw_sp);
}
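/* Device configuration profile passed to the core at init time. Flood
 * table sizes are tied to the number of VLANs (VLAN_N_VID - 1) and to
 * the maximum number of 802.1D bridge FIDs the driver supports.
 */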
static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
        .used_max_vepa_channels = 1,
        .max_vepa_channels = 0,
        .used_max_mid = 1,
        .max_mid = MLXSW_SP_MID_MAX,
        .used_max_pgt = 1,
        .max_pgt = 0,
        .used_flood_tables = 1,
        .used_flood_mode = 1,
        .flood_mode = 3,
        .max_fid_offset_flood_tables = 3,
        .fid_offset_flood_table_size = VLAN_N_VID - 1,
        .max_fid_flood_tables = 3,
        .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
        .used_max_ib_mc = 1,
        .max_ib_mc = 0,
        .used_max_pkey = 1,
        .max_pkey = 0,
        .used_kvd_split_data = 1,
        .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
        .kvd_hash_single_parts = 59,
        .kvd_hash_double_parts = 41,
        .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
        .swid_config = {
                {
                        .used_type = 1,
                        .type = MLXSW_PORT_SWID_TYPE_ETH,
                }
        },
        .resource_query_enable = 1,
};

static struct mlxsw_driver mlxsw_sp_driver = {
        .kind = mlxsw_sp_driver_name,
        .priv_size = sizeof(struct mlxsw_sp),
        .init = mlxsw_sp_init,
        .fini = mlxsw_sp_fini,
        .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
        .port_split = mlxsw_sp_port_split,
        .port_unsplit = mlxsw_sp_port_unsplit,
        .sb_pool_get = mlxsw_sp_sb_pool_get,
        .sb_pool_set = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
        .txhdr_construct = mlxsw_sp_txhdr_construct,
        .txhdr_len = MLXSW_TXHDR_LEN,
        .profile = &mlxsw_sp_config_profile,
};
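/* Helpers for resolving a (possibly stacked) netdev to the mlxsw port
 * underneath it. A netdev is recognized as a Spectrum port by its
 * netdev_ops pointer. The _rcu variant may be called from an RCU read
 * side; mlxsw_sp_port_lower_dev_hold() additionally takes a reference
 * on the port netdev, to be released with mlxsw_sp_port_dev_put().
 */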
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
        return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
        struct mlxsw_sp_port **p_mlxsw_sp_port = data;
        int ret = 0;

        if (mlxsw_sp_port_dev_check(lower_dev)) {
                *p_mlxsw_sp_port = netdev_priv(lower_dev);
                ret = 1;
        }

        return ret;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port;

        if (mlxsw_sp_port_dev_check(dev))
                return netdev_priv(dev);

        mlxsw_sp_port = NULL;
        netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

        return mlxsw_sp_port;
}

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port;

        mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
        return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port;

        if (mlxsw_sp_port_dev_check(dev))
                return netdev_priv(dev);

        mlxsw_sp_port = NULL;
        netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
                                      &mlxsw_sp_port);

        return mlxsw_sp_port;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port;

        rcu_read_lock();
        mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
        if (mlxsw_sp_port)
                dev_hold(mlxsw_sp_port->dev);
        rcu_read_unlock();
        return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
        dev_put(mlxsw_sp_port->dev);
}
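/* LAG is configured through two registers: SLDR creates or destroys a
 * LAG and controls its distributor (Tx member) list, while SLCOR
 * controls the collector side, i.e. which local ports may receive on
 * behalf of the LAG and whether collection is enabled for them.
 */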
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
        char sldr_pl[MLXSW_REG_SLDR_LEN];

        mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
        char sldr_pl[MLXSW_REG_SLDR_LEN];

        mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 lag_id, u8 port_index)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char slcor_pl[MLXSW_REG_SLCOR_LEN];

        mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
                                      lag_id, port_index);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
                                        u16 lag_id)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char slcor_pl[MLXSW_REG_SLCOR_LEN];

        mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
                                         lag_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
                                        u16 lag_id)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char slcor_pl[MLXSW_REG_SLCOR_LEN];

        mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
                                        lag_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
                                         u16 lag_id)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char slcor_pl[MLXSW_REG_SLCOR_LEN];

        mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
                                         lag_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
                                  struct net_device *lag_dev,
                                  u16 *p_lag_id)
{
        struct mlxsw_sp_upper *lag;
        int free_lag_id = -1;
        u64 max_lag;
        int i;

        max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
        for (i = 0; i < max_lag; i++) {
                lag = mlxsw_sp_lag_get(mlxsw_sp, i);
                if (lag->ref_count) {
                        if (lag->dev == lag_dev) {
                                *p_lag_id = i;
                                return 0;
                        }
                } else if (free_lag_id < 0) {
                        free_lag_id = i;
                }
        }
        if (free_lag_id < 0)
                return -EBUSY;
        *p_lag_id = free_lag_id;
        return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
                          struct net_device *lag_dev,
                          struct netdev_lag_upper_info *lag_upper_info,
                          struct netlink_ext_ack *extack)
{
        u16 lag_id;

        if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
                NL_SET_ERR_MSG(extack,
                               "spectrum: Exceeded number of supported LAG devices");
                return false;
        }
        if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
                NL_SET_ERR_MSG(extack,
                               "spectrum: LAG device using unsupported Tx type");
                return false;
        }
        return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
                                       u16 lag_id, u8 *p_port_index)
{
        u64 max_lag_members;
        int i;

        max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                             MAX_LAG_MEMBERS);
        for (i = 0; i < max_lag_members; i++) {
                if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
                        *p_port_index = i;
                        return 0;
                }
        }
        return -EBUSY;
}
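/* Join a port to a LAG: create the LAG in hardware on first use, add
 * the port to the collector and enable it, and record the mapping in
 * the core. VLAN 1 loses its router interface, if it had one, since the
 * port is no longer usable as a standalone router port.
 */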
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
                                  struct net_device *lag_dev)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
        struct mlxsw_sp_upper *lag;
        u16 lag_id;
        u8 port_index;
        int err;

        err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
        if (err)
                return err;
        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
        if (!lag->ref_count) {
                err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
                if (err)
                        return err;
                lag->dev = lag_dev;
        }

        err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
        if (err)
                return err;
        err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
        if (err)
                goto err_col_port_add;
        err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
        if (err)
                goto err_col_port_enable;

        mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
                                   mlxsw_sp_port->local_port);
        mlxsw_sp_port->lag_id = lag_id;
        mlxsw_sp_port->lagged = 1;
        lag->ref_count++;

        /* Port is no longer usable as a router interface */
        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
        if (mlxsw_sp_port_vlan->fid)
                mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

        return 0;

err_col_port_enable:
        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
        if (!lag->ref_count)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
        return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
                                    struct net_device *lag_dev)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 lag_id = mlxsw_sp_port->lag_id;
        struct mlxsw_sp_upper *lag;

        if (!mlxsw_sp_port->lagged)
                return;
        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
        WARN_ON(lag->ref_count == 0);

        mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

        /* Any VLANs configured on the port are no longer valid */
        mlxsw_sp_port_vlan_flush(mlxsw_sp_port);

        if (lag->ref_count == 1)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
                                     mlxsw_sp_port->local_port);
        mlxsw_sp_port->lagged = 0;
        lag->ref_count--;

        mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
        /* Make sure untagged frames are allowed to ingress */
        mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
                                      u16 lag_id)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char sldr_pl[MLXSW_REG_SLDR_LEN];

        mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
                                         mlxsw_sp_port->local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
                                         u16 lag_id)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char sldr_pl[MLXSW_REG_SLDR_LEN];

        mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
                                            mlxsw_sp_port->local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                       bool lag_tx_enabled)
{
        if (lag_tx_enabled)
                return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
                                                  mlxsw_sp_port->lag_id);
        else
                return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
                                                     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
                                     struct netdev_lag_lower_state_info *info)
{
        return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                 bool enable)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        enum mlxsw_reg_spms_state spms_state;
        char *spms_pl;
        u16 vid;
        int err;

        spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
                              MLXSW_REG_SPMS_STATE_DISCARDING;

        spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
        if (!spms_pl)
                return -ENOMEM;
        mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

        for (vid = 0; vid < VLAN_N_VID; vid++)
                mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
        kfree(spms_pl);
        return err;
}

static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
        u16 vid = 1;
        int err;

        err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
        if (err)
                return err;
        err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
        if (err)
                goto err_port_stp_set;
        err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
                                     true, false);
        if (err)
                goto err_port_vlan_set;

        for (; vid <= VLAN_N_VID - 1; vid++) {
                err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
                                                     vid, false);
                if (err)
                        goto err_vid_learning_set;
        }

        return 0;

err_vid_learning_set:
        for (vid--; vid >= 1; vid--)
                mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
        mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
        return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
        u16 vid;

        for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
                mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
                                               vid, true);

        mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
                               false, false);
        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
        mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
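/* Netdevice notifier handling. PRECHANGEUPPER is used to veto
 * configurations the device cannot offload (unknown upper types, nested
 * uppers, unsupported LAG Tx types, VLANs on LAG or OVS ports), while
 * CHANGEUPPER performs the actual join or leave of the bridge, LAG or
 * OVS master.
 */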
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
                                               struct net_device *dev,
                                               unsigned long event, void *ptr)
{
        struct netdev_notifier_changeupper_info *info;
        struct mlxsw_sp_port *mlxsw_sp_port;
        struct netlink_ext_ack *extack;
        struct net_device *upper_dev;
        struct mlxsw_sp *mlxsw_sp;
        int err = 0;

        mlxsw_sp_port = netdev_priv(dev);
        mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        info = ptr;
        extack = netdev_notifier_info_to_extack(&info->info);

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                upper_dev = info->upper_dev;
                if (!is_vlan_dev(upper_dev) &&
                    !netif_is_lag_master(upper_dev) &&
                    !netif_is_bridge_master(upper_dev) &&
                    !netif_is_ovs_master(upper_dev)) {
                        NL_SET_ERR_MSG(extack,
                                       "spectrum: Unknown upper device type");
                        return -EINVAL;
                }
                if (!info->linking)
                        break;
                if (netdev_has_any_upper_dev(upper_dev)) {
                        NL_SET_ERR_MSG(extack,
                                       "spectrum: Enslaving a port to a device that already has an upper device is not supported");
                        return -EINVAL;
                }
                if (netif_is_lag_master(upper_dev) &&
                    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
                                               info->upper_info, extack))
                        return -EINVAL;
                if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
                        NL_SET_ERR_MSG(extack,
                                       "spectrum: Master device is a LAG master and this device has a VLAN");
                        return -EINVAL;
                }
                if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
                    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
                        NL_SET_ERR_MSG(extack,
                                       "spectrum: Cannot put a VLAN on a LAG port");
                        return -EINVAL;
                }
                if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
                        NL_SET_ERR_MSG(extack,
                                       "spectrum: Master device is an OVS master and this device has a VLAN");
                        return -EINVAL;
                }
                if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
                        NL_SET_ERR_MSG(extack,
                                       "spectrum: Cannot put a VLAN on an OVS port");
                        return -EINVAL;
                }
                break;
        case NETDEV_CHANGEUPPER:
                upper_dev = info->upper_dev;
                if (netif_is_bridge_master(upper_dev)) {
                        if (info->linking)
                                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
                                                                lower_dev,
                                                                upper_dev,
                                                                extack);
                        else
                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
                                                           lower_dev,
                                                           upper_dev);
                } else if (netif_is_lag_master(upper_dev)) {
                        if (info->linking)
                                err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
                                                             upper_dev);
                        else
                                mlxsw_sp_port_lag_leave(mlxsw_sp_port,
                                                        upper_dev);
                } else if (netif_is_ovs_master(upper_dev)) {
                        if (info->linking)
                                err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
                        else
                                mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
                }
                break;
        }

        return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
                                               unsigned long event, void *ptr)
{
        struct netdev_notifier_changelowerstate_info *info;
        struct mlxsw_sp_port *mlxsw_sp_port;
        int err;

        mlxsw_sp_port = netdev_priv(dev);
        info = ptr;

        switch (event) {
        case NETDEV_CHANGELOWERSTATE:
                if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
                        err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
                                                        info->lower_state_info);
                        if (err)
                                netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
                }
                break;
        }

        return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
                                         struct net_device *port_dev,
                                         unsigned long event, void *ptr)
{
        switch (event) {
        case NETDEV_PRECHANGEUPPER:
        case NETDEV_CHANGEUPPER:
                return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
                                                           event, ptr);
        case NETDEV_CHANGELOWERSTATE:
                return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
                                                           ptr);
        }

        return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
                                        unsigned long event, void *ptr)
{
        struct net_device *dev;
        struct list_head *iter;
        int ret;

        netdev_for_each_lower_dev(lag_dev, dev, iter) {
                if (mlxsw_sp_port_dev_check(dev)) {
                        ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
                                                            ptr);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
                                              struct net_device *dev,
                                              unsigned long event, void *ptr,
                                              u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct netdev_notifier_changeupper_info *info = ptr;
        struct netlink_ext_ack *extack;
        struct net_device *upper_dev;
        int err = 0;

        extack = netdev_notifier_info_to_extack(&info->info);

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                upper_dev = info->upper_dev;
                if (!netif_is_bridge_master(upper_dev)) {
                        NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers");
                        return -EINVAL;
                }
                if (!info->linking)
                        break;
                if (netdev_has_any_upper_dev(upper_dev)) {
                        NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
                        return -EINVAL;
                }
                break;
        case NETDEV_CHANGEUPPER:
                upper_dev = info->upper_dev;
                if (netif_is_bridge_master(upper_dev)) {
                        if (info->linking)
                                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
                                                                vlan_dev,
                                                                upper_dev,
                                                                extack);
                        else
                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
                                                           vlan_dev,
                                                           upper_dev);
                } else {
                        err = -EINVAL;
                        WARN_ON(1);
                }
                break;
        }

        return err;
}

static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
                                                  struct net_device *lag_dev,
                                                  unsigned long event,
                                                  void *ptr, u16 vid)
{
        struct net_device *dev;
        struct list_head *iter;
        int ret;

        netdev_for_each_lower_dev(lag_dev, dev, iter) {
                if (mlxsw_sp_port_dev_check(dev)) {
                        ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
                                                                 event, ptr,
                                                                 vid);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
                                         unsigned long event, void *ptr)
{
        struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
        u16 vid = vlan_dev_vlan_id(vlan_dev);

        if (mlxsw_sp_port_dev_check(real_dev))
                return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
                                                          event, ptr, vid);
        else if (netif_is_lag_master(real_dev))
                return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
                                                              real_dev, event,
                                                              ptr, vid);

        return 0;
}

static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
        struct netdev_notifier_changeupper_info *info = ptr;

        if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
                return false;
        return netif_is_l3_master(info->upper_dev);
}
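/* Dispatch netdevice events by device type: IP-in-IP overlay and
 * underlay devices and router ports are handled by the router code,
 * then VRF, physical port, LAG and VLAN devices each get their own
 * handler. The resulting error code is converted to a notifier return
 * value.
 */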
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct mlxsw_sp *mlxsw_sp;
        int err = 0;

        mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
        if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
                err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
                                                       event, ptr);
        else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
                err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
                                                       event, ptr);
        else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
                err = mlxsw_sp_netdevice_router_port_event(dev);
        else if (mlxsw_sp_is_vrf_event(event, ptr))
                err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
        else if (mlxsw_sp_port_dev_check(dev))
                err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
        else if (netif_is_lag_master(dev))
                err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
        else if (is_vlan_dev(dev))
                err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

        return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
        .notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
        .notifier_call = mlxsw_sp_inetaddr_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
        .notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
        .notifier_call = mlxsw_sp_inet6addr_event,
};

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
        {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
        {0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
        .name = mlxsw_sp_driver_name,
        .id_table = mlxsw_sp_pci_id_table,
};

static int __init mlxsw_sp_module_init(void)
{
        int err;

        register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
        register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
        register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
        register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);

        err = mlxsw_core_driver_register(&mlxsw_sp_driver);
        if (err)
                goto err_core_driver_register;

        err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
        if (err)
                goto err_pci_driver_register;

        return 0;

err_pci_driver_register:
        mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
        unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
        unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
        unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
        unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
        return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
        mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
        mlxsw_core_driver_unregister(&mlxsw_sp_driver);
        unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
        unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
        unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
        unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);