/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "../mlxfw/mlxfw.h"

#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1530
#define MLXSW_FWREV_SUBMINOR 152
#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)

#define MLXSW_SP_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
	"." __stringify(MLXSW_FWREV_MINOR) \
	"." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
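
/* With the values above the file name resolves to
 * "mellanox/mlxsw_spectrum-13.1530.152.mfa2". mlxsw_sp_fw_rev_validate()
 * below accepts the running firmware only if its major number matches
 * exactly and its minor number maps to the same branch, where a branch
 * is taken to be minor / 100; otherwise this file is flashed.
 */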

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch
 * partition, where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID
 * mapping. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
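
/* Everything this driver injects is sent as a control packet; see
 * mlxsw_sp_txhdr_construct() below, which sets version 1, Ethernet
 * control/protocol, SWID 0, the control TClass, the destination local
 * port in 'port_mid' and a control 'type'.
 */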

struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
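
/* The common mlxfw code drives these callbacks in a fixed order: the
 * update handle is locked once, each component of the .mfa2 file is then
 * updated, downloaded block by block and verified, and finally the new
 * image is activated and the handle released; fsm_cancel is invoked if a
 * step fails. That sequencing lives in mlxfw_firmware_flash(), not in
 * this driver.
 */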

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query = mlxsw_sp_component_query,
	.fsm_lock = mlxsw_sp_fsm_lock,
	.fsm_component_update = mlxsw_sp_fsm_component_update,
	.fsm_block_download = mlxsw_sp_fsm_block_download,
	.fsm_component_verify = mlxsw_sp_fsm_component_verify,
	.fsm_activate = mlxsw_sp_fsm_activate,
	.fsm_query_state = mlxsw_sp_fsm_query_state,
	.fsm_cancel = mlxsw_sp_fsm_cancel,
	.fsm_release = mlxsw_sp_fsm_release
};

static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};

	return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}

static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct firmware *firmware;
	int err;

	/* Validate driver & FW are compatible */
	if (rev->major != MLXSW_FWREV_MAJOR) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, MLXSW_FWREV_MAJOR);
		return -EINVAL;
	}
	if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) ==
	    MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
		 MLXSW_SP_FW_FILENAME);

	err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			MLXSW_SP_FW_FILENAME);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
	return err;
}
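
/* Flow counters are allocated from the generic counter pool and cleared
 * before first use, so a caller always starts counting from zero. A
 * typical usage sketch (error handling omitted):
 *
 *	unsigned int idx;
 *	u64 packets, bytes;
 *
 *	mlxsw_sp_flow_counter_alloc(mlxsw_sp, &idx);
 *	mlxsw_sp_flow_counter_get(mlxsw_sp, idx, &packets, &bytes);
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, idx);
 */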

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
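
/* The device only distinguishes forwarding, learning and discarding,
 * so the listening, disabled and blocking bridge states are all mapped
 * to discarding below.
 */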

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == local_port)
			return curr;
	}
	return NULL;
}
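
/* SPAN entries are reference counted per mirror destination port:
 * mlxsw_sp_span_entry_get() takes a reference on an existing entry or
 * creates a new one, and mlxsw_sp_span_entry_put() destroys the entry
 * once its last user is gone.
 */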

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
					      port->local_port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}
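
/* An egress mirrored port needs an internal buffer (SBIB) large enough
 * to absorb the mirrored traffic. The sizing below is a heuristic:
 * roughly two and a half MTUs worth of cells, plus one spare cell.
 */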

static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type,
				  bool bind)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	int pa_id = span_entry->id;

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, bind, pa_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}

static int
mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	if (bind) {
		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
							true);
		if (err)
			goto err_port_bind;
	}

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_inspected_port_alloc:
	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
err_port_bind:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
			     struct mlxsw_sp_port *to,
			     enum mlxsw_sp_span_type type, bool bind)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port,
			      enum mlxsw_sp_span_type type, bool bind)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
					      destination_port);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}

static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}
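
/* PVID 0 means "no PVID": instead of remapping untagged traffic to some
 * VLAN, untagged frames are simply disallowed at ingress. On failure the
 * previous PVID is restored so hardware and 'pvid' stay in sync.
 */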

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
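
/* The Tx path prepends the hardware Tx header, so an skb without enough
 * headroom is reallocated first. The header is stripped by the device,
 * which is why its length is subtracted before the byte counters are
 * updated; a transmit that races against a full queue is dropped and
 * accounted as such.
 */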

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
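
/* Headroom sizing for priority groups: the Xoff threshold is two MTUs
 * worth of cells, and a delay allowance is reserved on top of it. For
 * PFC the delay comes from the DCB configuration in bit-times and is
 * converted to cells; for plain PAUSE a worst-case constant (100m cable,
 * maximum MTU) is used instead.
 */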

static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
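
/* IFLA_OFFLOAD_XSTATS_CPU_HIT exposes the software counters kept by the
 * driver, i.e. only traffic that actually traversed the CPU port, while
 * ndo_get_stats64 below reports the hardware counters, which also cover
 * traffic forwarded entirely in the switch.
 */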

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev,
					    int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id,
					   const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
					int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}

static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
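
/* A {port, VID} pair is modelled by an mlxsw_sp_port_vlan instance:
 * _get() returns the existing instance or creates a new one, and _put()
 * first detaches it from a bridge or router, if needed, and then
 * destroys it.
 */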

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return mlxsw_sp_port_vlan;

	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}

void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}

static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}
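
/* matchall offload: a mirror action is translated into a SPAN entry
 * towards the target port, inspecting either ingress or egress of the
 * mirrored port depending on the binding, and a sample action into
 * per-port sampling via the MPSC register. Only a single action per
 * filter is supported.
 */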

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;

	to_dev = tcf_mirred_dev(a);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type,
					true);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->to_local_port,
				 span_type, true);
}

static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_one_action(f->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	tcf_exts_to_list(f->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}

static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}

static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
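
/* Flower rules live on a shared tcf_block, so the flower callback is
 * registered once per block, with an acl_block as its private data that
 * is shared by all ports bound to the block and reference counted via
 * tcf_block_cb_incref()/decref(). The matchall callbacks above are, by
 * contrast, registered per port.
 */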

static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tcf_block *block, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb) {
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = __tcf_block_cb_register(block,
						   mlxsw_sp_setup_tc_block_cb_flower,
						   mlxsw_sp, acl_block);
		if (IS_ERR(block_cb)) {
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
	} else {
		acl_block = tcf_block_cb_priv(block_cb);
	}
	tcf_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	return 0;

err_block_bind:
	if (!tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block_cb);
err_cb_register:
		mlxsw_sp_acl_block_destroy(acl_block);
	}
	return err;
}

static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tcf_block *block, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = tcf_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block_cb);
		mlxsw_sp_acl_block_destroy(acl_block);
	}
}

static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_block_offload *f)
{
	tc_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	switch (f->command) {
	case TC_BLOCK_BIND:
		err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
					    mlxsw_sp_port);
		if (err)
			return err;
		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
							  f->block, ingress);
		if (err) {
			tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
			return err;
		}
		return 0;
	case TC_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
						      f->block, ingress);
		tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
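
/* Note the BIND/UNBIND symmetry above: BIND registers the matchall
 * callback first and unregisters it again if the flower bind fails,
 * while UNBIND tears the two down in the reverse order.
 */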

static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
	} else {
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
	}
	return 0;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}
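
/* Worked example: if dev->features contains NETIF_F_HW_TC and
 * wanted_features clears it, changes = NETIF_F_HW_TC and enable is
 * false, so the handler runs once to disable the feature; changed
 * bits other than 'feature' never invoke the handler.
 */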

static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
	.ndo_set_features	= mlxsw_sp_set_features,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)

static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}
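
/* With the tables above, mlxsw_sp_port_get_strings() lays out
 * MLXSW_SP_PORT_ETHTOOL_STATS_LEN = 19 + (8 + 2) * 8 = 99 strings:
 * the IEEE 802.3 counters first, then the per-priority and per-TC
 * counters for each of the eight priorities/TCs, in the same order in
 * which mlxsw_sp_port_get_stats() below fills the data array.
 */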

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};
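
/* Mapping between PTYS register speed bits and ethtool link modes.
 * The mapping is not one-to-one: several PTYS bits may fold into a
 * single ethtool mode (e.g. SGMII and 1000BASE_KX both report as
 * 1000baseKX/Full), so a round trip through ethtool may advertise
 * more PTYS bits than were originally set.
 */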

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed		= SPEED_100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool	= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed		= SPEED_1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed		= SPEED_20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed		= SPEED_100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)

static void
mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
				  struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			__set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_link_ksettings *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	cmd->base.speed = speed;
	cmd->base.duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static u32
mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}
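
/* Build the admin mask of every mode whose speed does not exceed
 * upper_speed. For example, an upper speed of 100000 (100 Gb/s)
 * selects every entry in the table above, while 50000 selects
 * everything up to and including the 50 Gb/s modes.
 */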

static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
					     struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
	mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
}

static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
					     struct ethtool_link_ksettings *cmd)
{
	if (!autoneg)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
}

static void
mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
				    struct ethtool_link_ksettings *cmd)
{
	if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
}

static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 autoneg_status;
	bool autoneg;
	int err;

	autoneg = mlxsw_sp_port->link.autoneg;
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  &eth_proto_oper);

	mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);

	mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);

	eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
	autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
	mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);

	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
					cmd);

	return 0;
}

static int
mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap, eth_proto_new;
	bool autoneg;
	int err;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);

	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
	eth_proto_new = autoneg ?
		mlxsw_sp_to_ptys_advert_link(cmd) :
		mlxsw_sp_to_ptys_speed(cmd->base.speed);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "No supported speed requested\n");
		return -EINVAL;
	}

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	/* Record the autoneg setting even when the interface is down, so
	 * a later GET reports what was requested.
	 */
	mlxsw_sp_port->link.autoneg = autoneg;

	if (!netif_running(dev))
		return 0;

	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return 0;
}

static int mlxsw_sp_flash_device(struct net_device *dev,
				 struct ethtool_flash *flash)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct firmware *firmware;
	int err;

	if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
		return -EOPNOTSUPP;

	dev_hold(dev);
	rtnl_unlock();

	err = request_firmware_direct(&firmware, flash->data, &dev->dev);
	if (err)
		goto out;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
out:
	rtnl_lock();
	dev_put(dev);
	return err;
}

#define MLXSW_SP_I2C_ADDR_LOW 0x50
#define MLXSW_SP_I2C_ADDR_HIGH 0x51
#define MLXSW_SP_EEPROM_PAGE_LENGTH 256

static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 offset, u16 size, void *data,
					unsigned int *p_read_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
	char mcia_pl[MLXSW_REG_MCIA_LEN];
	u16 i2c_addr;
	int status;
	int err;

	size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);

	if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
	    offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
		/* Cross-page read; clamp the read to the end of the lower
		 * page (offset 256).
		 */
		size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset;

	i2c_addr = MLXSW_SP_I2C_ADDR_LOW;
	if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) {
		i2c_addr = MLXSW_SP_I2C_ADDR_HIGH;
		offset -= MLXSW_SP_EEPROM_PAGE_LENGTH;
	}

	mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
			    0, 0, offset, size, i2c_addr);

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
	if (err)
		return err;

	status = mlxsw_reg_mcia_status_get(mcia_pl);
	if (status)
		return -EIO;

	mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
	memcpy(data, eeprom_tmp, size);
	*p_read_size = size;

	return 0;
}
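
/* The module EEPROM is exposed as two 256-byte pages behind two I2C
 * addresses. For example, a read at offset 300 is issued to
 * MLXSW_SP_I2C_ADDR_HIGH with the offset rebased to 44, and a 16-byte
 * read starting at offset 250 is clamped to 6 bytes so that it does
 * not cross the page boundary; callers loop on p_read_size to pick up
 * the remainder.
 */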

enum mlxsw_sp_eeprom_module_info_rev_id {
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC	= 0x00,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436		= 0x01,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636		= 0x03,
};

enum mlxsw_sp_eeprom_module_info_id {
	MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP		= 0x03,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP		= 0x0C,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS	= 0x0D,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28		= 0x11,
};

enum mlxsw_sp_eeprom_module_info {
	MLXSW_SP_EEPROM_MODULE_INFO_ID,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
	MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
};

static int mlxsw_sp_get_module_info(struct net_device *netdev,
				    struct ethtool_modinfo *modinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
	u8 module_rev_id, module_id;
	unsigned int read_size;
	int err;

	err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
					   MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
					   module_info, &read_size);
	if (err)
		return err;

	if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
		return -EIO;

	module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
	module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];

	switch (module_id) {
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
		modinfo->type = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		break;
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
		if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
		    module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		}
		break;
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee,
				      u8 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	int offset = ee->offset;
	unsigned int read_size;
	int i = 0;
	int err;

	if (!ee->len)
		return -EINVAL;

	memset(data, 0, ee->len);

	while (i < ee->len) {
		err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset,
						   ee->len - i, data + i,
						   &read_size);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n");
			return err;
		}

		i += read_size;
		offset += read_size;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
	.flash_device		= mlxsw_sp_flash_device,
	.get_module_info	= mlxsw_sp_get_module_info,
	.get_module_eeprom	= mlxsw_sp_get_module_eeprom,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
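
/* Assuming MLXSW_SP_PORT_BASE_SPEED of 25000 Mb/s (25 Gb/s per lane),
 * a 4-lane port gets upper_speed = 100000 and thus enables all modes
 * up to 100 Gb/s, while a single-lane (split) port is capped at
 * 25 Gb/s.
 */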

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the elements hierarchy, so that each TC is linked to
	 * one subgroup, and all subgroups are members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = 1;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_get;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev, mlxsw_sp_port->split,
				module);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
err_port_vlan_get:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
	kfree(mlxsw_sp->ports);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
						 GFP_KERNEL);
	if (!mlxsw_sp->port_to_module) {
		err = -ENOMEM;
		goto err_port_to_module_alloc;
	}

	for (i = 1; i < max_ports; i++) {
		/* Mark as invalid */
		mlxsw_sp->port_to_module[i] = -1;

		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
	kfree(mlxsw_sp->ports);
	return err;
}

static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}
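
/* Example, assuming MLXSW_SP_PORTS_PER_CLUSTER_MAX of 4: local port 7
 * has offset (7 - 1) % 4 = 2, so its cluster base port is 5 and the
 * cluster spans local ports 5-8.
 */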

static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	return err;
}

static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		if (mlxsw_sp->port_to_module[local_port] < 0)
			continue;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
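
/* Example: unsplitting a split-by-four cluster based at local port 5
 * re-creates count / 2 = 2 full-width ports at local ports 5 and 7,
 * while unsplitting a split-by-two pair re-creates only one.
 */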

static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
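
/* In the unsplit path above, a split-by-two pair occupies only half a
 * four-port cluster, so when count == 2 and the port sits in the upper
 * half (local_port >= base_port + 2) the base is bumped by two: e.g.
 * unsplitting local port 7 in cluster 5-8 removes ports 7 and 8 and
 * re-creates a single full-width port at 7.
 */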

static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	skb->offload_mr_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	size = mlxsw_sp_port->sample->truncate ?
		       mlxsw_sp_port->sample->trunc_size : skb->len;

	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}

#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)	\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
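
/* In the listener table below, the MARK variants set
 * skb->offload_fwd_mark on the trapped packet to tell the stack that
 * the ASIC already handled L2 forwarding, so the bridge must not
 * forward the CPU copy again; MR_MARK additionally sets
 * offload_mr_fwd_mark for packets already multicast-routed in
 * hardware.
 */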

static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
};

static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			is_bytes = true;
			rate = 4 * 1024;
			burst_size = 4;
			break;
		default:
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
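
/* Note the policy encoded above: slow control protocols (STP, LACP, LLDP,
 * OSPF, PIM, RPF) get a small packet budget, IGMP/MLD and other host-bound
 * groups get larger ones, and IP2ME is the only group policed in bytes
 * rather than packets.
 */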

static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &mlxsw_sp_listener[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;
	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
}

static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	int err;

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
		return err;
	}

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}
initialize switchdev\n"); 3983 goto err_switchdev_init; 3984 } 3985 3986 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 3987 if (err) { 3988 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 3989 goto err_counter_pool_init; 3990 } 3991 3992 err = mlxsw_sp_afa_init(mlxsw_sp); 3993 if (err) { 3994 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 3995 goto err_afa_init; 3996 } 3997 3998 err = mlxsw_sp_router_init(mlxsw_sp); 3999 if (err) { 4000 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 4001 goto err_router_init; 4002 } 4003 4004 /* Initialize netdevice notifier after router is initialized, so that 4005 * the event handler can use router structures. 4006 */ 4007 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 4008 err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb); 4009 if (err) { 4010 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 4011 goto err_netdev_notifier; 4012 } 4013 4014 err = mlxsw_sp_span_init(mlxsw_sp); 4015 if (err) { 4016 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 4017 goto err_span_init; 4018 } 4019 4020 err = mlxsw_sp_acl_init(mlxsw_sp); 4021 if (err) { 4022 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 4023 goto err_acl_init; 4024 } 4025 4026 err = mlxsw_sp_dpipe_init(mlxsw_sp); 4027 if (err) { 4028 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 4029 goto err_dpipe_init; 4030 } 4031 4032 err = mlxsw_sp_ports_create(mlxsw_sp); 4033 if (err) { 4034 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 4035 goto err_ports_create; 4036 } 4037 4038 return 0; 4039 4040 err_ports_create: 4041 mlxsw_sp_dpipe_fini(mlxsw_sp); 4042 err_dpipe_init: 4043 mlxsw_sp_acl_fini(mlxsw_sp); 4044 err_acl_init: 4045 mlxsw_sp_span_fini(mlxsw_sp); 4046 err_span_init: 4047 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 4048 err_netdev_notifier: 4049 mlxsw_sp_router_fini(mlxsw_sp); 4050 err_router_init: 4051 mlxsw_sp_afa_fini(mlxsw_sp); 4052 err_afa_init: 4053 mlxsw_sp_counter_pool_fini(mlxsw_sp); 4054 err_counter_pool_init: 4055 mlxsw_sp_switchdev_fini(mlxsw_sp); 4056 err_switchdev_init: 4057 mlxsw_sp_lag_fini(mlxsw_sp); 4058 err_lag_init: 4059 mlxsw_sp_buffers_fini(mlxsw_sp); 4060 err_buffers_init: 4061 mlxsw_sp_traps_fini(mlxsw_sp); 4062 err_traps_init: 4063 mlxsw_sp_fids_fini(mlxsw_sp); 4064 err_fids_init: 4065 mlxsw_sp_kvdl_fini(mlxsw_sp); 4066 return err; 4067 } 4068 4069 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 4070 { 4071 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4072 4073 mlxsw_sp_ports_remove(mlxsw_sp); 4074 mlxsw_sp_dpipe_fini(mlxsw_sp); 4075 mlxsw_sp_acl_fini(mlxsw_sp); 4076 mlxsw_sp_span_fini(mlxsw_sp); 4077 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 4078 mlxsw_sp_router_fini(mlxsw_sp); 4079 mlxsw_sp_afa_fini(mlxsw_sp); 4080 mlxsw_sp_counter_pool_fini(mlxsw_sp); 4081 mlxsw_sp_switchdev_fini(mlxsw_sp); 4082 mlxsw_sp_lag_fini(mlxsw_sp); 4083 mlxsw_sp_buffers_fini(mlxsw_sp); 4084 mlxsw_sp_traps_fini(mlxsw_sp); 4085 mlxsw_sp_fids_fini(mlxsw_sp); 4086 mlxsw_sp_kvdl_fini(mlxsw_sp); 4087 } 4088 4089 static const struct mlxsw_config_profile mlxsw_sp_config_profile = { 4090 .used_max_vepa_channels = 1, 4091 .max_vepa_channels = 0, 4092 .used_max_mid = 1, 4093 .max_mid = MLXSW_SP_MID_MAX, 4094 .used_max_pgt = 1, 4095 .max_pgt = 0, 4096 .used_flood_tables = 1, 4097 .used_flood_mode = 1, 4098 .flood_mode = 3, 4099 .max_fid_offset_flood_tables = 3, 4100 

static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels = 1,
	.max_vepa_channels = 0,
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_max_pgt = 1,
	.max_pgt = 0,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_offset_flood_tables = 3,
	.fid_offset_flood_table_size = VLAN_N_VID - 1,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_kvd_split_data = 1,
	.kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
	.kvd_hash_single_parts = 59,
	.kvd_hash_double_parts = 41,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.resource_query_enable = 1,
};

static bool
mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack,
					   u64 size)
{
	const struct mlxsw_config_profile *profile;

	profile = &mlxsw_sp_config_profile;
	if (size % profile->kvd_hash_granularity) {
		NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity");
		return false;
	}
	return true;
}

static int
mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size,
				    struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed");
	return -EINVAL;
}

static int
mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size,
					   struct netlink_ext_ack *extack)
{
	if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
		return -EINVAL;

	return 0;
}

static int
mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
		return -EINVAL;

	if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) {
		NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum");
		return -EINVAL;
	}
	return 0;
}

static int
mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
		return -EINVAL;

	if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) {
		NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum");
		return -EINVAL;
	}
	return 0;
}

static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
}

static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = {
	.size_validate = mlxsw_sp_resource_kvd_size_validate,
};

static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
	.size_validate = mlxsw_sp_resource_kvd_linear_size_validate,
	.occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
};

static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
	.size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
};

static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
	.size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
};

static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
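
/* The devlink size parameters depend on device resources that are only
 * known at probe time, so they are filled in by the prepare function below
 * rather than being initialized statically.
 */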

static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* KVD top resource */
	mlxsw_sp_kvd_size_params.size_min = kvd_size;
	mlxsw_sp_kvd_size_params.size_max = kvd_size;
	mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
	mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;

	/* Linear part init */
	mlxsw_sp_linear_size_params.size_min = linear_size_min;
	mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
					       double_size_min;
	mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
	mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;

	/* Hash double part init */
	mlxsw_sp_hash_double_size_params.size_min = double_size_min;
	mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
						    linear_size_min;
	mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
	mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;

	/* Hash single part init */
	mlxsw_sp_hash_single_size_params.size_min = single_size_min;
	mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
						    linear_size_min;
	mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
	mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
}

static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core);
	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					true, kvd_size,
					MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&mlxsw_sp_kvd_size_params,
					&mlxsw_sp_resource_kvd_ops);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					false, linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&mlxsw_sp_linear_size_params,
					&mlxsw_sp_resource_kvd_linear_ops);
	if (err)
		return err;
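
	/* What remains of the KVD after the linear part is split between
	 * double and single hash entries according to the parts ratio from
	 * the profile (41:59 here), rounded down to the hash granularity.
	 */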

	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, profile->kvd_hash_granularity);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					false, double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&mlxsw_sp_hash_double_size_params,
					&mlxsw_sp_resource_kvd_hash_double_ops);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					false, single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&mlxsw_sp_hash_single_size_params,
					&mlxsw_sp_resource_kvd_hash_single_ops);
	if (err)
		return err;

	return 0;
}

static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    !profile->used_kvd_split_data)
		return -EIO;

	/* The hash part is what is left of the KVD without the linear part.
	 * It is split into the single and double sizes by the parts ratio
	 * from the profile. Both sizes must be a multiple of the granularity
	 * from the profile. If the user provided the sizes, they are
	 * obtained via devlink instead.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   profile->kvd_hash_granularity);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind = mlxsw_sp_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp_resources_register,
	.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp_config_profile,
};

bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}
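
/* The _rcu variant below is for callers that already hold rcu_read_lock();
 * mlxsw_sp_port_lower_dev_hold() builds on it and takes a reference on the
 * port netdevice so the port can be used after the lock is dropped.
 */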

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}
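
/* A LAG id found above is shared for as long as its upper device matches,
 * with sharing tracked through the ref_count of the mlxsw_sp_upper entry;
 * otherwise the first free slot is claimed.
 */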

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG(extack,
			       "spectrum: Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG(extack,
			       "spectrum: LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
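
/* The SLCOR (collector) records used above control membership on the Rx
 * side of the LAG; the SLDR (distributor) records below control whether
 * the port may transmit for the LAG, and follow the bond's tx_enabled
 * state.
 */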

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
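
/* The notifier handlers below validate which upper devices a switch port
 * may be enslaved to (bridge, LAG, OVS or VLAN) at PRECHANGEUPPER time and
 * mirror the final topology into the device at CHANGEUPPER time.
 */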

static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}
netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 4895 } 4896 break; 4897 } 4898 4899 return 0; 4900 } 4901 4902 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 4903 struct net_device *port_dev, 4904 unsigned long event, void *ptr) 4905 { 4906 switch (event) { 4907 case NETDEV_PRECHANGEUPPER: 4908 case NETDEV_CHANGEUPPER: 4909 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 4910 event, ptr); 4911 case NETDEV_CHANGELOWERSTATE: 4912 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 4913 ptr); 4914 } 4915 4916 return 0; 4917 } 4918 4919 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 4920 unsigned long event, void *ptr) 4921 { 4922 struct net_device *dev; 4923 struct list_head *iter; 4924 int ret; 4925 4926 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4927 if (mlxsw_sp_port_dev_check(dev)) { 4928 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 4929 ptr); 4930 if (ret) 4931 return ret; 4932 } 4933 } 4934 4935 return 0; 4936 } 4937 4938 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 4939 struct net_device *dev, 4940 unsigned long event, void *ptr, 4941 u16 vid) 4942 { 4943 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 4944 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4945 struct netdev_notifier_changeupper_info *info = ptr; 4946 struct netlink_ext_ack *extack; 4947 struct net_device *upper_dev; 4948 int err = 0; 4949 4950 extack = netdev_notifier_info_to_extack(&info->info); 4951 4952 switch (event) { 4953 case NETDEV_PRECHANGEUPPER: 4954 upper_dev = info->upper_dev; 4955 if (!netif_is_bridge_master(upper_dev)) { 4956 NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers"); 4957 return -EINVAL; 4958 } 4959 if (!info->linking) 4960 break; 4961 if (netdev_has_any_upper_dev(upper_dev) && 4962 (!netif_is_bridge_master(upper_dev) || 4963 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4964 upper_dev))) { 4965 NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported"); 4966 return -EINVAL; 4967 } 4968 break; 4969 case NETDEV_CHANGEUPPER: 4970 upper_dev = info->upper_dev; 4971 if (netif_is_bridge_master(upper_dev)) { 4972 if (info->linking) 4973 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4974 vlan_dev, 4975 upper_dev, 4976 extack); 4977 else 4978 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4979 vlan_dev, 4980 upper_dev); 4981 } else { 4982 err = -EINVAL; 4983 WARN_ON(1); 4984 } 4985 break; 4986 } 4987 4988 return err; 4989 } 4990 4991 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 4992 struct net_device *lag_dev, 4993 unsigned long event, 4994 void *ptr, u16 vid) 4995 { 4996 struct net_device *dev; 4997 struct list_head *iter; 4998 int ret; 4999 5000 netdev_for_each_lower_dev(lag_dev, dev, iter) { 5001 if (mlxsw_sp_port_dev_check(dev)) { 5002 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 5003 event, ptr, 5004 vid); 5005 if (ret) 5006 return ret; 5007 } 5008 } 5009 5010 return 0; 5011 } 5012 5013 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 5014 unsigned long event, void *ptr) 5015 { 5016 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 5017 u16 vid = vlan_dev_vlan_id(vlan_dev); 5018 5019 if (mlxsw_sp_port_dev_check(real_dev)) 5020 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 5021 event, ptr, vid); 5022 else if (netif_is_lag_master(real_dev)) 5023 return 

static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_event,
};

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);