1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/random.h> 25 #include <net/switchdev.h> 26 #include <net/pkt_cls.h> 27 #include <net/tc_act/tc_mirred.h> 28 #include <net/netevent.h> 29 #include <net/tc_act/tc_sample.h> 30 #include <net/addrconf.h> 31 32 #include "spectrum.h" 33 #include "pci.h" 34 #include "core.h" 35 #include "core_env.h" 36 #include "reg.h" 37 #include "port.h" 38 #include "trap.h" 39 #include "txheader.h" 40 #include "spectrum_cnt.h" 41 #include "spectrum_dpipe.h" 42 #include "spectrum_acl_flex_actions.h" 43 #include "spectrum_span.h" 44 #include "../mlxfw/mlxfw.h" 45 46 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) 47 48 #define MLXSW_SP1_FWREV_MAJOR 13 49 #define MLXSW_SP1_FWREV_MINOR 1910 50 #define MLXSW_SP1_FWREV_SUBMINOR 622 51 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 52 53 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 54 .major = MLXSW_SP1_FWREV_MAJOR, 55 .minor = MLXSW_SP1_FWREV_MINOR, 56 .subminor = MLXSW_SP1_FWREV_SUBMINOR, 57 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 58 }; 59 60 #define MLXSW_SP1_FW_FILENAME \ 61 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 62 "." __stringify(MLXSW_SP1_FWREV_MINOR) \ 63 "." 
__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" 64 65 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 66 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 67 static const char mlxsw_sp_driver_version[] = "1.0"; 68 69 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 70 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 71 }; 72 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 73 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 74 }; 75 76 /* tx_hdr_version 77 * Tx header version. 78 * Must be set to 1. 79 */ 80 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 81 82 /* tx_hdr_ctl 83 * Packet control type. 84 * 0 - Ethernet control (e.g. EMADs, LACP) 85 * 1 - Ethernet data 86 */ 87 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 88 89 /* tx_hdr_proto 90 * Packet protocol type. Must be set to 1 (Ethernet). 91 */ 92 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 93 94 /* tx_hdr_rx_is_router 95 * Packet is sent from the router. Valid for data packets only. 96 */ 97 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 98 99 /* tx_hdr_fid_valid 100 * Indicates if the 'fid' field is valid and should be used for 101 * forwarding lookup. Valid for data packets only. 102 */ 103 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 104 105 /* tx_hdr_swid 106 * Switch partition ID. Must be set to 0. 107 */ 108 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 109 110 /* tx_hdr_control_tclass 111 * Indicates if the packet should use the control TClass and not one 112 * of the data TClasses. 113 */ 114 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 115 116 /* tx_hdr_etclass 117 * Egress TClass to be used on the egress device on the egress port. 118 */ 119 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 120 121 /* tx_hdr_port_mid 122 * Destination local port for unicast packets. 123 * Destination multicast ID for multicast packets. 
124 * 125 * Control packets are directed to a specific egress port, while data 126 * packets are transmitted through the CPU port (0) into the switch partition, 127 * where forwarding rules are applied. 128 */ 129 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 130 131 /* tx_hdr_fid 132 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 133 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 134 * Valid for data packets only. 135 */ 136 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 137 138 /* tx_hdr_type 139 * 0 - Data packets 140 * 6 - Control packets 141 */ 142 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 143 144 struct mlxsw_sp_mlxfw_dev { 145 struct mlxfw_dev mlxfw_dev; 146 struct mlxsw_sp *mlxsw_sp; 147 }; 148 149 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 150 u16 component_index, u32 *p_max_size, 151 u8 *p_align_bits, u16 *p_max_write_size) 152 { 153 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 154 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 155 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 156 char mcqi_pl[MLXSW_REG_MCQI_LEN]; 157 int err; 158 159 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 160 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 161 if (err) 162 return err; 163 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 164 p_max_write_size); 165 166 *p_align_bits = max_t(u8, *p_align_bits, 2); 167 *p_max_write_size = min_t(u16, *p_max_write_size, 168 MLXSW_REG_MCDA_MAX_DATA_LEN); 169 return 0; 170 } 171 172 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 173 { 174 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 175 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 176 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 177 char mcc_pl[MLXSW_REG_MCC_LEN]; 178 u8 control_state; 179 int err; 180 181 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 182 err = mlxsw_reg_query(mlxsw_sp->core, 
MLXSW_REG(mcc), mcc_pl); 183 if (err) 184 return err; 185 186 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 187 if (control_state != MLXFW_FSM_STATE_IDLE) 188 return -EBUSY; 189 190 mlxsw_reg_mcc_pack(mcc_pl, 191 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 192 0, *fwhandle, 0); 193 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 194 } 195 196 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 197 u32 fwhandle, u16 component_index, 198 u32 component_size) 199 { 200 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 201 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 202 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 203 char mcc_pl[MLXSW_REG_MCC_LEN]; 204 205 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 206 component_index, fwhandle, component_size); 207 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 208 } 209 210 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 211 u32 fwhandle, u8 *data, u16 size, 212 u32 offset) 213 { 214 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 215 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 216 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 217 char mcda_pl[MLXSW_REG_MCDA_LEN]; 218 219 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 220 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 221 } 222 223 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 224 u32 fwhandle, u16 component_index) 225 { 226 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 227 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 228 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 229 char mcc_pl[MLXSW_REG_MCC_LEN]; 230 231 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 232 component_index, fwhandle, 0); 233 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 234 } 235 236 static int 
mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 237 { 238 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 239 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 240 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 241 char mcc_pl[MLXSW_REG_MCC_LEN]; 242 243 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 244 fwhandle, 0); 245 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 246 } 247 248 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 249 enum mlxfw_fsm_state *fsm_state, 250 enum mlxfw_fsm_state_err *fsm_state_err) 251 { 252 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 253 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 254 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 255 char mcc_pl[MLXSW_REG_MCC_LEN]; 256 u8 control_state; 257 u8 error_code; 258 int err; 259 260 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 261 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 262 if (err) 263 return err; 264 265 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 266 *fsm_state = control_state; 267 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 268 MLXFW_FSM_STATE_ERR_MAX); 269 return 0; 270 } 271 272 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 273 { 274 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 275 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 276 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 277 char mcc_pl[MLXSW_REG_MCC_LEN]; 278 279 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 280 fwhandle, 0); 281 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 282 } 283 284 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 285 { 286 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 287 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 288 struct mlxsw_sp *mlxsw_sp = 
mlxsw_sp_mlxfw_dev->mlxsw_sp; 289 char mcc_pl[MLXSW_REG_MCC_LEN]; 290 291 mlxsw_reg_mcc_pack(mcc_pl, 292 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 293 fwhandle, 0); 294 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 295 } 296 297 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 298 .component_query = mlxsw_sp_component_query, 299 .fsm_lock = mlxsw_sp_fsm_lock, 300 .fsm_component_update = mlxsw_sp_fsm_component_update, 301 .fsm_block_download = mlxsw_sp_fsm_block_download, 302 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 303 .fsm_activate = mlxsw_sp_fsm_activate, 304 .fsm_query_state = mlxsw_sp_fsm_query_state, 305 .fsm_cancel = mlxsw_sp_fsm_cancel, 306 .fsm_release = mlxsw_sp_fsm_release 307 }; 308 309 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 310 const struct firmware *firmware) 311 { 312 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 313 .mlxfw_dev = { 314 .ops = &mlxsw_sp_mlxfw_dev_ops, 315 .psid = mlxsw_sp->bus_info->psid, 316 .psid_size = strlen(mlxsw_sp->bus_info->psid), 317 }, 318 .mlxsw_sp = mlxsw_sp 319 }; 320 int err; 321 322 mlxsw_core_fw_flash_start(mlxsw_sp->core); 323 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware); 324 mlxsw_core_fw_flash_end(mlxsw_sp->core); 325 326 return err; 327 } 328 329 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 330 { 331 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 332 const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 333 const char *fw_filename = mlxsw_sp->fw_filename; 334 union devlink_param_value value; 335 const struct firmware *firmware; 336 int err; 337 338 /* Don't check if driver does not require it */ 339 if (!req_rev || !fw_filename) 340 return 0; 341 342 /* Don't check if devlink 'fw_load_policy' param is 'flash' */ 343 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), 344 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 345 &value); 346 if (err) 347 return err; 348 if 
(value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) 349 return 0; 350 351 /* Validate driver & FW are compatible */ 352 if (rev->major != req_rev->major) { 353 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 354 rev->major, req_rev->major); 355 return -EINVAL; 356 } 357 if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == 358 MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && 359 (rev->minor > req_rev->minor || 360 (rev->minor == req_rev->minor && 361 rev->subminor >= req_rev->subminor))) 362 return 0; 363 364 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", 365 rev->major, rev->minor, rev->subminor); 366 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 367 fw_filename); 368 369 err = request_firmware_direct(&firmware, fw_filename, 370 mlxsw_sp->bus_info->dev); 371 if (err) { 372 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 373 fw_filename); 374 return err; 375 } 376 377 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware); 378 release_firmware(firmware); 379 if (err) 380 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 381 382 /* On FW flash success, tell the caller FW reset is needed 383 * if current FW supports it. 384 */ 385 if (rev->minor >= req_rev->can_reset_minor) 386 return err ? 
err : -EAGAIN; 387 else 388 return 0; 389 } 390 391 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 392 unsigned int counter_index, u64 *packets, 393 u64 *bytes) 394 { 395 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 396 int err; 397 398 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 399 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 400 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 401 if (err) 402 return err; 403 if (packets) 404 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 405 if (bytes) 406 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 407 return 0; 408 } 409 410 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 411 unsigned int counter_index) 412 { 413 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 414 415 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 416 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 417 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 418 } 419 420 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 421 unsigned int *p_counter_index) 422 { 423 int err; 424 425 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 426 p_counter_index); 427 if (err) 428 return err; 429 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 430 if (err) 431 goto err_counter_clear; 432 return 0; 433 434 err_counter_clear: 435 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 436 *p_counter_index); 437 return err; 438 } 439 440 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 441 unsigned int counter_index) 442 { 443 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 444 counter_index); 445 } 446 447 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 448 const struct mlxsw_tx_info *tx_info) 449 { 450 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 451 452 memset(txhdr, 0, MLXSW_TXHDR_LEN); 453 454 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 455 mlxsw_tx_hdr_ctl_set(txhdr, 
MLXSW_TXHDR_ETH_CTL); 456 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 457 mlxsw_tx_hdr_swid_set(txhdr, 0); 458 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 459 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 460 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 461 } 462 463 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 464 { 465 switch (state) { 466 case BR_STATE_FORWARDING: 467 return MLXSW_REG_SPMS_STATE_FORWARDING; 468 case BR_STATE_LEARNING: 469 return MLXSW_REG_SPMS_STATE_LEARNING; 470 case BR_STATE_LISTENING: /* fall-through */ 471 case BR_STATE_DISABLED: /* fall-through */ 472 case BR_STATE_BLOCKING: 473 return MLXSW_REG_SPMS_STATE_DISCARDING; 474 default: 475 BUG(); 476 } 477 } 478 479 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 480 u8 state) 481 { 482 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 483 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 484 char *spms_pl; 485 int err; 486 487 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 488 if (!spms_pl) 489 return -ENOMEM; 490 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 491 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 492 493 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 494 kfree(spms_pl); 495 return err; 496 } 497 498 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 499 { 500 char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 501 int err; 502 503 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 504 if (err) 505 return err; 506 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 507 return 0; 508 } 509 510 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, 511 bool enable, u32 rate) 512 { 513 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 514 char mpsc_pl[MLXSW_REG_MPSC_LEN]; 515 516 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); 517 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), 
mpsc_pl); 518 } 519 520 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 521 bool is_up) 522 { 523 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 524 char paos_pl[MLXSW_REG_PAOS_LEN]; 525 526 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 527 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 528 MLXSW_PORT_ADMIN_STATUS_DOWN); 529 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 530 } 531 532 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 533 unsigned char *addr) 534 { 535 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 536 char ppad_pl[MLXSW_REG_PPAD_LEN]; 537 538 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 539 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 540 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 541 } 542 543 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 544 { 545 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 546 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 547 548 ether_addr_copy(addr, mlxsw_sp->base_mac); 549 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 550 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 551 } 552 553 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 554 { 555 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 556 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 557 int max_mtu; 558 int err; 559 560 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 561 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 562 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 563 if (err) 564 return err; 565 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 566 567 if (mtu > max_mtu) 568 return -EINVAL; 569 570 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 571 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 572 } 573 574 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 575 { 576 struct mlxsw_sp 
*mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 577 char pspa_pl[MLXSW_REG_PSPA_LEN]; 578 579 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 580 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 581 } 582 583 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 584 { 585 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 586 char svpe_pl[MLXSW_REG_SVPE_LEN]; 587 588 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 589 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 590 } 591 592 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 593 bool learn_enable) 594 { 595 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 596 char *spvmlr_pl; 597 int err; 598 599 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 600 if (!spvmlr_pl) 601 return -ENOMEM; 602 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 603 learn_enable); 604 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 605 kfree(spvmlr_pl); 606 return err; 607 } 608 609 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 610 u16 vid) 611 { 612 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 613 char spvid_pl[MLXSW_REG_SPVID_LEN]; 614 615 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 616 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 617 } 618 619 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 620 bool allow) 621 { 622 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 623 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 624 625 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 626 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 627 } 628 629 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 630 { 631 int err; 632 633 if (!vid) { 634 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 635 if (err) 636 return 
err; 637 } else { 638 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 639 if (err) 640 return err; 641 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 642 if (err) 643 goto err_port_allow_untagged_set; 644 } 645 646 mlxsw_sp_port->pvid = vid; 647 return 0; 648 649 err_port_allow_untagged_set: 650 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 651 return err; 652 } 653 654 static int 655 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 656 { 657 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 658 char sspr_pl[MLXSW_REG_SSPR_LEN]; 659 660 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 661 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 662 } 663 664 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, 665 u8 local_port, u8 *p_module, 666 u8 *p_width, u8 *p_lane) 667 { 668 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 669 int err; 670 671 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 672 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 673 if (err) 674 return err; 675 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 676 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); 677 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 678 return 0; 679 } 680 681 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port, 682 u8 module, u8 width, u8 lane) 683 { 684 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 685 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 686 int i; 687 688 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 689 mlxsw_reg_pmlp_width_set(pmlp_pl, width); 690 for (i = 0; i < width; i++) { 691 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module); 692 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */ 693 } 694 695 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 696 } 697 698 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 699 { 700 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 701 char 
pmlp_pl[MLXSW_REG_PMLP_LEN]; 702 703 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 704 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 705 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 706 } 707 708 static int mlxsw_sp_port_open(struct net_device *dev) 709 { 710 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 711 int err; 712 713 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 714 if (err) 715 return err; 716 netif_start_queue(dev); 717 return 0; 718 } 719 720 static int mlxsw_sp_port_stop(struct net_device *dev) 721 { 722 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 723 724 netif_stop_queue(dev); 725 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 726 } 727 728 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 729 struct net_device *dev) 730 { 731 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 732 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 733 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 734 const struct mlxsw_tx_info tx_info = { 735 .local_port = mlxsw_sp_port->local_port, 736 .is_emad = false, 737 }; 738 u64 len; 739 int err; 740 741 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 742 return NETDEV_TX_BUSY; 743 744 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { 745 struct sk_buff *skb_orig = skb; 746 747 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); 748 if (!skb) { 749 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 750 dev_kfree_skb_any(skb_orig); 751 return NETDEV_TX_OK; 752 } 753 dev_consume_skb_any(skb_orig); 754 } 755 756 if (eth_skb_pad(skb)) { 757 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 758 return NETDEV_TX_OK; 759 } 760 761 mlxsw_sp_txhdr_construct(skb, &tx_info); 762 /* TX header is consumed by HW on the way so we shouldn't count its 763 * bytes as being sent. 764 */ 765 len = skb->len - MLXSW_TXHDR_LEN; 766 767 /* Due to a race we might fail here because of a full queue. 
In that 768 * unlikely case we simply drop the packet. 769 */ 770 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 771 772 if (!err) { 773 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 774 u64_stats_update_begin(&pcpu_stats->syncp); 775 pcpu_stats->tx_packets++; 776 pcpu_stats->tx_bytes += len; 777 u64_stats_update_end(&pcpu_stats->syncp); 778 } else { 779 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 780 dev_kfree_skb_any(skb); 781 } 782 return NETDEV_TX_OK; 783 } 784 785 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 786 { 787 } 788 789 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 790 { 791 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 792 struct sockaddr *addr = p; 793 int err; 794 795 if (!is_valid_ether_addr(addr->sa_data)) 796 return -EADDRNOTAVAIL; 797 798 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 799 if (err) 800 return err; 801 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 802 return 0; 803 } 804 805 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 806 int mtu) 807 { 808 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 809 } 810 811 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 812 813 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 814 u16 delay) 815 { 816 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 817 BITS_PER_BYTE)); 818 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 819 mtu); 820 } 821 822 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 823 * Assumes 100m cable and maximum MTU. 
824 */ 825 #define MLXSW_SP_PAUSE_DELAY 58752 826 827 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 828 u16 delay, bool pfc, bool pause) 829 { 830 if (pfc) 831 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 832 else if (pause) 833 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 834 else 835 return 0; 836 } 837 838 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 839 bool lossy) 840 { 841 if (lossy) 842 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 843 else 844 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 845 thres); 846 } 847 848 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 849 u8 *prio_tc, bool pause_en, 850 struct ieee_pfc *my_pfc) 851 { 852 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 853 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 854 u16 delay = !!my_pfc ? my_pfc->delay : 0; 855 char pbmc_pl[MLXSW_REG_PBMC_LEN]; 856 u32 taken_headroom_cells = 0; 857 u32 max_headroom_cells; 858 int i, j, err; 859 860 max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp); 861 862 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); 863 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 864 if (err) 865 return err; 866 867 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 868 bool configure = false; 869 bool pfc = false; 870 u16 thres_cells; 871 u16 delay_cells; 872 u16 total_cells; 873 bool lossy; 874 875 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 876 if (prio_tc[j] == i) { 877 pfc = pfc_en & BIT(j); 878 configure = true; 879 break; 880 } 881 } 882 883 if (!configure) 884 continue; 885 886 lossy = !(pfc || pause_en); 887 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 888 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, 889 pfc, pause_en); 890 total_cells = thres_cells + delay_cells; 891 892 taken_headroom_cells += total_cells; 893 if (taken_headroom_cells > max_headroom_cells) 894 
return -ENOBUFS; 895 896 mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells, 897 thres_cells, lossy); 898 } 899 900 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 901 } 902 903 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, 904 int mtu, bool pause_en) 905 { 906 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; 907 bool dcb_en = !!mlxsw_sp_port->dcb.ets; 908 struct ieee_pfc *my_pfc; 909 u8 *prio_tc; 910 911 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; 912 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL; 913 914 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, 915 pause_en, my_pfc); 916 } 917 918 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 919 { 920 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 921 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 922 int err; 923 924 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); 925 if (err) 926 return err; 927 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); 928 if (err) 929 goto err_span_port_mtu_update; 930 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 931 if (err) 932 goto err_port_mtu_set; 933 dev->mtu = mtu; 934 return 0; 935 936 err_port_mtu_set: 937 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); 938 err_span_port_mtu_update: 939 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 940 return err; 941 } 942 943 static int 944 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 945 struct rtnl_link_stats64 *stats) 946 { 947 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 948 struct mlxsw_sp_port_pcpu_stats *p; 949 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 950 u32 tx_dropped = 0; 951 unsigned int start; 952 int i; 953 954 for_each_possible_cpu(i) { 955 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 956 do { 957 start = u64_stats_fetch_begin_irq(&p->syncp); 958 rx_packets = p->rx_packets; 959 rx_bytes = p->rx_bytes; 960 tx_packets = p->tx_packets; 
961 tx_bytes = p->tx_bytes; 962 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 963 964 stats->rx_packets += rx_packets; 965 stats->rx_bytes += rx_bytes; 966 stats->tx_packets += tx_packets; 967 stats->tx_bytes += tx_bytes; 968 /* tx_dropped is u32, updated without syncp protection. */ 969 tx_dropped += p->tx_dropped; 970 } 971 stats->tx_dropped = tx_dropped; 972 return 0; 973 } 974 975 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) 976 { 977 switch (attr_id) { 978 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 979 return true; 980 } 981 982 return false; 983 } 984 985 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev, 986 void *sp) 987 { 988 switch (attr_id) { 989 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 990 return mlxsw_sp_port_get_sw_stats64(dev, sp); 991 } 992 993 return -EINVAL; 994 } 995 996 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, 997 int prio, char *ppcnt_pl) 998 { 999 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1000 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1001 1002 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); 1003 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 1004 } 1005 1006 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev, 1007 struct rtnl_link_stats64 *stats) 1008 { 1009 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1010 int err; 1011 1012 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 1013 0, ppcnt_pl); 1014 if (err) 1015 goto out; 1016 1017 stats->tx_packets = 1018 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); 1019 stats->rx_packets = 1020 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); 1021 stats->tx_bytes = 1022 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); 1023 stats->rx_bytes = 1024 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 1025 stats->multicast = 1026 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 1027 1028 
stats->rx_crc_errors = 1029 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 1030 stats->rx_frame_errors = 1031 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 1032 1033 stats->rx_length_errors = ( 1034 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) + 1035 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) + 1036 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl)); 1037 1038 stats->rx_errors = (stats->rx_crc_errors + 1039 stats->rx_frame_errors + stats->rx_length_errors); 1040 1041 out: 1042 return err; 1043 } 1044 1045 static void 1046 mlxsw_sp_port_get_hw_xstats(struct net_device *dev, 1047 struct mlxsw_sp_port_xstats *xstats) 1048 { 1049 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1050 int err, i; 1051 1052 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 1053 ppcnt_pl); 1054 if (!err) 1055 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); 1056 1057 for (i = 0; i < TC_MAX_QUEUE; i++) { 1058 err = mlxsw_sp_port_get_stats_raw(dev, 1059 MLXSW_REG_PPCNT_TC_CONG_TC, 1060 i, ppcnt_pl); 1061 if (!err) 1062 xstats->wred_drop[i] = 1063 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); 1064 1065 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, 1066 i, ppcnt_pl); 1067 if (err) 1068 continue; 1069 1070 xstats->backlog[i] = 1071 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 1072 xstats->tail_drop[i] = 1073 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); 1074 } 1075 1076 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1077 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, 1078 i, ppcnt_pl); 1079 if (err) 1080 continue; 1081 1082 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); 1083 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); 1084 } 1085 } 1086 1087 static void update_stats_cache(struct work_struct *work) 1088 { 1089 struct mlxsw_sp_port *mlxsw_sp_port = 1090 container_of(work, struct mlxsw_sp_port, 1091 periodic_hw_stats.update_dw.work); 1092 1093 if 
(!netif_carrier_ok(mlxsw_sp_port->dev)) 1094 goto out; 1095 1096 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, 1097 &mlxsw_sp_port->periodic_hw_stats.stats); 1098 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev, 1099 &mlxsw_sp_port->periodic_hw_stats.xstats); 1100 1101 out: 1102 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 1103 MLXSW_HW_STATS_UPDATE_TIME); 1104 } 1105 1106 /* Return the stats from a cache that is updated periodically, 1107 * as this function might get called in an atomic context. 1108 */ 1109 static void 1110 mlxsw_sp_port_get_stats64(struct net_device *dev, 1111 struct rtnl_link_stats64 *stats) 1112 { 1113 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1114 1115 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats)); 1116 } 1117 1118 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, 1119 u16 vid_begin, u16 vid_end, 1120 bool is_member, bool untagged) 1121 { 1122 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1123 char *spvm_pl; 1124 int err; 1125 1126 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); 1127 if (!spvm_pl) 1128 return -ENOMEM; 1129 1130 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, 1131 vid_end, is_member, untagged); 1132 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); 1133 kfree(spvm_pl); 1134 return err; 1135 } 1136 1137 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, 1138 u16 vid_end, bool is_member, bool untagged) 1139 { 1140 u16 vid, vid_e; 1141 int err; 1142 1143 for (vid = vid_begin; vid <= vid_end; 1144 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 1145 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), 1146 vid_end); 1147 1148 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, 1149 is_member, untagged); 1150 if (err) 1151 return err; 1152 } 1153 1154 return 0; 1155 } 1156 1157 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port, 1158 bool 
flush_default) 1159 { 1160 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp; 1161 1162 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp, 1163 &mlxsw_sp_port->vlans_list, list) { 1164 if (!flush_default && 1165 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID) 1166 continue; 1167 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1168 } 1169 } 1170 1171 static void 1172 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1173 { 1174 if (mlxsw_sp_port_vlan->bridge_port) 1175 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 1176 else if (mlxsw_sp_port_vlan->fid) 1177 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 1178 } 1179 1180 struct mlxsw_sp_port_vlan * 1181 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 1182 { 1183 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1184 bool untagged = vid == MLXSW_SP_DEFAULT_VID; 1185 int err; 1186 1187 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1188 if (mlxsw_sp_port_vlan) 1189 return ERR_PTR(-EEXIST); 1190 1191 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged); 1192 if (err) 1193 return ERR_PTR(err); 1194 1195 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL); 1196 if (!mlxsw_sp_port_vlan) { 1197 err = -ENOMEM; 1198 goto err_port_vlan_alloc; 1199 } 1200 1201 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; 1202 mlxsw_sp_port_vlan->vid = vid; 1203 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); 1204 1205 return mlxsw_sp_port_vlan; 1206 1207 err_port_vlan_alloc: 1208 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1209 return ERR_PTR(err); 1210 } 1211 1212 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1213 { 1214 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 1215 u16 vid = mlxsw_sp_port_vlan->vid; 1216 1217 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan); 1218 list_del(&mlxsw_sp_port_vlan->list); 1219 
kfree(mlxsw_sp_port_vlan); 1220 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1221 } 1222 1223 static int mlxsw_sp_port_add_vid(struct net_device *dev, 1224 __be16 __always_unused proto, u16 vid) 1225 { 1226 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1227 1228 /* VLAN 0 is added to HW filter when device goes up, but it is 1229 * reserved in our case, so simply return. 1230 */ 1231 if (!vid) 1232 return 0; 1233 1234 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid)); 1235 } 1236 1237 static int mlxsw_sp_port_kill_vid(struct net_device *dev, 1238 __be16 __always_unused proto, u16 vid) 1239 { 1240 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1241 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1242 1243 /* VLAN 0 is removed from HW filter when device goes down, but 1244 * it is reserved in our case, so simply return. 1245 */ 1246 if (!vid) 1247 return 0; 1248 1249 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1250 if (!mlxsw_sp_port_vlan) 1251 return 0; 1252 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1253 1254 return 0; 1255 } 1256 1257 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, 1258 size_t len) 1259 { 1260 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1261 1262 return mlxsw_core_port_get_phys_port_name(mlxsw_sp_port->mlxsw_sp->core, 1263 mlxsw_sp_port->local_port, 1264 name, len); 1265 } 1266 1267 static struct mlxsw_sp_port_mall_tc_entry * 1268 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port, 1269 unsigned long cookie) { 1270 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1271 1272 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list) 1273 if (mall_tc_entry->cookie == cookie) 1274 return mall_tc_entry; 1275 1276 return NULL; 1277 } 1278 1279 static int 1280 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, 1281 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror, 1282 const struct 
tc_action *a, 1283 bool ingress) 1284 { 1285 enum mlxsw_sp_span_type span_type; 1286 struct net_device *to_dev; 1287 1288 to_dev = tcf_mirred_dev(a); 1289 if (!to_dev) { 1290 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n"); 1291 return -EINVAL; 1292 } 1293 1294 mirror->ingress = ingress; 1295 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; 1296 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type, 1297 true, &mirror->span_id); 1298 } 1299 1300 static void 1301 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, 1302 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror) 1303 { 1304 enum mlxsw_sp_span_type span_type; 1305 1306 span_type = mirror->ingress ? 1307 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; 1308 mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id, 1309 span_type, true); 1310 } 1311 1312 static int 1313 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port, 1314 struct tc_cls_matchall_offload *cls, 1315 const struct tc_action *a, 1316 bool ingress) 1317 { 1318 int err; 1319 1320 if (!mlxsw_sp_port->sample) 1321 return -EOPNOTSUPP; 1322 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) { 1323 netdev_err(mlxsw_sp_port->dev, "sample already active\n"); 1324 return -EEXIST; 1325 } 1326 if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) { 1327 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n"); 1328 return -EOPNOTSUPP; 1329 } 1330 1331 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group, 1332 tcf_sample_psample_group(a)); 1333 mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a); 1334 mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a); 1335 mlxsw_sp_port->sample->rate = tcf_sample_rate(a); 1336 1337 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a)); 1338 if (err) 1339 goto err_port_sample_set; 1340 return 0; 1341 1342 err_port_sample_set: 1343 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); 
1344 return err; 1345 } 1346 1347 static void 1348 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port) 1349 { 1350 if (!mlxsw_sp_port->sample) 1351 return; 1352 1353 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1); 1354 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); 1355 } 1356 1357 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1358 struct tc_cls_matchall_offload *f, 1359 bool ingress) 1360 { 1361 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1362 __be16 protocol = f->common.protocol; 1363 const struct tc_action *a; 1364 int err; 1365 1366 if (!tcf_exts_has_one_action(f->exts)) { 1367 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n"); 1368 return -EOPNOTSUPP; 1369 } 1370 1371 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); 1372 if (!mall_tc_entry) 1373 return -ENOMEM; 1374 mall_tc_entry->cookie = f->cookie; 1375 1376 a = tcf_exts_first_action(f->exts); 1377 1378 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { 1379 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; 1380 1381 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR; 1382 mirror = &mall_tc_entry->mirror; 1383 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, 1384 mirror, a, ingress); 1385 } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) { 1386 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE; 1387 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f, 1388 a, ingress); 1389 } else { 1390 err = -EOPNOTSUPP; 1391 } 1392 1393 if (err) 1394 goto err_add_action; 1395 1396 list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list); 1397 return 0; 1398 1399 err_add_action: 1400 kfree(mall_tc_entry); 1401 return err; 1402 } 1403 1404 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1405 struct tc_cls_matchall_offload *f) 1406 { 1407 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1408 1409 mall_tc_entry = 
mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port, 1410 f->cookie); 1411 if (!mall_tc_entry) { 1412 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); 1413 return; 1414 } 1415 list_del(&mall_tc_entry->list); 1416 1417 switch (mall_tc_entry->type) { 1418 case MLXSW_SP_PORT_MALL_MIRROR: 1419 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port, 1420 &mall_tc_entry->mirror); 1421 break; 1422 case MLXSW_SP_PORT_MALL_SAMPLE: 1423 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port); 1424 break; 1425 default: 1426 WARN_ON(1); 1427 } 1428 1429 kfree(mall_tc_entry); 1430 } 1431 1432 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1433 struct tc_cls_matchall_offload *f, 1434 bool ingress) 1435 { 1436 switch (f->command) { 1437 case TC_CLSMATCHALL_REPLACE: 1438 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f, 1439 ingress); 1440 case TC_CLSMATCHALL_DESTROY: 1441 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f); 1442 return 0; 1443 default: 1444 return -EOPNOTSUPP; 1445 } 1446 } 1447 1448 static int 1449 mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block, 1450 struct tc_cls_flower_offload *f) 1451 { 1452 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block); 1453 1454 switch (f->command) { 1455 case TC_CLSFLOWER_REPLACE: 1456 return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f); 1457 case TC_CLSFLOWER_DESTROY: 1458 mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f); 1459 return 0; 1460 case TC_CLSFLOWER_STATS: 1461 return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f); 1462 case TC_CLSFLOWER_TMPLT_CREATE: 1463 return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f); 1464 case TC_CLSFLOWER_TMPLT_DESTROY: 1465 mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f); 1466 return 0; 1467 default: 1468 return -EOPNOTSUPP; 1469 } 1470 } 1471 1472 static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type, 1473 void *type_data, 1474 void *cb_priv, bool ingress) 1475 { 1476 struct mlxsw_sp_port 
*mlxsw_sp_port = cb_priv; 1477 1478 switch (type) { 1479 case TC_SETUP_CLSMATCHALL: 1480 if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev, 1481 type_data)) 1482 return -EOPNOTSUPP; 1483 1484 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data, 1485 ingress); 1486 case TC_SETUP_CLSFLOWER: 1487 return 0; 1488 default: 1489 return -EOPNOTSUPP; 1490 } 1491 } 1492 1493 static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type, 1494 void *type_data, 1495 void *cb_priv) 1496 { 1497 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1498 cb_priv, true); 1499 } 1500 1501 static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type, 1502 void *type_data, 1503 void *cb_priv) 1504 { 1505 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1506 cb_priv, false); 1507 } 1508 1509 static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type, 1510 void *type_data, void *cb_priv) 1511 { 1512 struct mlxsw_sp_acl_block *acl_block = cb_priv; 1513 1514 switch (type) { 1515 case TC_SETUP_CLSMATCHALL: 1516 return 0; 1517 case TC_SETUP_CLSFLOWER: 1518 if (mlxsw_sp_acl_block_disabled(acl_block)) 1519 return -EOPNOTSUPP; 1520 1521 return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data); 1522 default: 1523 return -EOPNOTSUPP; 1524 } 1525 } 1526 1527 static int 1528 mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, 1529 struct tcf_block *block, bool ingress, 1530 struct netlink_ext_ack *extack) 1531 { 1532 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1533 struct mlxsw_sp_acl_block *acl_block; 1534 struct tcf_block_cb *block_cb; 1535 int err; 1536 1537 block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower, 1538 mlxsw_sp); 1539 if (!block_cb) { 1540 acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net); 1541 if (!acl_block) 1542 return -ENOMEM; 1543 block_cb = __tcf_block_cb_register(block, 1544 mlxsw_sp_setup_tc_block_cb_flower, 1545 mlxsw_sp, acl_block, extack); 1546 
if (IS_ERR(block_cb)) { 1547 err = PTR_ERR(block_cb); 1548 goto err_cb_register; 1549 } 1550 } else { 1551 acl_block = tcf_block_cb_priv(block_cb); 1552 } 1553 tcf_block_cb_incref(block_cb); 1554 err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block, 1555 mlxsw_sp_port, ingress); 1556 if (err) 1557 goto err_block_bind; 1558 1559 if (ingress) 1560 mlxsw_sp_port->ing_acl_block = acl_block; 1561 else 1562 mlxsw_sp_port->eg_acl_block = acl_block; 1563 1564 return 0; 1565 1566 err_block_bind: 1567 if (!tcf_block_cb_decref(block_cb)) { 1568 __tcf_block_cb_unregister(block, block_cb); 1569 err_cb_register: 1570 mlxsw_sp_acl_block_destroy(acl_block); 1571 } 1572 return err; 1573 } 1574 1575 static void 1576 mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port, 1577 struct tcf_block *block, bool ingress) 1578 { 1579 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1580 struct mlxsw_sp_acl_block *acl_block; 1581 struct tcf_block_cb *block_cb; 1582 int err; 1583 1584 block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower, 1585 mlxsw_sp); 1586 if (!block_cb) 1587 return; 1588 1589 if (ingress) 1590 mlxsw_sp_port->ing_acl_block = NULL; 1591 else 1592 mlxsw_sp_port->eg_acl_block = NULL; 1593 1594 acl_block = tcf_block_cb_priv(block_cb); 1595 err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block, 1596 mlxsw_sp_port, ingress); 1597 if (!err && !tcf_block_cb_decref(block_cb)) { 1598 __tcf_block_cb_unregister(block, block_cb); 1599 mlxsw_sp_acl_block_destroy(acl_block); 1600 } 1601 } 1602 1603 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, 1604 struct tc_block_offload *f) 1605 { 1606 tc_setup_cb_t *cb; 1607 bool ingress; 1608 int err; 1609 1610 if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { 1611 cb = mlxsw_sp_setup_tc_block_cb_matchall_ig; 1612 ingress = true; 1613 } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 1614 cb = mlxsw_sp_setup_tc_block_cb_matchall_eg; 1615 ingress = false; 1616 
} else { 1617 return -EOPNOTSUPP; 1618 } 1619 1620 switch (f->command) { 1621 case TC_BLOCK_BIND: 1622 err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port, 1623 mlxsw_sp_port, f->extack); 1624 if (err) 1625 return err; 1626 err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, 1627 f->block, ingress, 1628 f->extack); 1629 if (err) { 1630 tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); 1631 return err; 1632 } 1633 return 0; 1634 case TC_BLOCK_UNBIND: 1635 mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port, 1636 f->block, ingress); 1637 tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); 1638 return 0; 1639 default: 1640 return -EOPNOTSUPP; 1641 } 1642 } 1643 1644 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, 1645 void *type_data) 1646 { 1647 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1648 1649 switch (type) { 1650 case TC_SETUP_BLOCK: 1651 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1652 case TC_SETUP_QDISC_RED: 1653 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1654 case TC_SETUP_QDISC_PRIO: 1655 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data); 1656 default: 1657 return -EOPNOTSUPP; 1658 } 1659 } 1660 1661 1662 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) 1663 { 1664 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1665 1666 if (!enable) { 1667 if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) || 1668 mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) || 1669 !list_empty(&mlxsw_sp_port->mall_tc_list)) { 1670 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); 1671 return -EINVAL; 1672 } 1673 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block); 1674 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block); 1675 } else { 1676 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block); 1677 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block); 1678 } 1679 return 0; 1680 } 
1681 1682 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable); 1683 1684 static int mlxsw_sp_handle_feature(struct net_device *dev, 1685 netdev_features_t wanted_features, 1686 netdev_features_t feature, 1687 mlxsw_sp_feature_handler feature_handler) 1688 { 1689 netdev_features_t changes = wanted_features ^ dev->features; 1690 bool enable = !!(wanted_features & feature); 1691 int err; 1692 1693 if (!(changes & feature)) 1694 return 0; 1695 1696 err = feature_handler(dev, enable); 1697 if (err) { 1698 netdev_err(dev, "%s feature %pNF failed, err %d\n", 1699 enable ? "Enable" : "Disable", &feature, err); 1700 return err; 1701 } 1702 1703 if (enable) 1704 dev->features |= feature; 1705 else 1706 dev->features &= ~feature; 1707 1708 return 0; 1709 } 1710 static int mlxsw_sp_set_features(struct net_device *dev, 1711 netdev_features_t features) 1712 { 1713 return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC, 1714 mlxsw_sp_feature_hw_tc); 1715 } 1716 1717 static int mlxsw_sp_port_get_port_parent_id(struct net_device *dev, 1718 struct netdev_phys_item_id *ppid) 1719 { 1720 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1721 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1722 1723 ppid->id_len = sizeof(mlxsw_sp->base_mac); 1724 memcpy(&ppid->id, &mlxsw_sp->base_mac, ppid->id_len); 1725 1726 return 0; 1727 } 1728 1729 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1730 .ndo_open = mlxsw_sp_port_open, 1731 .ndo_stop = mlxsw_sp_port_stop, 1732 .ndo_start_xmit = mlxsw_sp_port_xmit, 1733 .ndo_setup_tc = mlxsw_sp_setup_tc, 1734 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1735 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1736 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1737 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1738 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1739 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1740 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1741 
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1742 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name, 1743 .ndo_set_features = mlxsw_sp_set_features, 1744 .ndo_get_port_parent_id = mlxsw_sp_port_get_port_parent_id, 1745 }; 1746 1747 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 1748 struct ethtool_drvinfo *drvinfo) 1749 { 1750 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1751 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1752 1753 strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, 1754 sizeof(drvinfo->driver)); 1755 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 1756 sizeof(drvinfo->version)); 1757 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 1758 "%d.%d.%d", 1759 mlxsw_sp->bus_info->fw_rev.major, 1760 mlxsw_sp->bus_info->fw_rev.minor, 1761 mlxsw_sp->bus_info->fw_rev.subminor); 1762 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 1763 sizeof(drvinfo->bus_info)); 1764 } 1765 1766 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, 1767 struct ethtool_pauseparam *pause) 1768 { 1769 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1770 1771 pause->rx_pause = mlxsw_sp_port->link.rx_pause; 1772 pause->tx_pause = mlxsw_sp_port->link.tx_pause; 1773 } 1774 1775 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, 1776 struct ethtool_pauseparam *pause) 1777 { 1778 char pfcc_pl[MLXSW_REG_PFCC_LEN]; 1779 1780 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); 1781 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); 1782 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); 1783 1784 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), 1785 pfcc_pl); 1786 } 1787 1788 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, 1789 struct ethtool_pauseparam *pause) 1790 { 1791 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1792 bool pause_en = pause->tx_pause || pause->rx_pause; 1793 int err; 1794 1795 if 
(mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { 1796 netdev_err(dev, "PFC already enabled on port\n"); 1797 return -EINVAL; 1798 } 1799 1800 if (pause->autoneg) { 1801 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); 1802 return -EINVAL; 1803 } 1804 1805 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1806 if (err) { 1807 netdev_err(dev, "Failed to configure port's headroom\n"); 1808 return err; 1809 } 1810 1811 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); 1812 if (err) { 1813 netdev_err(dev, "Failed to set PAUSE parameters\n"); 1814 goto err_port_pause_configure; 1815 } 1816 1817 mlxsw_sp_port->link.rx_pause = pause->rx_pause; 1818 mlxsw_sp_port->link.tx_pause = pause->tx_pause; 1819 1820 return 0; 1821 1822 err_port_pause_configure: 1823 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1824 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1825 return err; 1826 } 1827 1828 struct mlxsw_sp_port_hw_stats { 1829 char str[ETH_GSTRING_LEN]; 1830 u64 (*getter)(const char *payload); 1831 bool cells_bytes; 1832 }; 1833 1834 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 1835 { 1836 .str = "a_frames_transmitted_ok", 1837 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 1838 }, 1839 { 1840 .str = "a_frames_received_ok", 1841 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 1842 }, 1843 { 1844 .str = "a_frame_check_sequence_errors", 1845 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 1846 }, 1847 { 1848 .str = "a_alignment_errors", 1849 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 1850 }, 1851 { 1852 .str = "a_octets_transmitted_ok", 1853 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 1854 }, 1855 { 1856 .str = "a_octets_received_ok", 1857 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 1858 }, 1859 { 1860 .str = "a_multicast_frames_xmitted_ok", 1861 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 1862 }, 1863 { 1864 .str = 
"a_broadcast_frames_xmitted_ok", 1865 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 1866 }, 1867 { 1868 .str = "a_multicast_frames_received_ok", 1869 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 1870 }, 1871 { 1872 .str = "a_broadcast_frames_received_ok", 1873 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 1874 }, 1875 { 1876 .str = "a_in_range_length_errors", 1877 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 1878 }, 1879 { 1880 .str = "a_out_of_range_length_field", 1881 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 1882 }, 1883 { 1884 .str = "a_frame_too_long_errors", 1885 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 1886 }, 1887 { 1888 .str = "a_symbol_error_during_carrier", 1889 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 1890 }, 1891 { 1892 .str = "a_mac_control_frames_transmitted", 1893 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 1894 }, 1895 { 1896 .str = "a_mac_control_frames_received", 1897 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 1898 }, 1899 { 1900 .str = "a_unsupported_opcodes_received", 1901 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 1902 }, 1903 { 1904 .str = "a_pause_mac_ctrl_frames_received", 1905 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 1906 }, 1907 { 1908 .str = "a_pause_mac_ctrl_frames_xmitted", 1909 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 1910 }, 1911 }; 1912 1913 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 1914 1915 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = { 1916 { 1917 .str = "if_in_discards", 1918 .getter = mlxsw_reg_ppcnt_if_in_discards_get, 1919 }, 1920 { 1921 .str = "if_out_discards", 1922 .getter = mlxsw_reg_ppcnt_if_out_discards_get, 1923 }, 1924 { 1925 .str = "if_out_errors", 1926 .getter = mlxsw_reg_ppcnt_if_out_errors_get, 1927 }, 1928 }; 1929 1930 #define 
MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \ 1931 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats) 1932 1933 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { 1934 { 1935 .str = "ether_stats_undersize_pkts", 1936 .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get, 1937 }, 1938 { 1939 .str = "ether_stats_oversize_pkts", 1940 .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get, 1941 }, 1942 { 1943 .str = "ether_stats_fragments", 1944 .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get, 1945 }, 1946 { 1947 .str = "ether_pkts64octets", 1948 .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, 1949 }, 1950 { 1951 .str = "ether_pkts65to127octets", 1952 .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get, 1953 }, 1954 { 1955 .str = "ether_pkts128to255octets", 1956 .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, 1957 }, 1958 { 1959 .str = "ether_pkts256to511octets", 1960 .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, 1961 }, 1962 { 1963 .str = "ether_pkts512to1023octets", 1964 .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, 1965 }, 1966 { 1967 .str = "ether_pkts1024to1518octets", 1968 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, 1969 }, 1970 { 1971 .str = "ether_pkts1519to2047octets", 1972 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, 1973 }, 1974 { 1975 .str = "ether_pkts2048to4095octets", 1976 .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, 1977 }, 1978 { 1979 .str = "ether_pkts4096to8191octets", 1980 .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get, 1981 }, 1982 { 1983 .str = "ether_pkts8192to10239octets", 1984 .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get, 1985 }, 1986 }; 1987 1988 #define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ 1989 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) 1990 1991 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = { 1992 { 1993 .str = "dot3stats_fcs_errors", 1994 
.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get, 1995 }, 1996 { 1997 .str = "dot3stats_symbol_errors", 1998 .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get, 1999 }, 2000 { 2001 .str = "dot3control_in_unknown_opcodes", 2002 .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get, 2003 }, 2004 { 2005 .str = "dot3in_pause_frames", 2006 .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get, 2007 }, 2008 }; 2009 2010 #define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \ 2011 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats) 2012 2013 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = { 2014 { 2015 .str = "discard_ingress_general", 2016 .getter = mlxsw_reg_ppcnt_ingress_general_get, 2017 }, 2018 { 2019 .str = "discard_ingress_policy_engine", 2020 .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get, 2021 }, 2022 { 2023 .str = "discard_ingress_vlan_membership", 2024 .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get, 2025 }, 2026 { 2027 .str = "discard_ingress_tag_frame_type", 2028 .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get, 2029 }, 2030 { 2031 .str = "discard_egress_vlan_membership", 2032 .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get, 2033 }, 2034 { 2035 .str = "discard_loopback_filter", 2036 .getter = mlxsw_reg_ppcnt_loopback_filter_get, 2037 }, 2038 { 2039 .str = "discard_egress_general", 2040 .getter = mlxsw_reg_ppcnt_egress_general_get, 2041 }, 2042 { 2043 .str = "discard_egress_hoq", 2044 .getter = mlxsw_reg_ppcnt_egress_hoq_get, 2045 }, 2046 { 2047 .str = "discard_egress_policy_engine", 2048 .getter = mlxsw_reg_ppcnt_egress_policy_engine_get, 2049 }, 2050 { 2051 .str = "discard_ingress_tx_link_down", 2052 .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get, 2053 }, 2054 { 2055 .str = "discard_egress_stp_filter", 2056 .getter = mlxsw_reg_ppcnt_egress_stp_filter_get, 2057 }, 2058 { 2059 .str = "discard_egress_sll", 2060 .getter = mlxsw_reg_ppcnt_egress_sll_get, 2061 }, 2062 }; 2063 2064 #define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \ 
2065 ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats) 2066 2067 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 2068 { 2069 .str = "rx_octets_prio", 2070 .getter = mlxsw_reg_ppcnt_rx_octets_get, 2071 }, 2072 { 2073 .str = "rx_frames_prio", 2074 .getter = mlxsw_reg_ppcnt_rx_frames_get, 2075 }, 2076 { 2077 .str = "tx_octets_prio", 2078 .getter = mlxsw_reg_ppcnt_tx_octets_get, 2079 }, 2080 { 2081 .str = "tx_frames_prio", 2082 .getter = mlxsw_reg_ppcnt_tx_frames_get, 2083 }, 2084 { 2085 .str = "rx_pause_prio", 2086 .getter = mlxsw_reg_ppcnt_rx_pause_get, 2087 }, 2088 { 2089 .str = "rx_pause_duration_prio", 2090 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 2091 }, 2092 { 2093 .str = "tx_pause_prio", 2094 .getter = mlxsw_reg_ppcnt_tx_pause_get, 2095 }, 2096 { 2097 .str = "tx_pause_duration_prio", 2098 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 2099 }, 2100 }; 2101 2102 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 2103 2104 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 2105 { 2106 .str = "tc_transmit_queue_tc", 2107 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 2108 .cells_bytes = true, 2109 }, 2110 { 2111 .str = "tc_no_buffer_discard_uc_tc", 2112 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 2113 }, 2114 }; 2115 2116 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 2117 2118 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 2119 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \ 2120 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ 2121 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \ 2122 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \ 2123 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ 2124 IEEE_8021QAZ_MAX_TCS) + \ 2125 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ 2126 TC_MAX_QUEUE)) 2127 2128 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 2129 { 2130 int i; 2131 2132 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 2133 snprintf(*p, ETH_GSTRING_LEN, 
"%.29s_%.1d", 2134 mlxsw_sp_port_hw_prio_stats[i].str, prio); 2135 *p += ETH_GSTRING_LEN; 2136 } 2137 } 2138 2139 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 2140 { 2141 int i; 2142 2143 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 2144 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 2145 mlxsw_sp_port_hw_tc_stats[i].str, tc); 2146 *p += ETH_GSTRING_LEN; 2147 } 2148 } 2149 2150 static void mlxsw_sp_port_get_strings(struct net_device *dev, 2151 u32 stringset, u8 *data) 2152 { 2153 u8 *p = data; 2154 int i; 2155 2156 switch (stringset) { 2157 case ETH_SS_STATS: 2158 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2159 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2160 ETH_GSTRING_LEN); 2161 p += ETH_GSTRING_LEN; 2162 } 2163 2164 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) { 2165 memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str, 2166 ETH_GSTRING_LEN); 2167 p += ETH_GSTRING_LEN; 2168 } 2169 2170 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { 2171 memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, 2172 ETH_GSTRING_LEN); 2173 p += ETH_GSTRING_LEN; 2174 } 2175 2176 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) { 2177 memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str, 2178 ETH_GSTRING_LEN); 2179 p += ETH_GSTRING_LEN; 2180 } 2181 2182 for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) { 2183 memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str, 2184 ETH_GSTRING_LEN); 2185 p += ETH_GSTRING_LEN; 2186 } 2187 2188 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2189 mlxsw_sp_port_get_prio_strings(&p, i); 2190 2191 for (i = 0; i < TC_MAX_QUEUE; i++) 2192 mlxsw_sp_port_get_tc_strings(&p, i); 2193 2194 break; 2195 } 2196 } 2197 2198 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2199 enum ethtool_phys_id_state state) 2200 { 2201 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2202 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2203 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2204 bool active; 2205 2206 switch 
(state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		/* Blink states are not supported; only steady on/off. */
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

/* Map a PPCNT counter group to its descriptor table and length.
 * Returns 0 on success; warns and returns -EOPNOTSUPP for groups
 * this driver does not expose via ethtool.
 */
static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2863_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2819_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_3635_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_DISCARD_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_discard_stats;
		*p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

/* Fetch one PPCNT counter group for @dev and store the counters at
 * data[data_index..data_index + len - 1]. Counters flagged with
 * cells_bytes are converted via mlxsw_sp_cells_bytes() (presumably
 * device buffer cells -> bytes).
 */
static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int
i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}

/* ethtool .get_ethtool_stats: gather all supported PPCNT groups into
 * @data. The group order and per-group lengths here must match the
 * string layout produced by mlxsw_sp_port_get_strings() and the total
 * returned by mlxsw_sp_port_get_sset_count()
 * (MLXSW_SP_PORT_ETHTOOL_STATS_LEN).
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* RFC 2863 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;

	/* RFC 2819 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;

	/* RFC 3635 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;

	/* Discard Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;

	/* Per-Priority Counters: one group instance per IEEE priority. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters: one group instance per traffic class queue. */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

/* ethtool .get_sset_count: number of exported stats strings/values. */
static int mlxsw_sp_port_get_sset_count(struct
net_device *dev, int sset) 2331 { 2332 switch (sset) { 2333 case ETH_SS_STATS: 2334 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; 2335 default: 2336 return -EOPNOTSUPP; 2337 } 2338 } 2339 2340 struct mlxsw_sp1_port_link_mode { 2341 enum ethtool_link_mode_bit_indices mask_ethtool; 2342 u32 mask; 2343 u32 speed; 2344 }; 2345 2346 static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = { 2347 { 2348 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2349 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2350 .speed = SPEED_100, 2351 }, 2352 { 2353 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2354 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2355 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2356 .speed = SPEED_1000, 2357 }, 2358 { 2359 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2360 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2361 .speed = SPEED_10000, 2362 }, 2363 { 2364 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 2365 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 2366 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2367 .speed = SPEED_10000, 2368 }, 2369 { 2370 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2371 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2372 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2373 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 2374 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2375 .speed = SPEED_10000, 2376 }, 2377 { 2378 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 2379 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 2380 .speed = SPEED_20000, 2381 }, 2382 { 2383 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 2384 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2385 .speed = SPEED_40000, 2386 }, 2387 { 2388 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 2389 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2390 .speed = SPEED_40000, 2391 }, 2392 { 2393 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 2394 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2395 .speed = SPEED_40000, 2396 }, 2397 { 
2398 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 2399 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2400 .speed = SPEED_40000, 2401 }, 2402 { 2403 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, 2404 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2405 .speed = SPEED_25000, 2406 }, 2407 { 2408 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, 2409 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2410 .speed = SPEED_25000, 2411 }, 2412 { 2413 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2414 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2415 .speed = SPEED_25000, 2416 }, 2417 { 2418 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, 2419 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2420 .speed = SPEED_50000, 2421 }, 2422 { 2423 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 2424 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2425 .speed = SPEED_50000, 2426 }, 2427 { 2428 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2, 2429 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2430 .speed = SPEED_50000, 2431 }, 2432 { 2433 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2434 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, 2435 .speed = SPEED_56000, 2436 }, 2437 { 2438 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2439 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, 2440 .speed = SPEED_56000, 2441 }, 2442 { 2443 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2444 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, 2445 .speed = SPEED_56000, 2446 }, 2447 { 2448 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2449 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, 2450 .speed = SPEED_56000, 2451 }, 2452 { 2453 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, 2454 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2455 .speed = SPEED_100000, 2456 }, 2457 { 2458 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, 2459 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2460 .speed = SPEED_100000, 
2461 }, 2462 { 2463 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, 2464 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2465 .speed = SPEED_100000, 2466 }, 2467 { 2468 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 2469 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2470 .speed = SPEED_100000, 2471 }, 2472 }; 2473 2474 #define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode) 2475 2476 static void 2477 mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 2478 u32 ptys_eth_proto, 2479 struct ethtool_link_ksettings *cmd) 2480 { 2481 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2482 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2483 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2484 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2485 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2486 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2487 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2488 2489 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2490 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2491 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2492 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2493 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2494 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2495 } 2496 2497 static void 2498 mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 2499 unsigned long *mode) 2500 { 2501 int i; 2502 2503 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2504 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) 2505 __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2506 mode); 2507 } 2508 } 2509 2510 static void 2511 mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 2512 u32 ptys_eth_proto, 2513 struct ethtool_link_ksettings *cmd) 2514 { 2515 u32 speed = SPEED_UNKNOWN; 2516 u8 duplex = DUPLEX_UNKNOWN; 2517 int i; 2518 2519 if (!carrier_ok) 2520 goto out; 2521 2522 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2523 if (ptys_eth_proto & 
mlxsw_sp1_port_link_mode[i].mask) { 2524 speed = mlxsw_sp1_port_link_mode[i].speed; 2525 duplex = DUPLEX_FULL; 2526 break; 2527 } 2528 } 2529 out: 2530 cmd->base.speed = speed; 2531 cmd->base.duplex = duplex; 2532 } 2533 2534 static u32 2535 mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, 2536 const struct ethtool_link_ksettings *cmd) 2537 { 2538 u32 ptys_proto = 0; 2539 int i; 2540 2541 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2542 if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2543 cmd->link_modes.advertising)) 2544 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2545 } 2546 return ptys_proto; 2547 } 2548 2549 static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed) 2550 { 2551 u32 ptys_proto = 0; 2552 int i; 2553 2554 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2555 if (speed == mlxsw_sp1_port_link_mode[i].speed) 2556 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2557 } 2558 return ptys_proto; 2559 } 2560 2561 static u32 2562 mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 2563 { 2564 u32 ptys_proto = 0; 2565 int i; 2566 2567 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2568 if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed) 2569 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2570 } 2571 return ptys_proto; 2572 } 2573 2574 static int 2575 mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2576 u32 *base_speed) 2577 { 2578 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 2579 return 0; 2580 } 2581 2582 static void 2583 mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 2584 u8 local_port, u32 proto_admin, bool autoneg) 2585 { 2586 mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg); 2587 } 2588 2589 static void 2590 mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 2591 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 2592 u32 *p_eth_proto_oper) 2593 { 2594 mlxsw_reg_ptys_eth_unpack(payload, 
p_eth_proto_cap, p_eth_proto_admin, 2595 p_eth_proto_oper); 2596 } 2597 2598 static const struct mlxsw_sp_port_type_speed_ops 2599 mlxsw_sp1_port_type_speed_ops = { 2600 .from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port, 2601 .from_ptys_link = mlxsw_sp1_from_ptys_link, 2602 .from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex, 2603 .to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link, 2604 .to_ptys_speed = mlxsw_sp1_to_ptys_speed, 2605 .to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed, 2606 .port_speed_base = mlxsw_sp1_port_speed_base, 2607 .reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack, 2608 .reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack, 2609 }; 2610 2611 static const enum ethtool_link_mode_bit_indices 2612 mlxsw_sp2_mask_ethtool_sgmii_100m[] = { 2613 ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2614 }; 2615 2616 #define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \ 2617 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m) 2618 2619 static const enum ethtool_link_mode_bit_indices 2620 mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = { 2621 ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 2622 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2623 }; 2624 2625 #define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \ 2626 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii) 2627 2628 static const enum ethtool_link_mode_bit_indices 2629 mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = { 2630 ETHTOOL_LINK_MODE_2500baseX_Full_BIT, 2631 }; 2632 2633 #define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \ 2634 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii) 2635 2636 static const enum ethtool_link_mode_bit_indices 2637 mlxsw_sp2_mask_ethtool_5gbase_r[] = { 2638 ETHTOOL_LINK_MODE_5000baseT_Full_BIT, 2639 }; 2640 2641 #define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \ 2642 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r) 2643 2644 static const enum ethtool_link_mode_bit_indices 2645 mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = { 2646 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2647 
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2648 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 2649 ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 2650 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 2651 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 2652 ETHTOOL_LINK_MODE_10000baseER_Full_BIT, 2653 }; 2654 2655 #define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \ 2656 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g) 2657 2658 static const enum ethtool_link_mode_bit_indices 2659 mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = { 2660 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2661 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2662 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2663 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2664 }; 2665 2666 #define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \ 2667 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g) 2668 2669 static const enum ethtool_link_mode_bit_indices 2670 mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = { 2671 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2672 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2673 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2674 }; 2675 2676 #define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \ 2677 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr) 2678 2679 static const enum ethtool_link_mode_bit_indices 2680 mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = { 2681 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2682 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2683 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2684 }; 2685 2686 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \ 2687 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2) 2688 2689 static const enum ethtool_link_mode_bit_indices 2690 mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = { 2691 ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, 2692 ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, 2693 ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, 2694 ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, 2695 ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, 2696 }; 2697 
2698 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \ 2699 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr) 2700 2701 static const enum ethtool_link_mode_bit_indices 2702 mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = { 2703 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2704 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2705 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2706 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2707 }; 2708 2709 #define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \ 2710 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4) 2711 2712 static const enum ethtool_link_mode_bit_indices 2713 mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = { 2714 ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, 2715 ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, 2716 ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, 2717 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, 2718 ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, 2719 }; 2720 2721 #define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \ 2722 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2) 2723 2724 static const enum ethtool_link_mode_bit_indices 2725 mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { 2726 ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, 2727 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, 2728 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, 2729 ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, 2730 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, 2731 }; 2732 2733 #define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \ 2734 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4) 2735 2736 struct mlxsw_sp2_port_link_mode { 2737 const enum ethtool_link_mode_bit_indices *mask_ethtool; 2738 int m_ethtool_len; 2739 u32 mask; 2740 u32 speed; 2741 }; 2742 2743 static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { 2744 { 2745 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M, 2746 .mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m, 2747 
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN, 2748 .speed = SPEED_100, 2749 }, 2750 { 2751 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII, 2752 .mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii, 2753 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN, 2754 .speed = SPEED_1000, 2755 }, 2756 { 2757 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII, 2758 .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii, 2759 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN, 2760 .speed = SPEED_2500, 2761 }, 2762 { 2763 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R, 2764 .mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r, 2765 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN, 2766 .speed = SPEED_5000, 2767 }, 2768 { 2769 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G, 2770 .mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g, 2771 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN, 2772 .speed = SPEED_10000, 2773 }, 2774 { 2775 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G, 2776 .mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g, 2777 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN, 2778 .speed = SPEED_40000, 2779 }, 2780 { 2781 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR, 2782 .mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr, 2783 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN, 2784 .speed = SPEED_25000, 2785 }, 2786 { 2787 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2, 2788 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2, 2789 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN, 2790 .speed = SPEED_50000, 2791 }, 2792 { 2793 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR, 2794 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr, 2795 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN, 2796 .speed = 
SPEED_50000, 2797 }, 2798 { 2799 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4, 2800 .mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4, 2801 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN, 2802 .speed = SPEED_100000, 2803 }, 2804 { 2805 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2, 2806 .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2, 2807 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN, 2808 .speed = SPEED_100000, 2809 }, 2810 { 2811 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, 2812 .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, 2813 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, 2814 .speed = SPEED_200000, 2815 }, 2816 }; 2817 2818 #define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode) 2819 2820 static void 2821 mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 2822 u32 ptys_eth_proto, 2823 struct ethtool_link_ksettings *cmd) 2824 { 2825 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2826 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2827 } 2828 2829 static void 2830 mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 2831 unsigned long *mode) 2832 { 2833 int i; 2834 2835 for (i = 0; i < link_mode->m_ethtool_len; i++) 2836 __set_bit(link_mode->mask_ethtool[i], mode); 2837 } 2838 2839 static void 2840 mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 2841 unsigned long *mode) 2842 { 2843 int i; 2844 2845 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 2846 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) 2847 mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 2848 mode); 2849 } 2850 } 2851 2852 static void 2853 mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 2854 u32 ptys_eth_proto, 2855 struct ethtool_link_ksettings *cmd) 2856 { 2857 u32 
speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	/* Without carrier, report unknown speed/duplex. */
	if (!carrier_ok)
		goto out;

	/* First matching operational link mode determines speed; all
	 * supported modes are full duplex.
	 */
	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) {
			speed = mlxsw_sp2_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	cmd->base.speed = speed;
	cmd->base.duplex = duplex;
}

/* Return true iff every ethtool bit belonging to @link_mode is set in
 * @mode - a PTYS mode is advertised only when all of its ethtool link
 * modes are requested.
 */
static bool
mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			   const unsigned long *mode)
{
	int cnt = 0;
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++) {
		if (test_bit(link_mode->mask_ethtool[i], mode))
			cnt++;
	}

	return cnt == link_mode->m_ethtool_len;
}

/* Translate the ethtool advertising bitmap to a PTYS extended proto mask. */
static u32
mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
					       cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Translate a single ethtool SPEED_* value to a PTYS proto mask. */
static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp2_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Build a PTYS proto mask of every link mode at or below @upper_speed. */
static u32
mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static int
mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			  u32
*base_speed) 2934 { 2935 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2936 u32 eth_proto_cap; 2937 int err; 2938 2939 /* In Spectrum-2, the speed of 1x can change from port to port, so query 2940 * it from firmware. 2941 */ 2942 mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false); 2943 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2944 if (err) 2945 return err; 2946 mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 2947 2948 if (eth_proto_cap & 2949 MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) { 2950 *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G; 2951 return 0; 2952 } 2953 2954 if (eth_proto_cap & 2955 MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) { 2956 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 2957 return 0; 2958 } 2959 2960 return -EIO; 2961 } 2962 2963 static void 2964 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 2965 u8 local_port, u32 proto_admin, 2966 bool autoneg) 2967 { 2968 mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); 2969 } 2970 2971 static void 2972 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 2973 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 2974 u32 *p_eth_proto_oper) 2975 { 2976 mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, 2977 p_eth_proto_admin, p_eth_proto_oper); 2978 } 2979 2980 static const struct mlxsw_sp_port_type_speed_ops 2981 mlxsw_sp2_port_type_speed_ops = { 2982 .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, 2983 .from_ptys_link = mlxsw_sp2_from_ptys_link, 2984 .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, 2985 .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, 2986 .to_ptys_speed = mlxsw_sp2_to_ptys_speed, 2987 .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed, 2988 .port_speed_base = mlxsw_sp2_port_speed_base, 2989 .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, 2990 .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, 2991 }; 2992 2993 static void 2994 
mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
				 struct ethtool_link_ksettings *cmd)
{
	const struct mlxsw_sp_port_type_speed_ops *ops;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Pause and autoneg are always supported; port type and link
	 * modes come from the ASIC-specific PTYS capability mask.
	 */
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
	ops->from_ptys_link(mlxsw_sp, eth_proto_cap, cmd->link_modes.supported);
}

/* Fill the advertised link modes from the admin proto mask. Nothing is
 * advertised when autoneg is disabled.
 */
static void
mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
				 u32 eth_proto_admin, bool autoneg,
				 struct ethtool_link_ksettings *cmd)
{
	const struct mlxsw_sp_port_type_speed_ops *ops;

	ops = mlxsw_sp->port_type_speed_ops;

	if (!autoneg)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	ops->from_ptys_link(mlxsw_sp, eth_proto_admin,
			    cmd->link_modes.advertising);
}

/* Map the PTYS connector type to the corresponding ethtool PORT_*
 * constant; unknown values warn once and fall back to PORT_OTHER.
 */
static u8
mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type)
{
	switch (connector_type) {
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR:
		return PORT_OTHER;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE:
		return PORT_NONE;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP:
		return PORT_TP;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI:
		return PORT_AUI;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC:
		return PORT_BNC;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII:
		return PORT_MII;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE:
		return PORT_FIBRE;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA:
		return PORT_DA;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER:
		return PORT_OTHER;
	default:
		WARN_ON_ONCE(1);
		return PORT_OTHER;
	}
}

static int
mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 3055 struct ethtool_link_ksettings *cmd) 3056 { 3057 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 3058 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3059 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3060 const struct mlxsw_sp_port_type_speed_ops *ops; 3061 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3062 u8 connector_type; 3063 bool autoneg; 3064 int err; 3065 3066 ops = mlxsw_sp->port_type_speed_ops; 3067 3068 autoneg = mlxsw_sp_port->link.autoneg; 3069 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3070 0, false); 3071 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3072 if (err) 3073 return err; 3074 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3075 ð_proto_admin, ð_proto_oper); 3076 3077 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd); 3078 3079 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3080 cmd); 3081 3082 cmd->base.autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; 3083 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3084 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3085 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3086 eth_proto_oper, cmd); 3087 3088 return 0; 3089 } 3090 3091 static int 3092 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3093 const struct ethtool_link_ksettings *cmd) 3094 { 3095 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3096 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3097 const struct mlxsw_sp_port_type_speed_ops *ops; 3098 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3099 u32 eth_proto_cap, eth_proto_new; 3100 bool autoneg; 3101 int err; 3102 3103 ops = mlxsw_sp->port_type_speed_ops; 3104 3105 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3106 0, false); 3107 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3108 if (err) 3109 return err; 3110 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3111 3112 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 3113 eth_proto_new = autoneg ? 
3114 ops->to_ptys_advert_link(mlxsw_sp, cmd) : 3115 ops->to_ptys_speed(mlxsw_sp, cmd->base.speed); 3116 3117 eth_proto_new = eth_proto_new & eth_proto_cap; 3118 if (!eth_proto_new) { 3119 netdev_err(dev, "No supported speed requested\n"); 3120 return -EINVAL; 3121 } 3122 3123 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3124 eth_proto_new, autoneg); 3125 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3126 if (err) 3127 return err; 3128 3129 if (!netif_running(dev)) 3130 return 0; 3131 3132 mlxsw_sp_port->link.autoneg = autoneg; 3133 3134 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3135 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3136 3137 return 0; 3138 } 3139 3140 static int mlxsw_sp_flash_device(struct net_device *dev, 3141 struct ethtool_flash *flash) 3142 { 3143 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3144 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3145 const struct firmware *firmware; 3146 int err; 3147 3148 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) 3149 return -EOPNOTSUPP; 3150 3151 dev_hold(dev); 3152 rtnl_unlock(); 3153 3154 err = request_firmware_direct(&firmware, flash->data, &dev->dev); 3155 if (err) 3156 goto out; 3157 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware); 3158 release_firmware(firmware); 3159 out: 3160 rtnl_lock(); 3161 dev_put(dev); 3162 return err; 3163 } 3164 3165 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3166 struct ethtool_modinfo *modinfo) 3167 { 3168 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3169 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3170 int err; 3171 3172 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3173 mlxsw_sp_port->mapping.module, 3174 modinfo); 3175 3176 return err; 3177 } 3178 3179 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3180 struct ethtool_eeprom *ee, 3181 u8 *data) 3182 { 3183 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3184 struct 
mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
					  mlxsw_sp_port->mapping.module, ee,
					  data);

	return err;
}

/* ethtool operations for Spectrum port netdevs. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo = mlxsw_sp_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = mlxsw_sp_port_get_pauseparam,
	.set_pauseparam = mlxsw_sp_port_set_pauseparam,
	.get_strings = mlxsw_sp_port_get_strings,
	.set_phys_id = mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats = mlxsw_sp_port_get_stats,
	.get_sset_count = mlxsw_sp_port_get_sset_count,
	.get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
	.flash_device = mlxsw_sp_flash_device,
	.get_module_info = mlxsw_sp_get_module_info,
	.get_module_eeprom = mlxsw_sp_get_module_eeprom,
};

/* Enable the port's administratively allowed link modes according to its
 * lane width: upper speed = per-lane base speed * width.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;
	u32 upper_speed;
	u32 base_speed;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port,
				   &base_speed);
	if (err)
		return err;
	upper_speed = base_speed * width;

	eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_admin, mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

/* Configure one ETS scheduling element (QEEC register): link element @index
 * at hierarchy level @hr to @next_index one level up, optionally with a DWRR
 * weight.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the maximum shaper rate of one ETS element (QEEC register). */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the minimum shaper rate of one ETS element (QEEC register). */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Map a switch priority to a traffic class on the port (QTCT register). */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

/* Initialize the port's default ETS configuration: build the scheduling
 * hierarchy, disable all max shapers, configure multicast min shapers and
 * map every priority to TC 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TCs 8..15 are companions of TCs 0..7 (used for multicast
		 * below); link each to the same subgroup as its peer.
		 */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC,
					    i + 8, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HIERARCY_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0.
 */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

/* Enable/disable multicast-aware TC mode on the port (QTCTM register). */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

/* Create and register the netdev for one front-panel port and initialize all
 * of its subsystems (module mapping, SWID, buffers, ETS, DCB, FIDs, qdiscs,
 * NVE, default VLAN). On failure, teardown runs in reverse order through the
 * error labels at the end of the function.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	/* The driver's per-port context lives in the netdev private area. */
	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;	/* autoneg on by default */
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	/* NOTE(review): the message says "VID 1" but the VID used is
	 * MLXSW_SP_DEFAULT_VID — confirm they match or reword the message.
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Publish the port before register_netdev(); ndo callbacks may run
	 * as soon as registration succeeds.
	 */
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev, module + 1,
				mlxsw_sp_port->split, lane / width);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

/* Tear down one port: unregister the netdev and undo mlxsw_sp_port_create()
 * step by step in reverse order.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* All VLANs should have been flushed above. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

/* A port slot is in use iff its entry in the ports array is non-NULL. */
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

/* Remove all created ports and free the port bookkeeping arrays. */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	/* Local port 0 is the CPU port; front-panel ports start at 1. */
	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
	kfree(mlxsw_sp->ports);
}

/* Create a netdev for every mapped front-panel port (width != 0) and record
 * each port's module in port_to_module for later unsplit re-creation.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
						 GFP_KERNEL);
	if (!mlxsw_sp->port_to_module) {
		err = -ENOMEM;
		goto err_port_to_module_alloc;
	}

	for (i = 1; i < max_ports; i++) {
		/* Mark as invalid */
		mlxsw_sp->port_to_module[i] = -1;

		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;	/* no module mapped to this port */
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
	kfree(mlxsw_sp->ports);
	return err;
}

/* Ports are grouped in clusters of MLXSW_SP_PORTS_PER_CLUSTER_MAX local
 * ports; return the first local port of the cluster containing @local_port.
 */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}

/* Create @count split ports on @module starting at @base_port, dividing the
 * module's lanes evenly between them. Unwinds already-created ports on error.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	return err;
}

/* Re-create the original full-width ports after a split is undone or fails.
 * Errors from port creation are intentionally not propagated (best effort).
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		if (mlxsw_sp->port_to_module[local_port] < 0)
			continue;	/* no module was ever mapped here */
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}

/* devlink port split handler: split @local_port into @count (2 or 4) ports.
 * Validates the request, removes the affected ports and re-creates them as
 * split ports; on failure, restores the original unsplit ports.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
		return -EINVAL;
	}

	/* Only a full-width port can be split. */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Best-effort restore of the original unsplit ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

/* devlink port unsplit handler: remove the split ports and re-create the
 * original full-width port(s).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	/* Width 1 means the port was split by four, otherwise by two. */
	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ?
4 : 2; 3849 3850 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3851 3852 /* Determine which ports to remove. */ 3853 if (count == 2 && local_port >= base_port + 2) 3854 base_port = base_port + 2; 3855 3856 for (i = 0; i < count; i++) 3857 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3858 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3859 3860 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3861 3862 return 0; 3863 } 3864 3865 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 3866 char *pude_pl, void *priv) 3867 { 3868 struct mlxsw_sp *mlxsw_sp = priv; 3869 struct mlxsw_sp_port *mlxsw_sp_port; 3870 enum mlxsw_reg_pude_oper_status status; 3871 u8 local_port; 3872 3873 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 3874 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3875 if (!mlxsw_sp_port) 3876 return; 3877 3878 status = mlxsw_reg_pude_oper_status_get(pude_pl); 3879 if (status == MLXSW_PORT_OPER_STATUS_UP) { 3880 netdev_info(mlxsw_sp_port->dev, "link up\n"); 3881 netif_carrier_on(mlxsw_sp_port->dev); 3882 } else { 3883 netdev_info(mlxsw_sp_port->dev, "link down\n"); 3884 netif_carrier_off(mlxsw_sp_port->dev); 3885 } 3886 } 3887 3888 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 3889 u8 local_port, void *priv) 3890 { 3891 struct mlxsw_sp *mlxsw_sp = priv; 3892 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3893 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 3894 3895 if (unlikely(!mlxsw_sp_port)) { 3896 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 3897 local_port); 3898 return; 3899 } 3900 3901 skb->dev = mlxsw_sp_port->dev; 3902 3903 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 3904 u64_stats_update_begin(&pcpu_stats->syncp); 3905 pcpu_stats->rx_packets++; 3906 pcpu_stats->rx_bytes += skb->len; 3907 u64_stats_update_end(&pcpu_stats->syncp); 3908 3909 skb->protocol = eth_type_trans(skb, skb->dev); 3910 
netif_receive_skb(skb); 3911 } 3912 3913 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 3914 void *priv) 3915 { 3916 skb->offload_fwd_mark = 1; 3917 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3918 } 3919 3920 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 3921 u8 local_port, void *priv) 3922 { 3923 skb->offload_l3_fwd_mark = 1; 3924 skb->offload_fwd_mark = 1; 3925 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3926 } 3927 3928 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 3929 void *priv) 3930 { 3931 struct mlxsw_sp *mlxsw_sp = priv; 3932 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3933 struct psample_group *psample_group; 3934 u32 size; 3935 3936 if (unlikely(!mlxsw_sp_port)) { 3937 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 3938 local_port); 3939 goto out; 3940 } 3941 if (unlikely(!mlxsw_sp_port->sample)) { 3942 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 3943 local_port); 3944 goto out; 3945 } 3946 3947 size = mlxsw_sp_port->sample->truncate ? 
	       mlxsw_sp_port->sample->trunc_size : skb->len;

	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}

/* Listener-table helpers: bind a trap ID to one of the Rx handlers above,
 * a trap group and an action.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)	\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

/* All traps/events the driver registers with the core. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
};

/* Configure per-trap-group CPU policers (QPCR register) so no single group
 * of trapped traffic can overwhelm the CPU.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		/* Policer index i corresponds to trap group i; rates are in
		 * packets per second unless is_bytes is set.
		 */
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case
MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 4080 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: 4081 rate = 128; 4082 burst_size = 7; 4083 break; 4084 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 4085 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 4086 rate = 16 * 1024; 4087 burst_size = 10; 4088 break; 4089 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 4090 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 4091 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 4092 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 4093 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 4094 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 4095 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 4096 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 4097 rate = 1024; 4098 burst_size = 7; 4099 break; 4100 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 4101 rate = 1024; 4102 burst_size = 7; 4103 break; 4104 default: 4105 continue; 4106 } 4107 4108 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 4109 burst_size); 4110 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 4111 if (err) 4112 return err; 4113 } 4114 4115 return 0; 4116 } 4117 4118 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 4119 { 4120 char htgt_pl[MLXSW_REG_HTGT_LEN]; 4121 enum mlxsw_reg_htgt_trap_group i; 4122 int max_cpu_policers; 4123 int max_trap_groups; 4124 u8 priority, tc; 4125 u16 policer_id; 4126 int err; 4127 4128 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 4129 return -EIO; 4130 4131 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 4132 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 4133 4134 for (i = 0; i < max_trap_groups; i++) { 4135 policer_id = i; 4136 switch (i) { 4137 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 4138 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 4139 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 4140 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 4141 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 4142 priority = 5; 4143 tc = 5; 4144 break; 4145 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 4146 case 
MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 4147 priority = 4; 4148 tc = 4; 4149 break; 4150 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 4151 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 4152 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 4153 priority = 3; 4154 tc = 3; 4155 break; 4156 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 4157 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 4158 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 4159 priority = 2; 4160 tc = 2; 4161 break; 4162 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 4163 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 4164 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 4165 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 4166 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: 4167 priority = 1; 4168 tc = 1; 4169 break; 4170 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 4171 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 4172 tc = MLXSW_REG_HTGT_DEFAULT_TC; 4173 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 4174 break; 4175 default: 4176 continue; 4177 } 4178 4179 if (max_cpu_policers <= policer_id && 4180 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 4181 return -EIO; 4182 4183 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 4184 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 4185 if (err) 4186 return err; 4187 } 4188 4189 return 0; 4190 } 4191 4192 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 4193 { 4194 int i; 4195 int err; 4196 4197 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 4198 if (err) 4199 return err; 4200 4201 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 4202 if (err) 4203 return err; 4204 4205 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 4206 err = mlxsw_core_trap_register(mlxsw_sp->core, 4207 &mlxsw_sp_listener[i], 4208 mlxsw_sp); 4209 if (err) 4210 goto err_listener_register; 4211 4212 } 4213 return 0; 4214 4215 err_listener_register: 4216 for (i--; i >= 0; i--) { 4217 mlxsw_core_trap_unregister(mlxsw_sp->core, 4218 &mlxsw_sp_listener[i], 4219 mlxsw_sp); 4220 } 4221 return err; 4222 } 4223 4224 
/* Unregister all packet trap listeners registered by mlxsw_sp_traps_init(). */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
}

/* Initialize LAG support: configure the LAG hash fields with a random
 * seed (SLCR register) and allocate the per-LAG upper-device tracking
 * array sized by the device's MAX_LAG resource.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	get_random_bytes(&seed, sizeof(seed));
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

/* Release the per-LAG tracking array allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Configure the EMAD trap group, which must exist before any other
 * register access over EMADs can be policed/prioritized. No policer is
 * attached to it.
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

/* Common driver init path, shared by Spectrum-1 and Spectrum-2 (the
 * chip-specific *_init wrappers fill in the ops pointers first).
 * Initializes all driver subsystems in dependency order; on any failure
 * the already-initialized subsystems are torn down in reverse order via
 * the goto ladder at the bottom.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 init: select the SP1-specific ops tables and firmware
 * requirement, then run the common init.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Spectrum-2 init: select the SP2-specific ops tables, then run the
 * common init. Note: no req_rev/fw_filename, so no firmware version is
 * enforced for SP2 here.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Driver teardown: exact reverse order of mlxsw_sp_init(). */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

/* Device configuration profile for Spectrum-1; includes KVD sizing,
 * which SP1 hardware requires at init time.
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Device configuration profile for Spectrum-2; same as SP1 but without
 * the KVD sizing fields.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Compute the devlink size parameters (min/max/granularity) for the KVD
 * memory and its three partitions (linear, hash-single, hash-double),
 * based on the device-reported minimum sizes and total KVD size.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the KVD resource hierarchy with devlink for Spectrum-1:
 * KVD as the parent, with linear, hash-double and hash-single children.
 * Default partition sizes are derived from the SP1 config profile
 * (linear size fixed, remainder split by the double/single parts ratio,
 * rounded down to the KVD granularity).
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_sp1_resources_kvd_register(mlxsw_core);
}

/* Spectrum-2 exposes no devlink resources. */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	return 0;
}

/* Resolve the KVD partition sizes to use at device init, preferring
 * user-provided devlink resource sizes and falling back to the profile
 * defaults when devlink has no value for a partition.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* devlink validation callback: restrict 'fw_load_policy' to the two
 * values this driver implements (driver / flash).
 */
static int
mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
					       union devlink_param_value val,
					       struct netlink_ext_ack *extack)
{
	if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
	    (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

/* devlink parameters common to both ASIC generations. */
static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};

/* Register the common devlink parameters and set the driverinit default
 * for fw_load_policy to 'driver'.
 */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}

static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp_devlink_params,
				  ARRAY_SIZE(mlxsw_sp_devlink_params));
}

/* devlink get callback for the ACL region rehash interval (SP2 only). */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

/* devlink set callback for the ACL region rehash interval (SP2 only). */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

/* devlink parameters specific to Spectrum-2. */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};

/* Register the common parameters plus the SP2-specific ones; the SP2
 * set is unregistered again if its registration fails.
 */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = mlxsw_sp_params_register(mlxsw_core);
	if (err)
		return err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		goto err_devlink_params_register;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;

err_devlink_params_register:
	mlxsw_sp_params_unregister(mlxsw_core);
	return err;
}

static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
	mlxsw_sp_params_unregister(mlxsw_core);
}

/* mlxsw core driver ops table for Spectrum-1. */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.params_register		= mlxsw_sp_params_register,
	.params_unregister		= mlxsw_sp_params_unregister,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.res_query_enabled		= true,
};

/* mlxsw core driver ops table for Spectrum-2; differs from SP1 in init,
 * resources, params and profile (and has no kvd_sizes_get).
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
};

/* True if @dev is a netdev created by this driver. */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

/* netdev_walk callback: record the first mlxsw port found among lower
 * devices and stop the walk (non-zero return).
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

/* Find the mlxsw port underlying @dev (which may itself be the port, or
 * an upper device such as a bridge/LAG); NULL if none.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}
/* Resolve the mlxsw_sp instance that @dev (or one of its lowers)
 * belongs to; NULL if @dev is unrelated to this driver.
 */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

/* RCU variant of mlxsw_sp_port_dev_lower_find(); caller must hold the
 * RCU read lock.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Like mlxsw_sp_port_dev_lower_find(), but takes a reference on the
 * found port's netdev; release it with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

/* Drop the reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

/* Make the port leave any bridge reachable through @lag_dev: the LAG
 * itself if it is a bridge port, and any of the LAG's uppers (e.g.
 * VLAN devices) that are bridge ports.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

/* Create LAG @lag_id in hardware (SLDR register). */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Destroy LAG @lag_id in hardware (SLDR register). */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Add the port to LAG @lag_id at member slot @port_index (SLCOR). */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Remove the port from LAG @lag_id (SLCOR). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable traffic collection/distribution for the port in LAG @lag_id. */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Disable traffic collection/distribution for the port in LAG @lag_id. */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
5036 u64 max_lag; 5037 int i; 5038 5039 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); 5040 for (i = 0; i < max_lag; i++) { 5041 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 5042 if (lag->ref_count) { 5043 if (lag->dev == lag_dev) { 5044 *p_lag_id = i; 5045 return 0; 5046 } 5047 } else if (free_lag_id < 0) { 5048 free_lag_id = i; 5049 } 5050 } 5051 if (free_lag_id < 0) 5052 return -EBUSY; 5053 *p_lag_id = free_lag_id; 5054 return 0; 5055 } 5056 5057 static bool 5058 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 5059 struct net_device *lag_dev, 5060 struct netdev_lag_upper_info *lag_upper_info, 5061 struct netlink_ext_ack *extack) 5062 { 5063 u16 lag_id; 5064 5065 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 5066 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 5067 return false; 5068 } 5069 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 5070 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 5071 return false; 5072 } 5073 return true; 5074 } 5075 5076 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 5077 u16 lag_id, u8 *p_port_index) 5078 { 5079 u64 max_lag_members; 5080 int i; 5081 5082 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 5083 MAX_LAG_MEMBERS); 5084 for (i = 0; i < max_lag_members; i++) { 5085 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 5086 *p_port_index = i; 5087 return 0; 5088 } 5089 } 5090 return -EBUSY; 5091 } 5092 5093 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 5094 struct net_device *lag_dev) 5095 { 5096 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5097 struct mlxsw_sp_upper *lag; 5098 u16 lag_id; 5099 u8 port_index; 5100 int err; 5101 5102 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 5103 if (err) 5104 return err; 5105 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5106 if (!lag->ref_count) { 5107 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 5108 if (err) 5109 return err; 5110 lag->dev = 
lag_dev; 5111 } 5112 5113 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 5114 if (err) 5115 return err; 5116 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 5117 if (err) 5118 goto err_col_port_add; 5119 5120 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 5121 mlxsw_sp_port->local_port); 5122 mlxsw_sp_port->lag_id = lag_id; 5123 mlxsw_sp_port->lagged = 1; 5124 lag->ref_count++; 5125 5126 /* Port is no longer usable as a router interface */ 5127 if (mlxsw_sp_port->default_vlan->fid) 5128 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 5129 5130 return 0; 5131 5132 err_col_port_add: 5133 if (!lag->ref_count) 5134 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 5135 return err; 5136 } 5137 5138 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 5139 struct net_device *lag_dev) 5140 { 5141 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5142 u16 lag_id = mlxsw_sp_port->lag_id; 5143 struct mlxsw_sp_upper *lag; 5144 5145 if (!mlxsw_sp_port->lagged) 5146 return; 5147 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5148 WARN_ON(lag->ref_count == 0); 5149 5150 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 5151 5152 /* Any VLANs configured on the port are no longer valid */ 5153 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 5154 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 5155 /* Make the LAG and its directly linked uppers leave bridges they 5156 * are memeber in 5157 */ 5158 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 5159 5160 if (lag->ref_count == 1) 5161 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 5162 5163 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 5164 mlxsw_sp_port->local_port); 5165 mlxsw_sp_port->lagged = 0; 5166 lag->ref_count--; 5167 5168 /* Make sure untagged frames are allowed to ingress */ 5169 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 5170 } 5171 5172 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port 
*mlxsw_sp_port, 5173 u16 lag_id) 5174 { 5175 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5176 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5177 5178 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 5179 mlxsw_sp_port->local_port); 5180 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5181 } 5182 5183 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 5184 u16 lag_id) 5185 { 5186 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5187 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5188 5189 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 5190 mlxsw_sp_port->local_port); 5191 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5192 } 5193 5194 static int 5195 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 5196 { 5197 int err; 5198 5199 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 5200 mlxsw_sp_port->lag_id); 5201 if (err) 5202 return err; 5203 5204 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5205 if (err) 5206 goto err_dist_port_add; 5207 5208 return 0; 5209 5210 err_dist_port_add: 5211 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5212 return err; 5213 } 5214 5215 static int 5216 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 5217 { 5218 int err; 5219 5220 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 5221 mlxsw_sp_port->lag_id); 5222 if (err) 5223 return err; 5224 5225 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 5226 mlxsw_sp_port->lag_id); 5227 if (err) 5228 goto err_col_port_disable; 5229 5230 return 0; 5231 5232 err_col_port_disable: 5233 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5234 return err; 5235 } 5236 5237 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 5238 struct netdev_lag_lower_state_info *info) 5239 { 5240 if (info->tx_enabled) 5241 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 5242 else 5243 return 
mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 5244 } 5245 5246 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 5247 bool enable) 5248 { 5249 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5250 enum mlxsw_reg_spms_state spms_state; 5251 char *spms_pl; 5252 u16 vid; 5253 int err; 5254 5255 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : 5256 MLXSW_REG_SPMS_STATE_DISCARDING; 5257 5258 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 5259 if (!spms_pl) 5260 return -ENOMEM; 5261 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 5262 5263 for (vid = 0; vid < VLAN_N_VID; vid++) 5264 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 5265 5266 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 5267 kfree(spms_pl); 5268 return err; 5269 } 5270 5271 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 5272 { 5273 u16 vid = 1; 5274 int err; 5275 5276 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 5277 if (err) 5278 return err; 5279 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 5280 if (err) 5281 goto err_port_stp_set; 5282 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 5283 true, false); 5284 if (err) 5285 goto err_port_vlan_set; 5286 5287 for (; vid <= VLAN_N_VID - 1; vid++) { 5288 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 5289 vid, false); 5290 if (err) 5291 goto err_vid_learning_set; 5292 } 5293 5294 return 0; 5295 5296 err_vid_learning_set: 5297 for (vid--; vid >= 1; vid--) 5298 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 5299 err_port_vlan_set: 5300 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 5301 err_port_stp_set: 5302 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 5303 return err; 5304 } 5305 5306 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 5307 { 5308 u16 vid; 5309 5310 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 5311 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 5312 vid, true); 5313 5314 
mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 5315 false, false); 5316 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 5317 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 5318 } 5319 5320 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 5321 { 5322 unsigned int num_vxlans = 0; 5323 struct net_device *dev; 5324 struct list_head *iter; 5325 5326 netdev_for_each_lower_dev(br_dev, dev, iter) { 5327 if (netif_is_vxlan(dev)) 5328 num_vxlans++; 5329 } 5330 5331 return num_vxlans > 1; 5332 } 5333 5334 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 5335 { 5336 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 5337 struct net_device *dev; 5338 struct list_head *iter; 5339 5340 netdev_for_each_lower_dev(br_dev, dev, iter) { 5341 u16 pvid; 5342 int err; 5343 5344 if (!netif_is_vxlan(dev)) 5345 continue; 5346 5347 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 5348 if (err || !pvid) 5349 continue; 5350 5351 if (test_and_set_bit(pvid, vlans)) 5352 return false; 5353 } 5354 5355 return true; 5356 } 5357 5358 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 5359 struct netlink_ext_ack *extack) 5360 { 5361 if (br_multicast_enabled(br_dev)) { 5362 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 5363 return false; 5364 } 5365 5366 if (!br_vlan_enabled(br_dev) && 5367 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 5368 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 5369 return false; 5370 } 5371 5372 if (br_vlan_enabled(br_dev) && 5373 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 5374 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 5375 return false; 5376 } 5377 5378 return true; 5379 } 5380 5381 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 5382 struct net_device *dev, 5383 unsigned long event, void *ptr) 5384 { 5385 struct 
netdev_notifier_changeupper_info *info; 5386 struct mlxsw_sp_port *mlxsw_sp_port; 5387 struct netlink_ext_ack *extack; 5388 struct net_device *upper_dev; 5389 struct mlxsw_sp *mlxsw_sp; 5390 int err = 0; 5391 5392 mlxsw_sp_port = netdev_priv(dev); 5393 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5394 info = ptr; 5395 extack = netdev_notifier_info_to_extack(&info->info); 5396 5397 switch (event) { 5398 case NETDEV_PRECHANGEUPPER: 5399 upper_dev = info->upper_dev; 5400 if (!is_vlan_dev(upper_dev) && 5401 !netif_is_lag_master(upper_dev) && 5402 !netif_is_bridge_master(upper_dev) && 5403 !netif_is_ovs_master(upper_dev) && 5404 !netif_is_macvlan(upper_dev)) { 5405 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5406 return -EINVAL; 5407 } 5408 if (!info->linking) 5409 break; 5410 if (netif_is_bridge_master(upper_dev) && 5411 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 5412 mlxsw_sp_bridge_has_vxlan(upper_dev) && 5413 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5414 return -EOPNOTSUPP; 5415 if (netdev_has_any_upper_dev(upper_dev) && 5416 (!netif_is_bridge_master(upper_dev) || 5417 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 5418 upper_dev))) { 5419 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 5420 return -EINVAL; 5421 } 5422 if (netif_is_lag_master(upper_dev) && 5423 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 5424 info->upper_info, extack)) 5425 return -EINVAL; 5426 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 5427 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 5428 return -EINVAL; 5429 } 5430 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 5431 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 5432 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 5433 return -EINVAL; 5434 } 5435 if (netif_is_macvlan(upper_dev) && 5436 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) { 5437 
NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 5438 return -EOPNOTSUPP; 5439 } 5440 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 5441 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); 5442 return -EINVAL; 5443 } 5444 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 5445 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 5446 return -EINVAL; 5447 } 5448 break; 5449 case NETDEV_CHANGEUPPER: 5450 upper_dev = info->upper_dev; 5451 if (netif_is_bridge_master(upper_dev)) { 5452 if (info->linking) 5453 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 5454 lower_dev, 5455 upper_dev, 5456 extack); 5457 else 5458 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 5459 lower_dev, 5460 upper_dev); 5461 } else if (netif_is_lag_master(upper_dev)) { 5462 if (info->linking) { 5463 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 5464 upper_dev); 5465 } else { 5466 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 5467 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 5468 upper_dev); 5469 } 5470 } else if (netif_is_ovs_master(upper_dev)) { 5471 if (info->linking) 5472 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 5473 else 5474 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 5475 } else if (netif_is_macvlan(upper_dev)) { 5476 if (!info->linking) 5477 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5478 } else if (is_vlan_dev(upper_dev)) { 5479 struct net_device *br_dev; 5480 5481 if (!netif_is_bridge_port(upper_dev)) 5482 break; 5483 if (info->linking) 5484 break; 5485 br_dev = netdev_master_upper_dev_get(upper_dev); 5486 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 5487 br_dev); 5488 } 5489 break; 5490 } 5491 5492 return err; 5493 } 5494 5495 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 5496 unsigned long event, void *ptr) 5497 { 5498 struct netdev_notifier_changelowerstate_info *info; 5499 struct mlxsw_sp_port *mlxsw_sp_port; 5500 int err; 5501 5502 mlxsw_sp_port = 
netdev_priv(dev); 5503 info = ptr; 5504 5505 switch (event) { 5506 case NETDEV_CHANGELOWERSTATE: 5507 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 5508 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 5509 info->lower_state_info); 5510 if (err) 5511 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 5512 } 5513 break; 5514 } 5515 5516 return 0; 5517 } 5518 5519 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 5520 struct net_device *port_dev, 5521 unsigned long event, void *ptr) 5522 { 5523 switch (event) { 5524 case NETDEV_PRECHANGEUPPER: 5525 case NETDEV_CHANGEUPPER: 5526 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 5527 event, ptr); 5528 case NETDEV_CHANGELOWERSTATE: 5529 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 5530 ptr); 5531 } 5532 5533 return 0; 5534 } 5535 5536 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 5537 unsigned long event, void *ptr) 5538 { 5539 struct net_device *dev; 5540 struct list_head *iter; 5541 int ret; 5542 5543 netdev_for_each_lower_dev(lag_dev, dev, iter) { 5544 if (mlxsw_sp_port_dev_check(dev)) { 5545 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 5546 ptr); 5547 if (ret) 5548 return ret; 5549 } 5550 } 5551 5552 return 0; 5553 } 5554 5555 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 5556 struct net_device *dev, 5557 unsigned long event, void *ptr, 5558 u16 vid) 5559 { 5560 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 5561 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5562 struct netdev_notifier_changeupper_info *info = ptr; 5563 struct netlink_ext_ack *extack; 5564 struct net_device *upper_dev; 5565 int err = 0; 5566 5567 extack = netdev_notifier_info_to_extack(&info->info); 5568 5569 switch (event) { 5570 case NETDEV_PRECHANGEUPPER: 5571 upper_dev = info->upper_dev; 5572 if (!netif_is_bridge_master(upper_dev) && 5573 !netif_is_macvlan(upper_dev)) { 5574 
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5575 return -EINVAL; 5576 } 5577 if (!info->linking) 5578 break; 5579 if (netif_is_bridge_master(upper_dev) && 5580 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 5581 mlxsw_sp_bridge_has_vxlan(upper_dev) && 5582 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5583 return -EOPNOTSUPP; 5584 if (netdev_has_any_upper_dev(upper_dev) && 5585 (!netif_is_bridge_master(upper_dev) || 5586 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 5587 upper_dev))) { 5588 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 5589 return -EINVAL; 5590 } 5591 if (netif_is_macvlan(upper_dev) && 5592 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { 5593 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 5594 return -EOPNOTSUPP; 5595 } 5596 break; 5597 case NETDEV_CHANGEUPPER: 5598 upper_dev = info->upper_dev; 5599 if (netif_is_bridge_master(upper_dev)) { 5600 if (info->linking) 5601 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 5602 vlan_dev, 5603 upper_dev, 5604 extack); 5605 else 5606 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 5607 vlan_dev, 5608 upper_dev); 5609 } else if (netif_is_macvlan(upper_dev)) { 5610 if (!info->linking) 5611 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5612 } else { 5613 err = -EINVAL; 5614 WARN_ON(1); 5615 } 5616 break; 5617 } 5618 5619 return err; 5620 } 5621 5622 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 5623 struct net_device *lag_dev, 5624 unsigned long event, 5625 void *ptr, u16 vid) 5626 { 5627 struct net_device *dev; 5628 struct list_head *iter; 5629 int ret; 5630 5631 netdev_for_each_lower_dev(lag_dev, dev, iter) { 5632 if (mlxsw_sp_port_dev_check(dev)) { 5633 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 5634 event, ptr, 5635 vid); 5636 if (ret) 5637 return ret; 5638 } 5639 } 5640 5641 return 0; 5642 } 5643 5644 static int 
mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, 5645 struct net_device *br_dev, 5646 unsigned long event, void *ptr, 5647 u16 vid) 5648 { 5649 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 5650 struct netdev_notifier_changeupper_info *info = ptr; 5651 struct netlink_ext_ack *extack; 5652 struct net_device *upper_dev; 5653 5654 if (!mlxsw_sp) 5655 return 0; 5656 5657 extack = netdev_notifier_info_to_extack(&info->info); 5658 5659 switch (event) { 5660 case NETDEV_PRECHANGEUPPER: 5661 upper_dev = info->upper_dev; 5662 if (!netif_is_macvlan(upper_dev)) { 5663 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5664 return -EOPNOTSUPP; 5665 } 5666 if (!info->linking) 5667 break; 5668 if (netif_is_macvlan(upper_dev) && 5669 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { 5670 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 5671 return -EOPNOTSUPP; 5672 } 5673 break; 5674 case NETDEV_CHANGEUPPER: 5675 upper_dev = info->upper_dev; 5676 if (info->linking) 5677 break; 5678 if (netif_is_macvlan(upper_dev)) 5679 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5680 break; 5681 } 5682 5683 return 0; 5684 } 5685 5686 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 5687 unsigned long event, void *ptr) 5688 { 5689 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 5690 u16 vid = vlan_dev_vlan_id(vlan_dev); 5691 5692 if (mlxsw_sp_port_dev_check(real_dev)) 5693 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 5694 event, ptr, vid); 5695 else if (netif_is_lag_master(real_dev)) 5696 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 5697 real_dev, event, 5698 ptr, vid); 5699 else if (netif_is_bridge_master(real_dev)) 5700 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, 5701 event, ptr, vid); 5702 5703 return 0; 5704 } 5705 5706 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 5707 unsigned long event, void *ptr) 5708 { 5709 struct 
mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); 5710 struct netdev_notifier_changeupper_info *info = ptr; 5711 struct netlink_ext_ack *extack; 5712 struct net_device *upper_dev; 5713 5714 if (!mlxsw_sp) 5715 return 0; 5716 5717 extack = netdev_notifier_info_to_extack(&info->info); 5718 5719 switch (event) { 5720 case NETDEV_PRECHANGEUPPER: 5721 upper_dev = info->upper_dev; 5722 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { 5723 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5724 return -EOPNOTSUPP; 5725 } 5726 if (!info->linking) 5727 break; 5728 if (netif_is_macvlan(upper_dev) && 5729 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) { 5730 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 5731 return -EOPNOTSUPP; 5732 } 5733 break; 5734 case NETDEV_CHANGEUPPER: 5735 upper_dev = info->upper_dev; 5736 if (info->linking) 5737 break; 5738 if (is_vlan_dev(upper_dev)) 5739 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 5740 if (netif_is_macvlan(upper_dev)) 5741 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5742 break; 5743 } 5744 5745 return 0; 5746 } 5747 5748 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 5749 unsigned long event, void *ptr) 5750 { 5751 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 5752 struct netdev_notifier_changeupper_info *info = ptr; 5753 struct netlink_ext_ack *extack; 5754 5755 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 5756 return 0; 5757 5758 extack = netdev_notifier_info_to_extack(&info->info); 5759 5760 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ 5761 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5762 5763 return -EOPNOTSUPP; 5764 } 5765 5766 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) 5767 { 5768 struct netdev_notifier_changeupper_info *info = ptr; 5769 5770 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) 5771 return false; 5772 return 
netif_is_l3_master(info->upper_dev); 5773 } 5774 5775 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 5776 struct net_device *dev, 5777 unsigned long event, void *ptr) 5778 { 5779 struct netdev_notifier_changeupper_info *cu_info; 5780 struct netdev_notifier_info *info = ptr; 5781 struct netlink_ext_ack *extack; 5782 struct net_device *upper_dev; 5783 5784 extack = netdev_notifier_info_to_extack(info); 5785 5786 switch (event) { 5787 case NETDEV_CHANGEUPPER: 5788 cu_info = container_of(info, 5789 struct netdev_notifier_changeupper_info, 5790 info); 5791 upper_dev = cu_info->upper_dev; 5792 if (!netif_is_bridge_master(upper_dev)) 5793 return 0; 5794 if (!mlxsw_sp_lower_get(upper_dev)) 5795 return 0; 5796 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5797 return -EOPNOTSUPP; 5798 if (cu_info->linking) { 5799 if (!netif_running(dev)) 5800 return 0; 5801 /* When the bridge is VLAN-aware, the VNI of the VxLAN 5802 * device needs to be mapped to a VLAN, but at this 5803 * point no VLANs are configured on the VxLAN device 5804 */ 5805 if (br_vlan_enabled(upper_dev)) 5806 return 0; 5807 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 5808 dev, 0, extack); 5809 } else { 5810 /* VLANs were already flushed, which triggered the 5811 * necessary cleanup 5812 */ 5813 if (br_vlan_enabled(upper_dev)) 5814 return 0; 5815 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5816 } 5817 break; 5818 case NETDEV_PRE_UP: 5819 upper_dev = netdev_master_upper_dev_get(dev); 5820 if (!upper_dev) 5821 return 0; 5822 if (!netif_is_bridge_master(upper_dev)) 5823 return 0; 5824 if (!mlxsw_sp_lower_get(upper_dev)) 5825 return 0; 5826 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 5827 extack); 5828 case NETDEV_DOWN: 5829 upper_dev = netdev_master_upper_dev_get(dev); 5830 if (!upper_dev) 5831 return 0; 5832 if (!netif_is_bridge_master(upper_dev)) 5833 return 0; 5834 if (!mlxsw_sp_lower_get(upper_dev)) 5835 return 0; 5836 
mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5837 break; 5838 } 5839 5840 return 0; 5841 } 5842 5843 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 5844 unsigned long event, void *ptr) 5845 { 5846 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 5847 struct mlxsw_sp_span_entry *span_entry; 5848 struct mlxsw_sp *mlxsw_sp; 5849 int err = 0; 5850 5851 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 5852 if (event == NETDEV_UNREGISTER) { 5853 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 5854 if (span_entry) 5855 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 5856 } 5857 mlxsw_sp_span_respin(mlxsw_sp); 5858 5859 if (netif_is_vxlan(dev)) 5860 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 5861 if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) 5862 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, 5863 event, ptr); 5864 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) 5865 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, 5866 event, ptr); 5867 else if (event == NETDEV_PRE_CHANGEADDR || 5868 event == NETDEV_CHANGEADDR || 5869 event == NETDEV_CHANGEMTU) 5870 err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); 5871 else if (mlxsw_sp_is_vrf_event(event, ptr)) 5872 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); 5873 else if (mlxsw_sp_port_dev_check(dev)) 5874 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); 5875 else if (netif_is_lag_master(dev)) 5876 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 5877 else if (is_vlan_dev(dev)) 5878 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 5879 else if (netif_is_bridge_master(dev)) 5880 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); 5881 else if (netif_is_macvlan(dev)) 5882 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 5883 5884 return notifier_from_errno(err); 5885 } 5886 5887 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { 5888 .notifier_call = 
mlxsw_sp_inetaddr_valid_event, 5889 }; 5890 5891 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { 5892 .notifier_call = mlxsw_sp_inet6addr_valid_event, 5893 }; 5894 5895 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 5896 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 5897 {0, }, 5898 }; 5899 5900 static struct pci_driver mlxsw_sp1_pci_driver = { 5901 .name = mlxsw_sp1_driver_name, 5902 .id_table = mlxsw_sp1_pci_id_table, 5903 }; 5904 5905 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 5906 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 5907 {0, }, 5908 }; 5909 5910 static struct pci_driver mlxsw_sp2_pci_driver = { 5911 .name = mlxsw_sp2_driver_name, 5912 .id_table = mlxsw_sp2_pci_id_table, 5913 }; 5914 5915 static int __init mlxsw_sp_module_init(void) 5916 { 5917 int err; 5918 5919 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5920 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5921 5922 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 5923 if (err) 5924 goto err_sp1_core_driver_register; 5925 5926 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 5927 if (err) 5928 goto err_sp2_core_driver_register; 5929 5930 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 5931 if (err) 5932 goto err_sp1_pci_driver_register; 5933 5934 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 5935 if (err) 5936 goto err_sp2_pci_driver_register; 5937 5938 return 0; 5939 5940 err_sp2_pci_driver_register: 5941 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5942 err_sp1_pci_driver_register: 5943 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5944 err_sp2_core_driver_register: 5945 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5946 err_sp1_core_driver_register: 5947 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5948 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5949 return err; 5950 } 
/* Module exit: tear everything down in the reverse order of
 * mlxsw_sp_module_init() - PCI drivers first, then core drivers, then
 * the address-validator notifiers.
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Expose PCI IDs for automatic module loading on device probe. */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
/* Declare the Spectrum-1 firmware image so it ships with the module. */
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);