1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/random.h> 25 #include <net/switchdev.h> 26 #include <net/pkt_cls.h> 27 #include <net/tc_act/tc_mirred.h> 28 #include <net/netevent.h> 29 #include <net/tc_act/tc_sample.h> 30 #include <net/addrconf.h> 31 32 #include "spectrum.h" 33 #include "pci.h" 34 #include "core.h" 35 #include "reg.h" 36 #include "port.h" 37 #include "trap.h" 38 #include "txheader.h" 39 #include "spectrum_cnt.h" 40 #include "spectrum_dpipe.h" 41 #include "spectrum_acl_flex_actions.h" 42 #include "spectrum_span.h" 43 #include "../mlxfw/mlxfw.h" 44 45 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) 46 47 #define MLXSW_SP1_FWREV_MAJOR 13 48 #define MLXSW_SP1_FWREV_MINOR 1703 49 #define MLXSW_SP1_FWREV_SUBMINOR 4 50 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 51 52 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 53 .major = MLXSW_SP1_FWREV_MAJOR, 54 .minor = MLXSW_SP1_FWREV_MINOR, 55 .subminor = MLXSW_SP1_FWREV_SUBMINOR, 56 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 57 }; 58 59 #define MLXSW_SP1_FW_FILENAME \ 60 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 61 "." __stringify(MLXSW_SP1_FWREV_MINOR) \ 62 "." 
__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" 63 64 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 65 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 66 static const char mlxsw_sp_driver_version[] = "1.0"; 67 68 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 69 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 70 }; 71 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 72 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 73 }; 74 75 /* tx_hdr_version 76 * Tx header version. 77 * Must be set to 1. 78 */ 79 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 80 81 /* tx_hdr_ctl 82 * Packet control type. 83 * 0 - Ethernet control (e.g. EMADs, LACP) 84 * 1 - Ethernet data 85 */ 86 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 87 88 /* tx_hdr_proto 89 * Packet protocol type. Must be set to 1 (Ethernet). 90 */ 91 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 92 93 /* tx_hdr_rx_is_router 94 * Packet is sent from the router. Valid for data packets only. 95 */ 96 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 97 98 /* tx_hdr_fid_valid 99 * Indicates if the 'fid' field is valid and should be used for 100 * forwarding lookup. Valid for data packets only. 101 */ 102 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 103 104 /* tx_hdr_swid 105 * Switch partition ID. Must be set to 0. 106 */ 107 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 108 109 /* tx_hdr_control_tclass 110 * Indicates if the packet should use the control TClass and not one 111 * of the data TClasses. 112 */ 113 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 114 115 /* tx_hdr_etclass 116 * Egress TClass to be used on the egress device on the egress port. 117 */ 118 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 119 120 /* tx_hdr_port_mid 121 * Destination local port for unicast packets. 122 * Destination multicast ID for multicast packets. 
123 * 124 * Control packets are directed to a specific egress port, while data 125 * packets are transmitted through the CPU port (0) into the switch partition, 126 * where forwarding rules are applied. 127 */ 128 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 129 130 /* tx_hdr_fid 131 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 132 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 133 * Valid for data packets only. 134 */ 135 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 136 137 /* tx_hdr_type 138 * 0 - Data packets 139 * 6 - Control packets 140 */ 141 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 142 143 struct mlxsw_sp_mlxfw_dev { 144 struct mlxfw_dev mlxfw_dev; 145 struct mlxsw_sp *mlxsw_sp; 146 }; 147 148 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 149 u16 component_index, u32 *p_max_size, 150 u8 *p_align_bits, u16 *p_max_write_size) 151 { 152 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 153 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 154 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 155 char mcqi_pl[MLXSW_REG_MCQI_LEN]; 156 int err; 157 158 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 159 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 160 if (err) 161 return err; 162 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 163 p_max_write_size); 164 165 *p_align_bits = max_t(u8, *p_align_bits, 2); 166 *p_max_write_size = min_t(u16, *p_max_write_size, 167 MLXSW_REG_MCDA_MAX_DATA_LEN); 168 return 0; 169 } 170 171 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 172 { 173 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 174 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 175 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 176 char mcc_pl[MLXSW_REG_MCC_LEN]; 177 u8 control_state; 178 int err; 179 180 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 181 err = mlxsw_reg_query(mlxsw_sp->core, 
MLXSW_REG(mcc), mcc_pl); 182 if (err) 183 return err; 184 185 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 186 if (control_state != MLXFW_FSM_STATE_IDLE) 187 return -EBUSY; 188 189 mlxsw_reg_mcc_pack(mcc_pl, 190 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 191 0, *fwhandle, 0); 192 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 193 } 194 195 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 196 u32 fwhandle, u16 component_index, 197 u32 component_size) 198 { 199 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 200 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 201 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 202 char mcc_pl[MLXSW_REG_MCC_LEN]; 203 204 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 205 component_index, fwhandle, component_size); 206 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 207 } 208 209 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 210 u32 fwhandle, u8 *data, u16 size, 211 u32 offset) 212 { 213 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 214 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 215 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 216 char mcda_pl[MLXSW_REG_MCDA_LEN]; 217 218 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 219 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 220 } 221 222 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 223 u32 fwhandle, u16 component_index) 224 { 225 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 226 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 227 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 228 char mcc_pl[MLXSW_REG_MCC_LEN]; 229 230 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 231 component_index, fwhandle, 0); 232 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 233 } 234 235 static int 
mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 236 { 237 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 238 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 239 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 240 char mcc_pl[MLXSW_REG_MCC_LEN]; 241 242 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 243 fwhandle, 0); 244 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 245 } 246 247 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 248 enum mlxfw_fsm_state *fsm_state, 249 enum mlxfw_fsm_state_err *fsm_state_err) 250 { 251 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 252 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 253 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 254 char mcc_pl[MLXSW_REG_MCC_LEN]; 255 u8 control_state; 256 u8 error_code; 257 int err; 258 259 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 260 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 261 if (err) 262 return err; 263 264 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 265 *fsm_state = control_state; 266 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 267 MLXFW_FSM_STATE_ERR_MAX); 268 return 0; 269 } 270 271 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 272 { 273 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 274 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 275 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 276 char mcc_pl[MLXSW_REG_MCC_LEN]; 277 278 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 279 fwhandle, 0); 280 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 281 } 282 283 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 284 { 285 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 286 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 287 struct mlxsw_sp *mlxsw_sp = 
mlxsw_sp_mlxfw_dev->mlxsw_sp; 288 char mcc_pl[MLXSW_REG_MCC_LEN]; 289 290 mlxsw_reg_mcc_pack(mcc_pl, 291 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 292 fwhandle, 0); 293 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 294 } 295 296 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 297 .component_query = mlxsw_sp_component_query, 298 .fsm_lock = mlxsw_sp_fsm_lock, 299 .fsm_component_update = mlxsw_sp_fsm_component_update, 300 .fsm_block_download = mlxsw_sp_fsm_block_download, 301 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 302 .fsm_activate = mlxsw_sp_fsm_activate, 303 .fsm_query_state = mlxsw_sp_fsm_query_state, 304 .fsm_cancel = mlxsw_sp_fsm_cancel, 305 .fsm_release = mlxsw_sp_fsm_release 306 }; 307 308 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 309 const struct firmware *firmware) 310 { 311 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 312 .mlxfw_dev = { 313 .ops = &mlxsw_sp_mlxfw_dev_ops, 314 .psid = mlxsw_sp->bus_info->psid, 315 .psid_size = strlen(mlxsw_sp->bus_info->psid), 316 }, 317 .mlxsw_sp = mlxsw_sp 318 }; 319 320 return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware); 321 } 322 323 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 324 { 325 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 326 const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 327 const char *fw_filename = mlxsw_sp->fw_filename; 328 union devlink_param_value value; 329 const struct firmware *firmware; 330 int err; 331 332 /* Don't check if driver does not require it */ 333 if (!req_rev || !fw_filename) 334 return 0; 335 336 /* Don't check if devlink 'fw_load_policy' param is 'flash' */ 337 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), 338 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 339 &value); 340 if (err) 341 return err; 342 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) 343 return 0; 344 345 /* Validate driver & FW are compatible */ 346 if 
(rev->major != req_rev->major) { 347 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 348 rev->major, req_rev->major); 349 return -EINVAL; 350 } 351 if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == 352 MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && 353 (rev->minor > req_rev->minor || 354 (rev->minor == req_rev->minor && 355 rev->subminor >= req_rev->subminor))) 356 return 0; 357 358 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", 359 rev->major, rev->minor, rev->subminor); 360 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 361 fw_filename); 362 363 err = request_firmware_direct(&firmware, fw_filename, 364 mlxsw_sp->bus_info->dev); 365 if (err) { 366 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 367 fw_filename); 368 return err; 369 } 370 371 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware); 372 release_firmware(firmware); 373 if (err) 374 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 375 376 /* On FW flash success, tell the caller FW reset is needed 377 * if current FW supports it. 378 */ 379 if (rev->minor >= req_rev->can_reset_minor) 380 return err ? 
err : -EAGAIN; 381 else 382 return 0; 383 } 384 385 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 386 unsigned int counter_index, u64 *packets, 387 u64 *bytes) 388 { 389 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 390 int err; 391 392 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 393 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 394 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 395 if (err) 396 return err; 397 if (packets) 398 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 399 if (bytes) 400 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 401 return 0; 402 } 403 404 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 405 unsigned int counter_index) 406 { 407 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 408 409 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 410 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 411 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 412 } 413 414 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 415 unsigned int *p_counter_index) 416 { 417 int err; 418 419 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 420 p_counter_index); 421 if (err) 422 return err; 423 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 424 if (err) 425 goto err_counter_clear; 426 return 0; 427 428 err_counter_clear: 429 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 430 *p_counter_index); 431 return err; 432 } 433 434 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 435 unsigned int counter_index) 436 { 437 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 438 counter_index); 439 } 440 441 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 442 const struct mlxsw_tx_info *tx_info) 443 { 444 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 445 446 memset(txhdr, 0, MLXSW_TXHDR_LEN); 447 448 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 449 mlxsw_tx_hdr_ctl_set(txhdr, 
MLXSW_TXHDR_ETH_CTL); 450 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 451 mlxsw_tx_hdr_swid_set(txhdr, 0); 452 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 453 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 454 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 455 } 456 457 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 458 { 459 switch (state) { 460 case BR_STATE_FORWARDING: 461 return MLXSW_REG_SPMS_STATE_FORWARDING; 462 case BR_STATE_LEARNING: 463 return MLXSW_REG_SPMS_STATE_LEARNING; 464 case BR_STATE_LISTENING: /* fall-through */ 465 case BR_STATE_DISABLED: /* fall-through */ 466 case BR_STATE_BLOCKING: 467 return MLXSW_REG_SPMS_STATE_DISCARDING; 468 default: 469 BUG(); 470 } 471 } 472 473 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 474 u8 state) 475 { 476 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 477 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 478 char *spms_pl; 479 int err; 480 481 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 482 if (!spms_pl) 483 return -ENOMEM; 484 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 485 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 486 487 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 488 kfree(spms_pl); 489 return err; 490 } 491 492 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 493 { 494 char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 495 int err; 496 497 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 498 if (err) 499 return err; 500 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 501 return 0; 502 } 503 504 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, 505 bool enable, u32 rate) 506 { 507 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 508 char mpsc_pl[MLXSW_REG_MPSC_LEN]; 509 510 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); 511 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), 
mpsc_pl); 512 } 513 514 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 515 bool is_up) 516 { 517 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 518 char paos_pl[MLXSW_REG_PAOS_LEN]; 519 520 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 521 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 522 MLXSW_PORT_ADMIN_STATUS_DOWN); 523 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 524 } 525 526 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 527 unsigned char *addr) 528 { 529 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 530 char ppad_pl[MLXSW_REG_PPAD_LEN]; 531 532 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 533 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 534 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 535 } 536 537 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 538 { 539 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 540 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 541 542 ether_addr_copy(addr, mlxsw_sp->base_mac); 543 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 544 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 545 } 546 547 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 548 { 549 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 550 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 551 int max_mtu; 552 int err; 553 554 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 555 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 556 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 557 if (err) 558 return err; 559 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 560 561 if (mtu > max_mtu) 562 return -EINVAL; 563 564 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 565 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 566 } 567 568 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 569 { 570 struct mlxsw_sp 
*mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 571 char pspa_pl[MLXSW_REG_PSPA_LEN]; 572 573 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 574 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 575 } 576 577 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 578 { 579 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 580 char svpe_pl[MLXSW_REG_SVPE_LEN]; 581 582 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 583 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 584 } 585 586 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 587 bool learn_enable) 588 { 589 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 590 char *spvmlr_pl; 591 int err; 592 593 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 594 if (!spvmlr_pl) 595 return -ENOMEM; 596 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 597 learn_enable); 598 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 599 kfree(spvmlr_pl); 600 return err; 601 } 602 603 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 604 u16 vid) 605 { 606 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 607 char spvid_pl[MLXSW_REG_SPVID_LEN]; 608 609 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 610 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 611 } 612 613 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 614 bool allow) 615 { 616 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 617 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 618 619 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 620 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 621 } 622 623 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 624 { 625 int err; 626 627 if (!vid) { 628 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 629 if (err) 630 return 
err; 631 } else { 632 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 633 if (err) 634 return err; 635 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 636 if (err) 637 goto err_port_allow_untagged_set; 638 } 639 640 mlxsw_sp_port->pvid = vid; 641 return 0; 642 643 err_port_allow_untagged_set: 644 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 645 return err; 646 } 647 648 static int 649 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 650 { 651 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 652 char sspr_pl[MLXSW_REG_SSPR_LEN]; 653 654 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 655 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 656 } 657 658 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, 659 u8 local_port, u8 *p_module, 660 u8 *p_width, u8 *p_lane) 661 { 662 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 663 int err; 664 665 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 666 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 667 if (err) 668 return err; 669 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 670 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); 671 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 672 return 0; 673 } 674 675 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port, 676 u8 module, u8 width, u8 lane) 677 { 678 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 679 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 680 int i; 681 682 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 683 mlxsw_reg_pmlp_width_set(pmlp_pl, width); 684 for (i = 0; i < width; i++) { 685 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module); 686 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */ 687 } 688 689 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 690 } 691 692 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 693 { 694 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 695 char 
pmlp_pl[MLXSW_REG_PMLP_LEN]; 696 697 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 698 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 699 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 700 } 701 702 static int mlxsw_sp_port_open(struct net_device *dev) 703 { 704 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 705 int err; 706 707 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 708 if (err) 709 return err; 710 netif_start_queue(dev); 711 return 0; 712 } 713 714 static int mlxsw_sp_port_stop(struct net_device *dev) 715 { 716 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 717 718 netif_stop_queue(dev); 719 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 720 } 721 722 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 723 struct net_device *dev) 724 { 725 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 726 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 727 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 728 const struct mlxsw_tx_info tx_info = { 729 .local_port = mlxsw_sp_port->local_port, 730 .is_emad = false, 731 }; 732 u64 len; 733 int err; 734 735 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 736 return NETDEV_TX_BUSY; 737 738 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { 739 struct sk_buff *skb_orig = skb; 740 741 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); 742 if (!skb) { 743 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 744 dev_kfree_skb_any(skb_orig); 745 return NETDEV_TX_OK; 746 } 747 dev_consume_skb_any(skb_orig); 748 } 749 750 if (eth_skb_pad(skb)) { 751 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 752 return NETDEV_TX_OK; 753 } 754 755 mlxsw_sp_txhdr_construct(skb, &tx_info); 756 /* TX header is consumed by HW on the way so we shouldn't count its 757 * bytes as being sent. 758 */ 759 len = skb->len - MLXSW_TXHDR_LEN; 760 761 /* Due to a race we might fail here because of a full queue. 
In that 762 * unlikely case we simply drop the packet. 763 */ 764 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 765 766 if (!err) { 767 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 768 u64_stats_update_begin(&pcpu_stats->syncp); 769 pcpu_stats->tx_packets++; 770 pcpu_stats->tx_bytes += len; 771 u64_stats_update_end(&pcpu_stats->syncp); 772 } else { 773 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 774 dev_kfree_skb_any(skb); 775 } 776 return NETDEV_TX_OK; 777 } 778 779 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 780 { 781 } 782 783 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 784 { 785 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 786 struct sockaddr *addr = p; 787 int err; 788 789 if (!is_valid_ether_addr(addr->sa_data)) 790 return -EADDRNOTAVAIL; 791 792 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 793 if (err) 794 return err; 795 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 796 return 0; 797 } 798 799 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 800 int mtu) 801 { 802 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 803 } 804 805 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 806 807 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 808 u16 delay) 809 { 810 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 811 BITS_PER_BYTE)); 812 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 813 mtu); 814 } 815 816 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 817 * Assumes 100m cable and maximum MTU. 
818 */ 819 #define MLXSW_SP_PAUSE_DELAY 58752 820 821 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 822 u16 delay, bool pfc, bool pause) 823 { 824 if (pfc) 825 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 826 else if (pause) 827 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 828 else 829 return 0; 830 } 831 832 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 833 bool lossy) 834 { 835 if (lossy) 836 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 837 else 838 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 839 thres); 840 } 841 842 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 843 u8 *prio_tc, bool pause_en, 844 struct ieee_pfc *my_pfc) 845 { 846 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 847 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 848 u16 delay = !!my_pfc ? my_pfc->delay : 0; 849 char pbmc_pl[MLXSW_REG_PBMC_LEN]; 850 int i, j, err; 851 852 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); 853 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 854 if (err) 855 return err; 856 857 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 858 bool configure = false; 859 bool pfc = false; 860 bool lossy; 861 u16 thres; 862 863 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 864 if (prio_tc[j] == i) { 865 pfc = pfc_en & BIT(j); 866 configure = true; 867 break; 868 } 869 } 870 871 if (!configure) 872 continue; 873 874 lossy = !(pfc || pause_en); 875 thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 876 delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, 877 pause_en); 878 mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); 879 } 880 881 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 882 } 883 884 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, 885 int mtu, bool pause_en) 886 { 887 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; 888 bool dcb_en = 
/* (fragment) Tail of mlxsw_sp_port_headroom_set(): the function head lies
 * before this chunk. When DCB ETS state exists, its per-priority TC map and
 * the port's PFC config are used; otherwise def_prio_tc (presumably a
 * zeroed default map declared above — TODO confirm) and no PFC.
 */
		!!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

/* .ndo_change_mtu handler. Resize the port headroom and mirroring (SPAN)
 * buffers for the new MTU before programming the MTU itself; unwind in
 * reverse order with the old dev->mtu on failure.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	/* Best-effort rollback to the previous MTU's buffer sizes. */
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* Sum the software (CPU-visible) per-CPU counters into @stats, using the
 * u64_stats seqcount to get a consistent snapshot of each CPU's values.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return 0;
}

/* .ndo_has_offload_stats: only CPU-hit stats are exposed. */
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

/* .ndo_get_offload_stats: CPU-hit stats are the software counters above. */
static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

/* Query one PPCNT counter group/priority for this port into @ppcnt_pl. */
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

/* Fill @stats from the IEEE 802.3 PPCNT counter group. rx_errors and
 * rx_length_errors are synthesized from several hardware counters.
 */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

/* Gather extended per-TC / per-prio counters (ECN marks, WRED drops,
 * queue backlog, tail drops, per-prio tx). Individual query failures are
 * tolerated: the affected entries are simply left as-is.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

/* Delayed-work handler: periodically refresh the cached HW stats so that
 * .ndo_get_stats64 can be served without register access. Skips the query
 * while the carrier is down. (Continues past this chunk: re-arms itself.)
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	/* (fragment) tail of update_stats_cache(): unconditionally re-arm
	 * the delayed work so the cache keeps refreshing.
	 */
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program one SPVM record: VLAN membership/untagged state for the range
 * [vid_begin, vid_end] on this port. The payload is heap-allocated because
 * SPVM is too large for the stack.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Set VLAN membership for an arbitrary range by splitting it into chunks of
 * at most MLXSW_REG_SPVM_REC_MAX_COUNT VLANs per register write.
 * NOTE(review): a failure part-way leaves earlier chunks programmed.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Drop every VLAN still associated with the port (used on teardown). */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}

/* Allocate a {port, vid} object with refcount 1 and program the VLAN into
 * hardware. VID 1 is installed as egress-untagged. Returns ERR_PTR on
 * failure, with the hardware entry removed again.
 */
static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->ref_count = 1;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

/* Free a {port, vid} object and remove the VLAN from hardware. */
static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

/* Look up an existing {port, vid} object and take a reference, or create a
 * fresh one. Counterpart of mlxsw_sp_port_vlan_put().
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan->ref_count++;
		return mlxsw_sp_port_vlan;
	}

	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}

/* Drop a reference; on the last put, detach the VLAN from its bridge port
 * or router interface (whichever it is joined to) and destroy it.
 */
void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (--mlxsw_sp_port_vlan->ref_count != 0)
		return;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}

/* .ndo_vlan_rx_add_vid handler. */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}

/* .ndo_vlan_rx_kill_vid handler. */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}

/* .ndo_get_phys_port_name: delegate to the core, which knows the split
 * port layout.
 */
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	return mlxsw_core_port_get_phys_port_name(mlxsw_sp_port->mlxsw_sp->core,
						  mlxsw_sp_port->local_port,
						  name, len);
}

/* Linear search of the port's matchall offload list by TC cookie. */
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

/* Offload a matchall mirred (mirror) action: resolve the destination
 * netdev and set up a SPAN session towards it.
 */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;
	struct net_device *to_dev;

	to_dev = tcf_mirred_dev(a);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
					true, &mirror->span_id);
}

/* Tear down the SPAN session created by the add path above. */
static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
				 span_type, true);
}

/* Offload a matchall sample action. Only one sampler per port is allowed;
 * the psample group pointer doubles as the "active" flag (RCU-managed).
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

/* Disable HW sampling on the port and clear the psample group. */
static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

/* TC_CLSMATCHALL_REPLACE: accept exactly one action (mirred egress mirror
 * or sample) on protocol "all", track it by cookie on the port's list.
 * NOTE(review): the LIST_HEAD(actions) local appears unused here.
 */
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_one_action(f->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	a = tcf_exts_first_action(f->exts);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

/* TC_CLSMATCHALL_DESTROY: find the tracked entry by cookie and undo the
 * matching offload. (Continues past this chunk.)
 */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1403 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port, 1404 f->cookie); 1405 if (!mall_tc_entry) { 1406 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); 1407 return; 1408 } 1409 list_del(&mall_tc_entry->list); 1410 1411 switch (mall_tc_entry->type) { 1412 case MLXSW_SP_PORT_MALL_MIRROR: 1413 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port, 1414 &mall_tc_entry->mirror); 1415 break; 1416 case MLXSW_SP_PORT_MALL_SAMPLE: 1417 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port); 1418 break; 1419 default: 1420 WARN_ON(1); 1421 } 1422 1423 kfree(mall_tc_entry); 1424 } 1425 1426 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1427 struct tc_cls_matchall_offload *f, 1428 bool ingress) 1429 { 1430 switch (f->command) { 1431 case TC_CLSMATCHALL_REPLACE: 1432 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f, 1433 ingress); 1434 case TC_CLSMATCHALL_DESTROY: 1435 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f); 1436 return 0; 1437 default: 1438 return -EOPNOTSUPP; 1439 } 1440 } 1441 1442 static int 1443 mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block, 1444 struct tc_cls_flower_offload *f) 1445 { 1446 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block); 1447 1448 switch (f->command) { 1449 case TC_CLSFLOWER_REPLACE: 1450 return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f); 1451 case TC_CLSFLOWER_DESTROY: 1452 mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f); 1453 return 0; 1454 case TC_CLSFLOWER_STATS: 1455 return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f); 1456 case TC_CLSFLOWER_TMPLT_CREATE: 1457 return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f); 1458 case TC_CLSFLOWER_TMPLT_DESTROY: 1459 mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f); 1460 return 0; 1461 default: 1462 return -EOPNOTSUPP; 1463 } 1464 } 1465 1466 static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type, 1467 void *type_data, 1468 void *cb_priv, bool ingress) 1469 { 1470 
	/* (fragment) body of mlxsw_sp_setup_tc_block_cb_matchall():
	 * cb_priv is the port registered at block-bind time.
	 */
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Ingress-direction wrapper for the matchall block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}

/* Egress-direction wrapper for the matchall block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}

/* TC block callback handling only CLSFLOWER; cb_priv is the shared ACL
 * block. CLSMATCHALL is a no-op here (handled by the matchall callback).
 */
static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* Bind a TC block to an ACL block for flower offload. The flower callback
 * (and its ACL block) is shared per TC block: the first binder creates and
 * registers it, later binders just take a reference.
 */
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tcf_block *block, bool ingress,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb) {
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = __tcf_block_cb_register(block,
						   mlxsw_sp_setup_tc_block_cb_flower,
						   mlxsw_sp, acl_block, extack);
		if (IS_ERR(block_cb)) {
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
	} else {
		acl_block = tcf_block_cb_priv(block_cb);
	}
	tcf_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	return 0;

err_block_bind:
	/* Only the last reference unregisters the shared callback; note the
	 * err_cb_register label sits inside this branch on purpose so the
	 * create-path failure frees the ACL block the same way.
	 */
	if (!tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
err_cb_register:
		mlxsw_sp_acl_block_destroy(acl_block);
	}
	return err;
}

/* Undo flower_bind: unbind the port from the ACL block and release the
 * shared callback reference, destroying the ACL block on last unbind.
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tcf_block *block, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = tcf_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
		mlxsw_sp_acl_block_destroy(acl_block);
	}
}

/* TC_SETUP_BLOCK handler: pick the direction-specific matchall callback
 * from the binder type, then register/unregister both the matchall and the
 * flower callbacks for the block. (Continues past this chunk.)
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_block_offload *f)
{
	tc_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
1609 ingress = false; 1610 } else { 1611 return -EOPNOTSUPP; 1612 } 1613 1614 switch (f->command) { 1615 case TC_BLOCK_BIND: 1616 err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port, 1617 mlxsw_sp_port, f->extack); 1618 if (err) 1619 return err; 1620 err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, 1621 f->block, ingress, 1622 f->extack); 1623 if (err) { 1624 tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); 1625 return err; 1626 } 1627 return 0; 1628 case TC_BLOCK_UNBIND: 1629 mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port, 1630 f->block, ingress); 1631 tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); 1632 return 0; 1633 default: 1634 return -EOPNOTSUPP; 1635 } 1636 } 1637 1638 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, 1639 void *type_data) 1640 { 1641 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1642 1643 switch (type) { 1644 case TC_SETUP_BLOCK: 1645 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1646 case TC_SETUP_QDISC_RED: 1647 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1648 case TC_SETUP_QDISC_PRIO: 1649 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data); 1650 default: 1651 return -EOPNOTSUPP; 1652 } 1653 } 1654 1655 1656 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) 1657 { 1658 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1659 1660 if (!enable) { 1661 if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) || 1662 mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) || 1663 !list_empty(&mlxsw_sp_port->mall_tc_list)) { 1664 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); 1665 return -EINVAL; 1666 } 1667 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block); 1668 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block); 1669 } else { 1670 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block); 1671 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block); 1672 
	/* (fragment) closes mlxsw_sp_feature_hw_tc(). */
	}
	return 0;
}

/* Per-feature toggle handler signature used by mlxsw_sp_handle_feature(). */
typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Apply one feature bit change: call the handler only when the bit actually
 * changes, and update dev->features only on handler success.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

/* .ndo_set_features: only NETIF_F_HW_TC is driver-controlled here. */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
}

/* netdev callbacks for Spectrum port netdevs. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
	.ndo_set_features	= mlxsw_sp_set_features,
};

/* ethtool -i: driver identity plus firmware revision from the bus info. */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

/* ethtool -a: report the cached PAUSE state. */
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

/* Write the requested PAUSE rx/tx enables to hardware via PFCC. */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

/* ethtool -A: PAUSE is mutually exclusive with PFC and autoneg of PAUSE is
 * not supported. Headroom is resized first since PAUSE needs more buffer.
 * (Continues past this chunk.)
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	/* (fragment) continuation of mlxsw_sp_port_set_pauseparam(). */
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	/* Restore headroom for the previous (still cached) PAUSE state. */
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* One ethtool statistic: its name and the PPCNT payload accessor.
 * cells_bytes marks counters reported in HW cells that need conversion
 * to bytes before being shown to the user.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

/* IEEE 802.3 counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

/* RFC 2863 (interfaces MIB) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
	{
		.str = "if_in_discards",
		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
	},
	{
		.str = "if_out_discards",
		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
	},
	{
		.str = "if_out_errors",
		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)

/* RFC 2819 (RMON) counter group, including size-histogram buckets. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
	{
		.str = "ether_stats_undersize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
	},
	{
		.str = "ether_stats_oversize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
	},
	{
		.str = "ether_stats_fragments",
		.getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
	},
	{
		.str = "ether_pkts64octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
	},
	{
		.str = "ether_pkts65to127octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
	},
	{
		.str = "ether_pkts128to255octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
	},
	{
		.str = "ether_pkts256to511octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
	},
	{
		.str = "ether_pkts512to1023octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
	},
	{
		.str = "ether_pkts1024to1518octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
	},
	{
		.str = "ether_pkts1519to2047octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
	},
	{
		.str = "ether_pkts2048to4095octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
	},
	{
		.str = "ether_pkts4096to8191octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)

/* RFC 3635 (EtherLike MIB) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
	{
		.str = "dot3stats_fcs_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
	},
	{
		.str = "dot3stats_symbol_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
	},
	{
		.str = "dot3control_in_unknown_opcodes",
		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
	},
	{
		.str = "dot3in_pause_frames",
		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \ 1992 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats) 1993 1994 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = { 1995 { 1996 .str = "discard_ingress_general", 1997 .getter = mlxsw_reg_ppcnt_ingress_general_get, 1998 }, 1999 { 2000 .str = "discard_ingress_policy_engine", 2001 .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get, 2002 }, 2003 { 2004 .str = "discard_ingress_vlan_membership", 2005 .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get, 2006 }, 2007 { 2008 .str = "discard_ingress_tag_frame_type", 2009 .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get, 2010 }, 2011 { 2012 .str = "discard_egress_vlan_membership", 2013 .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get, 2014 }, 2015 { 2016 .str = "discard_loopback_filter", 2017 .getter = mlxsw_reg_ppcnt_loopback_filter_get, 2018 }, 2019 { 2020 .str = "discard_egress_general", 2021 .getter = mlxsw_reg_ppcnt_egress_general_get, 2022 }, 2023 { 2024 .str = "discard_egress_hoq", 2025 .getter = mlxsw_reg_ppcnt_egress_hoq_get, 2026 }, 2027 { 2028 .str = "discard_egress_policy_engine", 2029 .getter = mlxsw_reg_ppcnt_egress_policy_engine_get, 2030 }, 2031 { 2032 .str = "discard_ingress_tx_link_down", 2033 .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get, 2034 }, 2035 { 2036 .str = "discard_egress_stp_filter", 2037 .getter = mlxsw_reg_ppcnt_egress_stp_filter_get, 2038 }, 2039 { 2040 .str = "discard_egress_sll", 2041 .getter = mlxsw_reg_ppcnt_egress_sll_get, 2042 }, 2043 }; 2044 2045 #define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \ 2046 ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats) 2047 2048 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 2049 { 2050 .str = "rx_octets_prio", 2051 .getter = mlxsw_reg_ppcnt_rx_octets_get, 2052 }, 2053 { 2054 .str = "rx_frames_prio", 2055 .getter = mlxsw_reg_ppcnt_rx_frames_get, 2056 }, 2057 { 2058 .str = "tx_octets_prio", 2059 .getter = mlxsw_reg_ppcnt_tx_octets_get, 2060 }, 2061 { 2062 .str = 
"tx_frames_prio", 2063 .getter = mlxsw_reg_ppcnt_tx_frames_get, 2064 }, 2065 { 2066 .str = "rx_pause_prio", 2067 .getter = mlxsw_reg_ppcnt_rx_pause_get, 2068 }, 2069 { 2070 .str = "rx_pause_duration_prio", 2071 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 2072 }, 2073 { 2074 .str = "tx_pause_prio", 2075 .getter = mlxsw_reg_ppcnt_tx_pause_get, 2076 }, 2077 { 2078 .str = "tx_pause_duration_prio", 2079 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 2080 }, 2081 }; 2082 2083 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 2084 2085 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 2086 { 2087 .str = "tc_transmit_queue_tc", 2088 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 2089 .cells_bytes = true, 2090 }, 2091 { 2092 .str = "tc_no_buffer_discard_uc_tc", 2093 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 2094 }, 2095 }; 2096 2097 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 2098 2099 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 2100 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \ 2101 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ 2102 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \ 2103 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \ 2104 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ 2105 IEEE_8021QAZ_MAX_TCS) + \ 2106 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ 2107 TC_MAX_QUEUE)) 2108 2109 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 2110 { 2111 int i; 2112 2113 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 2114 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 2115 mlxsw_sp_port_hw_prio_stats[i].str, prio); 2116 *p += ETH_GSTRING_LEN; 2117 } 2118 } 2119 2120 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 2121 { 2122 int i; 2123 2124 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 2125 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 2126 mlxsw_sp_port_hw_tc_stats[i].str, tc); 2127 *p += ETH_GSTRING_LEN; 2128 } 2129 } 2130 2131 static void 
mlxsw_sp_port_get_strings(struct net_device *dev, 2132 u32 stringset, u8 *data) 2133 { 2134 u8 *p = data; 2135 int i; 2136 2137 switch (stringset) { 2138 case ETH_SS_STATS: 2139 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2140 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2141 ETH_GSTRING_LEN); 2142 p += ETH_GSTRING_LEN; 2143 } 2144 2145 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) { 2146 memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str, 2147 ETH_GSTRING_LEN); 2148 p += ETH_GSTRING_LEN; 2149 } 2150 2151 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { 2152 memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, 2153 ETH_GSTRING_LEN); 2154 p += ETH_GSTRING_LEN; 2155 } 2156 2157 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) { 2158 memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str, 2159 ETH_GSTRING_LEN); 2160 p += ETH_GSTRING_LEN; 2161 } 2162 2163 for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) { 2164 memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str, 2165 ETH_GSTRING_LEN); 2166 p += ETH_GSTRING_LEN; 2167 } 2168 2169 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2170 mlxsw_sp_port_get_prio_strings(&p, i); 2171 2172 for (i = 0; i < TC_MAX_QUEUE; i++) 2173 mlxsw_sp_port_get_tc_strings(&p, i); 2174 2175 break; 2176 } 2177 } 2178 2179 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2180 enum ethtool_phys_id_state state) 2181 { 2182 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2183 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2184 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2185 bool active; 2186 2187 switch (state) { 2188 case ETHTOOL_ID_ACTIVE: 2189 active = true; 2190 break; 2191 case ETHTOOL_ID_INACTIVE: 2192 active = false; 2193 break; 2194 default: 2195 return -EOPNOTSUPP; 2196 } 2197 2198 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2199 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2200 } 2201 2202 static int 2203 mlxsw_sp_get_hw_stats_by_group(struct 
mlxsw_sp_port_hw_stats **p_hw_stats, 2204 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2205 { 2206 switch (grp) { 2207 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2208 *p_hw_stats = mlxsw_sp_port_hw_stats; 2209 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2210 break; 2211 case MLXSW_REG_PPCNT_RFC_2863_CNT: 2212 *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats; 2213 *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2214 break; 2215 case MLXSW_REG_PPCNT_RFC_2819_CNT: 2216 *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; 2217 *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2218 break; 2219 case MLXSW_REG_PPCNT_RFC_3635_CNT: 2220 *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats; 2221 *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2222 break; 2223 case MLXSW_REG_PPCNT_DISCARD_CNT: 2224 *p_hw_stats = mlxsw_sp_port_hw_discard_stats; 2225 *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2226 break; 2227 case MLXSW_REG_PPCNT_PRIO_CNT: 2228 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2229 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2230 break; 2231 case MLXSW_REG_PPCNT_TC_CNT: 2232 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2233 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2234 break; 2235 default: 2236 WARN_ON(1); 2237 return -EOPNOTSUPP; 2238 } 2239 return 0; 2240 } 2241 2242 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2243 enum mlxsw_reg_ppcnt_grp grp, int prio, 2244 u64 *data, int data_index) 2245 { 2246 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2247 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2248 struct mlxsw_sp_port_hw_stats *hw_stats; 2249 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2250 int i, len; 2251 int err; 2252 2253 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2254 if (err) 2255 return; 2256 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2257 for (i = 0; i < len; i++) { 2258 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2259 if (!hw_stats[i].cells_bytes) 2260 continue; 2261 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2262 
data[data_index + i]); 2263 } 2264 } 2265 2266 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2267 struct ethtool_stats *stats, u64 *data) 2268 { 2269 int i, data_index = 0; 2270 2271 /* IEEE 802.3 Counters */ 2272 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2273 data, data_index); 2274 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2275 2276 /* RFC 2863 Counters */ 2277 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0, 2278 data, data_index); 2279 data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2280 2281 /* RFC 2819 Counters */ 2282 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, 2283 data, data_index); 2284 data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2285 2286 /* RFC 3635 Counters */ 2287 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0, 2288 data, data_index); 2289 data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2290 2291 /* Discard Counters */ 2292 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0, 2293 data, data_index); 2294 data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2295 2296 /* Per-Priority Counters */ 2297 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2298 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2299 data, data_index); 2300 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2301 } 2302 2303 /* Per-TC Counters */ 2304 for (i = 0; i < TC_MAX_QUEUE; i++) { 2305 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2306 data, data_index); 2307 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2308 } 2309 } 2310 2311 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2312 { 2313 switch (sset) { 2314 case ETH_SS_STATS: 2315 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; 2316 default: 2317 return -EOPNOTSUPP; 2318 } 2319 } 2320 2321 struct mlxsw_sp_port_link_mode { 2322 enum ethtool_link_mode_bit_indices mask_ethtool; 2323 u32 mask; 2324 u32 speed; 2325 }; 2326 2327 static const struct mlxsw_sp_port_link_mode 
mlxsw_sp_port_link_mode[] = { 2328 { 2329 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2330 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2331 .speed = SPEED_100, 2332 }, 2333 { 2334 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2335 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2336 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2337 .speed = SPEED_1000, 2338 }, 2339 { 2340 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2341 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2342 .speed = SPEED_10000, 2343 }, 2344 { 2345 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 2346 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 2347 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2348 .speed = SPEED_10000, 2349 }, 2350 { 2351 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2352 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2353 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2354 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 2355 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2356 .speed = SPEED_10000, 2357 }, 2358 { 2359 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 2360 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 2361 .speed = SPEED_20000, 2362 }, 2363 { 2364 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 2365 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2366 .speed = SPEED_40000, 2367 }, 2368 { 2369 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 2370 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2371 .speed = SPEED_40000, 2372 }, 2373 { 2374 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 2375 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2376 .speed = SPEED_40000, 2377 }, 2378 { 2379 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 2380 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2381 .speed = SPEED_40000, 2382 }, 2383 { 2384 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, 2385 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2386 .speed = SPEED_25000, 2387 }, 2388 { 2389 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, 2390 
.mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2391 .speed = SPEED_25000, 2392 }, 2393 { 2394 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2395 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2396 .speed = SPEED_25000, 2397 }, 2398 { 2399 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2400 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2401 .speed = SPEED_25000, 2402 }, 2403 { 2404 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, 2405 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2406 .speed = SPEED_50000, 2407 }, 2408 { 2409 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 2410 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2411 .speed = SPEED_50000, 2412 }, 2413 { 2414 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2, 2415 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2416 .speed = SPEED_50000, 2417 }, 2418 { 2419 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2420 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, 2421 .speed = SPEED_56000, 2422 }, 2423 { 2424 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2425 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, 2426 .speed = SPEED_56000, 2427 }, 2428 { 2429 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2430 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, 2431 .speed = SPEED_56000, 2432 }, 2433 { 2434 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2435 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, 2436 .speed = SPEED_56000, 2437 }, 2438 { 2439 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, 2440 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2441 .speed = SPEED_100000, 2442 }, 2443 { 2444 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, 2445 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2446 .speed = SPEED_100000, 2447 }, 2448 { 2449 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, 2450 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2451 .speed = SPEED_100000, 2452 }, 2453 { 2454 .mask = 
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 2455 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2456 .speed = SPEED_100000, 2457 }, 2458 }; 2459 2460 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode) 2461 2462 static void 2463 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto, 2464 struct ethtool_link_ksettings *cmd) 2465 { 2466 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2467 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2468 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2469 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2470 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2471 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2472 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2473 2474 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2475 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2476 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2477 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2478 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2479 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2480 } 2481 2482 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode) 2483 { 2484 int i; 2485 2486 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2487 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) 2488 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2489 mode); 2490 } 2491 } 2492 2493 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, 2494 struct ethtool_link_ksettings *cmd) 2495 { 2496 u32 speed = SPEED_UNKNOWN; 2497 u8 duplex = DUPLEX_UNKNOWN; 2498 int i; 2499 2500 if (!carrier_ok) 2501 goto out; 2502 2503 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2504 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { 2505 speed = mlxsw_sp_port_link_mode[i].speed; 2506 duplex = DUPLEX_FULL; 2507 break; 2508 } 2509 } 2510 out: 2511 cmd->base.speed = speed; 2512 cmd->base.duplex = duplex; 2513 } 2514 2515 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) 2516 { 2517 if 
(ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2518 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2519 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2520 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2521 return PORT_FIBRE; 2522 2523 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2524 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2525 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) 2526 return PORT_DA; 2527 2528 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2529 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2530 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2531 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) 2532 return PORT_NONE; 2533 2534 return PORT_OTHER; 2535 } 2536 2537 static u32 2538 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd) 2539 { 2540 u32 ptys_proto = 0; 2541 int i; 2542 2543 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2544 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2545 cmd->link_modes.advertising)) 2546 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2547 } 2548 return ptys_proto; 2549 } 2550 2551 static u32 mlxsw_sp_to_ptys_speed(u32 speed) 2552 { 2553 u32 ptys_proto = 0; 2554 int i; 2555 2556 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2557 if (speed == mlxsw_sp_port_link_mode[i].speed) 2558 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2559 } 2560 return ptys_proto; 2561 } 2562 2563 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) 2564 { 2565 u32 ptys_proto = 0; 2566 int i; 2567 2568 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2569 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed) 2570 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2571 } 2572 return ptys_proto; 2573 } 2574 2575 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap, 2576 struct ethtool_link_ksettings *cmd) 2577 { 2578 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 2579 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 2580 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 2581 2582 
mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd); 2583 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported); 2584 } 2585 2586 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg, 2587 struct ethtool_link_ksettings *cmd) 2588 { 2589 if (!autoneg) 2590 return; 2591 2592 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 2593 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising); 2594 } 2595 2596 static void 2597 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status, 2598 struct ethtool_link_ksettings *cmd) 2599 { 2600 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp) 2601 return; 2602 2603 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg); 2604 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising); 2605 } 2606 2607 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 2608 struct ethtool_link_ksettings *cmd) 2609 { 2610 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp; 2611 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2612 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2613 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2614 u8 autoneg_status; 2615 bool autoneg; 2616 int err; 2617 2618 autoneg = mlxsw_sp_port->link.autoneg; 2619 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false); 2620 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2621 if (err) 2622 return err; 2623 mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, ð_proto_admin, 2624 ð_proto_oper); 2625 2626 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd); 2627 2628 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd); 2629 2630 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl); 2631 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl); 2632 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd); 2633 2634 cmd->base.autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; 2635 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper); 2636 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper, 2637 cmd); 2638 2639 return 0; 2640 } 2641 2642 static int 2643 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 2644 const struct ethtool_link_ksettings *cmd) 2645 { 2646 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2647 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2648 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2649 u32 eth_proto_cap, eth_proto_new; 2650 bool autoneg; 2651 int err; 2652 2653 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false); 2654 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2655 if (err) 2656 return err; 2657 mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 2658 2659 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 2660 eth_proto_new = autoneg ? 2661 mlxsw_sp_to_ptys_advert_link(cmd) : 2662 mlxsw_sp_to_ptys_speed(cmd->base.speed); 2663 2664 eth_proto_new = eth_proto_new & eth_proto_cap; 2665 if (!eth_proto_new) { 2666 netdev_err(dev, "No supported speed requested\n"); 2667 return -EINVAL; 2668 } 2669 2670 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2671 eth_proto_new, autoneg); 2672 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2673 if (err) 2674 return err; 2675 2676 if (!netif_running(dev)) 2677 return 0; 2678 2679 mlxsw_sp_port->link.autoneg = autoneg; 2680 2681 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2682 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 2683 2684 return 0; 2685 } 2686 2687 static int mlxsw_sp_flash_device(struct net_device *dev, 2688 struct ethtool_flash *flash) 2689 { 2690 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2691 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2692 const struct firmware *firmware; 2693 int err; 2694 2695 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) 2696 return -EOPNOTSUPP; 2697 2698 
dev_hold(dev); 2699 rtnl_unlock(); 2700 2701 err = request_firmware_direct(&firmware, flash->data, &dev->dev); 2702 if (err) 2703 goto out; 2704 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware); 2705 release_firmware(firmware); 2706 out: 2707 rtnl_lock(); 2708 dev_put(dev); 2709 return err; 2710 } 2711 2712 #define MLXSW_SP_I2C_ADDR_LOW 0x50 2713 #define MLXSW_SP_I2C_ADDR_HIGH 0x51 2714 #define MLXSW_SP_EEPROM_PAGE_LENGTH 256 2715 2716 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port, 2717 u16 offset, u16 size, void *data, 2718 unsigned int *p_read_size) 2719 { 2720 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2721 char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE]; 2722 char mcia_pl[MLXSW_REG_MCIA_LEN]; 2723 u16 i2c_addr; 2724 int status; 2725 int err; 2726 2727 size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE); 2728 2729 if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH && 2730 offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH) 2731 /* Cross pages read, read until offset 256 in low page */ 2732 size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset; 2733 2734 i2c_addr = MLXSW_SP_I2C_ADDR_LOW; 2735 if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) { 2736 i2c_addr = MLXSW_SP_I2C_ADDR_HIGH; 2737 offset -= MLXSW_SP_EEPROM_PAGE_LENGTH; 2738 } 2739 2740 mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module, 2741 0, 0, offset, size, i2c_addr); 2742 2743 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl); 2744 if (err) 2745 return err; 2746 2747 status = mlxsw_reg_mcia_status_get(mcia_pl); 2748 if (status) 2749 return -EIO; 2750 2751 mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp); 2752 memcpy(data, eeprom_tmp, size); 2753 *p_read_size = size; 2754 2755 return 0; 2756 } 2757 2758 enum mlxsw_sp_eeprom_module_info_rev_id { 2759 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00, 2760 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01, 2761 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03, 2762 }; 2763 2764 enum mlxsw_sp_eeprom_module_info_id { 2765 
MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03, 2766 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C, 2767 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D, 2768 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11, 2769 }; 2770 2771 enum mlxsw_sp_eeprom_module_info { 2772 MLXSW_SP_EEPROM_MODULE_INFO_ID, 2773 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID, 2774 MLXSW_SP_EEPROM_MODULE_INFO_SIZE, 2775 }; 2776 2777 static int mlxsw_sp_get_module_info(struct net_device *netdev, 2778 struct ethtool_modinfo *modinfo) 2779 { 2780 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2781 u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE]; 2782 u8 module_rev_id, module_id; 2783 unsigned int read_size; 2784 int err; 2785 2786 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0, 2787 MLXSW_SP_EEPROM_MODULE_INFO_SIZE, 2788 module_info, &read_size); 2789 if (err) 2790 return err; 2791 2792 if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE) 2793 return -EIO; 2794 2795 module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID]; 2796 module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID]; 2797 2798 switch (module_id) { 2799 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP: 2800 modinfo->type = ETH_MODULE_SFF_8436; 2801 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2802 break; 2803 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS: 2804 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28: 2805 if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 || 2806 module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) { 2807 modinfo->type = ETH_MODULE_SFF_8636; 2808 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2809 } else { 2810 modinfo->type = ETH_MODULE_SFF_8436; 2811 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2812 } 2813 break; 2814 case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP: 2815 modinfo->type = ETH_MODULE_SFF_8472; 2816 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2817 break; 2818 default: 2819 return -EINVAL; 2820 } 2821 2822 return 0; 2823 } 2824 2825 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 
2826 struct ethtool_eeprom *ee, 2827 u8 *data) 2828 { 2829 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2830 int offset = ee->offset; 2831 unsigned int read_size; 2832 int i = 0; 2833 int err; 2834 2835 if (!ee->len) 2836 return -EINVAL; 2837 2838 memset(data, 0, ee->len); 2839 2840 while (i < ee->len) { 2841 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset, 2842 ee->len - i, data + i, 2843 &read_size); 2844 if (err) { 2845 netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n"); 2846 return err; 2847 } 2848 2849 i += read_size; 2850 offset += read_size; 2851 } 2852 2853 return 0; 2854 } 2855 2856 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 2857 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 2858 .get_link = ethtool_op_get_link, 2859 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 2860 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 2861 .get_strings = mlxsw_sp_port_get_strings, 2862 .set_phys_id = mlxsw_sp_port_set_phys_id, 2863 .get_ethtool_stats = mlxsw_sp_port_get_stats, 2864 .get_sset_count = mlxsw_sp_port_get_sset_count, 2865 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 2866 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 2867 .flash_device = mlxsw_sp_flash_device, 2868 .get_module_info = mlxsw_sp_get_module_info, 2869 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 2870 }; 2871 2872 static int 2873 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 2874 { 2875 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2876 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width; 2877 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2878 u32 eth_proto_admin; 2879 2880 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); 2881 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2882 eth_proto_admin, mlxsw_sp_port->link.autoneg); 2883 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2884 } 2885 2886 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 2887 enum 
mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 2888 bool dwrr, u8 dwrr_weight) 2889 { 2890 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2891 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2892 2893 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2894 next_index); 2895 mlxsw_reg_qeec_de_set(qeec_pl, true); 2896 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 2897 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 2898 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2899 } 2900 2901 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 2902 enum mlxsw_reg_qeec_hr hr, u8 index, 2903 u8 next_index, u32 maxrate) 2904 { 2905 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2906 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2907 2908 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2909 next_index); 2910 mlxsw_reg_qeec_mase_set(qeec_pl, true); 2911 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 2912 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2913 } 2914 2915 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, 2916 enum mlxsw_reg_qeec_hr hr, u8 index, 2917 u8 next_index, u32 minrate) 2918 { 2919 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2920 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2921 2922 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2923 next_index); 2924 mlxsw_reg_qeec_mise_set(qeec_pl, true); 2925 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); 2926 2927 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2928 } 2929 2930 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 2931 u8 switch_prio, u8 tclass) 2932 { 2933 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2934 char qtct_pl[MLXSW_REG_QTCT_LEN]; 2935 2936 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 2937 tclass); 2938 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 2939 } 2940 2941 static int 
mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 2942 { 2943 int err, i; 2944 2945 /* Setup the elements hierarcy, so that each TC is linked to 2946 * one subgroup, which are all member in the same group. 2947 */ 2948 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2949 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 2950 0); 2951 if (err) 2952 return err; 2953 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2954 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2955 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 2956 0, false, 0); 2957 if (err) 2958 return err; 2959 } 2960 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2961 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2962 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 2963 false, 0); 2964 if (err) 2965 return err; 2966 2967 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 2968 MLXSW_REG_QEEC_HIERARCY_TC, 2969 i + 8, i, 2970 false, 0); 2971 if (err) 2972 return err; 2973 } 2974 2975 /* Make sure the max shaper is disabled in all hierarchies that 2976 * support it. 2977 */ 2978 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2979 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 2980 MLXSW_REG_QEEC_MAS_DIS); 2981 if (err) 2982 return err; 2983 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2984 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2985 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 2986 i, 0, 2987 MLXSW_REG_QEEC_MAS_DIS); 2988 if (err) 2989 return err; 2990 } 2991 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2992 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 2993 MLXSW_REG_QEEC_HIERARCY_TC, 2994 i, i, 2995 MLXSW_REG_QEEC_MAS_DIS); 2996 if (err) 2997 return err; 2998 2999 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3000 MLXSW_REG_QEEC_HIERARCY_TC, 3001 i + 8, i, 3002 MLXSW_REG_QEEC_MAS_DIS); 3003 if (err) 3004 return err; 3005 } 3006 3007 /* Configure the min shaper for multicast TCs. 
*/ 3008 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3009 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 3010 MLXSW_REG_QEEC_HIERARCY_TC, 3011 i + 8, i, 3012 MLXSW_REG_QEEC_MIS_MIN); 3013 if (err) 3014 return err; 3015 } 3016 3017 /* Map all priorities to traffic class 0. */ 3018 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3019 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 3020 if (err) 3021 return err; 3022 } 3023 3024 return 0; 3025 } 3026 3027 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 3028 bool enable) 3029 { 3030 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3031 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 3032 3033 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 3034 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 3035 } 3036 3037 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3038 bool split, u8 module, u8 width, u8 lane) 3039 { 3040 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 3041 struct mlxsw_sp_port *mlxsw_sp_port; 3042 struct net_device *dev; 3043 int err; 3044 3045 err = mlxsw_core_port_init(mlxsw_sp->core, local_port); 3046 if (err) { 3047 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 3048 local_port); 3049 return err; 3050 } 3051 3052 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 3053 if (!dev) { 3054 err = -ENOMEM; 3055 goto err_alloc_etherdev; 3056 } 3057 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 3058 mlxsw_sp_port = netdev_priv(dev); 3059 mlxsw_sp_port->dev = dev; 3060 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3061 mlxsw_sp_port->local_port = local_port; 3062 mlxsw_sp_port->pvid = 1; 3063 mlxsw_sp_port->split = split; 3064 mlxsw_sp_port->mapping.module = module; 3065 mlxsw_sp_port->mapping.width = width; 3066 mlxsw_sp_port->mapping.lane = lane; 3067 mlxsw_sp_port->link.autoneg = 1; 3068 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 3069 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 3070 3071 
mlxsw_sp_port->pcpu_stats = 3072 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 3073 if (!mlxsw_sp_port->pcpu_stats) { 3074 err = -ENOMEM; 3075 goto err_alloc_stats; 3076 } 3077 3078 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 3079 GFP_KERNEL); 3080 if (!mlxsw_sp_port->sample) { 3081 err = -ENOMEM; 3082 goto err_alloc_sample; 3083 } 3084 3085 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 3086 &update_stats_cache); 3087 3088 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 3089 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 3090 3091 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane); 3092 if (err) { 3093 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 3094 mlxsw_sp_port->local_port); 3095 goto err_port_module_map; 3096 } 3097 3098 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 3099 if (err) { 3100 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 3101 mlxsw_sp_port->local_port); 3102 goto err_port_swid_set; 3103 } 3104 3105 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 3106 if (err) { 3107 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 3108 mlxsw_sp_port->local_port); 3109 goto err_dev_addr_init; 3110 } 3111 3112 netif_carrier_off(dev); 3113 3114 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 3115 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 3116 dev->hw_features |= NETIF_F_HW_TC; 3117 3118 dev->min_mtu = 0; 3119 dev->max_mtu = ETH_MAX_MTU; 3120 3121 /* Each packet needs to have a Tx header (metadata) on top all other 3122 * headers. 
3123 */ 3124 dev->needed_headroom = MLXSW_TXHDR_LEN; 3125 3126 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 3127 if (err) { 3128 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 3129 mlxsw_sp_port->local_port); 3130 goto err_port_system_port_mapping_set; 3131 } 3132 3133 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 3134 if (err) { 3135 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 3136 mlxsw_sp_port->local_port); 3137 goto err_port_speed_by_width_set; 3138 } 3139 3140 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 3141 if (err) { 3142 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 3143 mlxsw_sp_port->local_port); 3144 goto err_port_mtu_set; 3145 } 3146 3147 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3148 if (err) 3149 goto err_port_admin_status_set; 3150 3151 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 3152 if (err) { 3153 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 3154 mlxsw_sp_port->local_port); 3155 goto err_port_buffers_init; 3156 } 3157 3158 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 3159 if (err) { 3160 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 3161 mlxsw_sp_port->local_port); 3162 goto err_port_ets_init; 3163 } 3164 3165 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 3166 if (err) { 3167 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 3168 mlxsw_sp_port->local_port); 3169 goto err_port_tc_mc_mode; 3170 } 3171 3172 /* ETS and buffers must be initialized before DCB. 
*/ 3173 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 3174 if (err) { 3175 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 3176 mlxsw_sp_port->local_port); 3177 goto err_port_dcb_init; 3178 } 3179 3180 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 3181 if (err) { 3182 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 3183 mlxsw_sp_port->local_port); 3184 goto err_port_fids_init; 3185 } 3186 3187 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 3188 if (err) { 3189 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 3190 mlxsw_sp_port->local_port); 3191 goto err_port_qdiscs_init; 3192 } 3193 3194 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 3195 if (err) { 3196 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 3197 mlxsw_sp_port->local_port); 3198 goto err_port_nve_init; 3199 } 3200 3201 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); 3202 if (IS_ERR(mlxsw_sp_port_vlan)) { 3203 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 3204 mlxsw_sp_port->local_port); 3205 err = PTR_ERR(mlxsw_sp_port_vlan); 3206 goto err_port_vlan_get; 3207 } 3208 3209 mlxsw_sp_port_switchdev_init(mlxsw_sp_port); 3210 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 3211 err = register_netdev(dev); 3212 if (err) { 3213 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 3214 mlxsw_sp_port->local_port); 3215 goto err_register_netdev; 3216 } 3217 3218 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 3219 mlxsw_sp_port, dev, module + 1, 3220 mlxsw_sp_port->split, lane / width); 3221 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 3222 return 0; 3223 3224 err_register_netdev: 3225 mlxsw_sp->ports[local_port] = NULL; 3226 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 3227 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); 3228 err_port_vlan_get: 3229 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3230 err_port_nve_init: 3231 
mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3232 err_port_qdiscs_init: 3233 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3234 err_port_fids_init: 3235 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3236 err_port_dcb_init: 3237 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3238 err_port_tc_mc_mode: 3239 err_port_ets_init: 3240 err_port_buffers_init: 3241 err_port_admin_status_set: 3242 err_port_mtu_set: 3243 err_port_speed_by_width_set: 3244 err_port_system_port_mapping_set: 3245 err_dev_addr_init: 3246 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3247 err_port_swid_set: 3248 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3249 err_port_module_map: 3250 kfree(mlxsw_sp_port->sample); 3251 err_alloc_sample: 3252 free_percpu(mlxsw_sp_port->pcpu_stats); 3253 err_alloc_stats: 3254 free_netdev(dev); 3255 err_alloc_etherdev: 3256 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3257 return err; 3258 } 3259 3260 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3261 { 3262 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3263 3264 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 3265 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3266 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3267 mlxsw_sp->ports[local_port] = NULL; 3268 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 3269 mlxsw_sp_port_vlan_flush(mlxsw_sp_port); 3270 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3271 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3272 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3273 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3274 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3275 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3276 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3277 kfree(mlxsw_sp_port->sample); 3278 free_percpu(mlxsw_sp_port->pcpu_stats); 3279 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3280 free_netdev(mlxsw_sp_port->dev); 3281 
mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3282 } 3283 3284 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3285 { 3286 return mlxsw_sp->ports[local_port] != NULL; 3287 } 3288 3289 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 3290 { 3291 int i; 3292 3293 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 3294 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3295 mlxsw_sp_port_remove(mlxsw_sp, i); 3296 kfree(mlxsw_sp->port_to_module); 3297 kfree(mlxsw_sp->ports); 3298 } 3299 3300 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3301 { 3302 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3303 u8 module, width, lane; 3304 size_t alloc_size; 3305 int i; 3306 int err; 3307 3308 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 3309 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 3310 if (!mlxsw_sp->ports) 3311 return -ENOMEM; 3312 3313 mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int), 3314 GFP_KERNEL); 3315 if (!mlxsw_sp->port_to_module) { 3316 err = -ENOMEM; 3317 goto err_port_to_module_alloc; 3318 } 3319 3320 for (i = 1; i < max_ports; i++) { 3321 /* Mark as invalid */ 3322 mlxsw_sp->port_to_module[i] = -1; 3323 3324 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 3325 &width, &lane); 3326 if (err) 3327 goto err_port_module_info_get; 3328 if (!width) 3329 continue; 3330 mlxsw_sp->port_to_module[i] = module; 3331 err = mlxsw_sp_port_create(mlxsw_sp, i, false, 3332 module, width, lane); 3333 if (err) 3334 goto err_port_create; 3335 } 3336 return 0; 3337 3338 err_port_create: 3339 err_port_module_info_get: 3340 for (i--; i >= 1; i--) 3341 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3342 mlxsw_sp_port_remove(mlxsw_sp, i); 3343 kfree(mlxsw_sp->port_to_module); 3344 err_port_to_module_alloc: 3345 kfree(mlxsw_sp->ports); 3346 return err; 3347 } 3348 3349 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 3350 { 3351 u8 offset = (local_port - 1) % 
MLXSW_SP_PORTS_PER_CLUSTER_MAX; 3352 3353 return local_port - offset; 3354 } 3355 3356 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 3357 u8 module, unsigned int count) 3358 { 3359 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 3360 int err, i; 3361 3362 for (i = 0; i < count; i++) { 3363 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, 3364 module, width, i * width); 3365 if (err) 3366 goto err_port_create; 3367 } 3368 3369 return 0; 3370 3371 err_port_create: 3372 for (i--; i >= 0; i--) 3373 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3374 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3375 return err; 3376 } 3377 3378 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 3379 u8 base_port, unsigned int count) 3380 { 3381 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 3382 int i; 3383 3384 /* Split by four means we need to re-create two ports, otherwise 3385 * only one. 3386 */ 3387 count = count / 2; 3388 3389 for (i = 0; i < count; i++) { 3390 local_port = base_port + i * 2; 3391 if (mlxsw_sp->port_to_module[local_port] < 0) 3392 continue; 3393 module = mlxsw_sp->port_to_module[local_port]; 3394 3395 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 3396 width, 0); 3397 } 3398 } 3399 3400 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 3401 unsigned int count, 3402 struct netlink_ext_ack *extack) 3403 { 3404 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3405 struct mlxsw_sp_port *mlxsw_sp_port; 3406 u8 module, cur_width, base_port; 3407 int i; 3408 int err; 3409 3410 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3411 if (!mlxsw_sp_port) { 3412 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3413 local_port); 3414 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 3415 return -EINVAL; 3416 } 3417 3418 module = mlxsw_sp_port->mapping.module; 3419 cur_width = mlxsw_sp_port->mapping.width; 3420 3421 if 
(count != 2 && count != 4) { 3422 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 3423 NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports"); 3424 return -EINVAL; 3425 } 3426 3427 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 3428 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 3429 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further"); 3430 return -EINVAL; 3431 } 3432 3433 /* Make sure we have enough slave (even) ports for the split. */ 3434 if (count == 2) { 3435 base_port = local_port; 3436 if (mlxsw_sp->ports[base_port + 1]) { 3437 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3438 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 3439 return -EINVAL; 3440 } 3441 } else { 3442 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3443 if (mlxsw_sp->ports[base_port + 1] || 3444 mlxsw_sp->ports[base_port + 3]) { 3445 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3446 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 3447 return -EINVAL; 3448 } 3449 } 3450 3451 for (i = 0; i < count; i++) 3452 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3453 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3454 3455 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); 3456 if (err) { 3457 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 3458 goto err_port_split_create; 3459 } 3460 3461 return 0; 3462 3463 err_port_split_create: 3464 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3465 return err; 3466 } 3467 3468 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 3469 struct netlink_ext_ack *extack) 3470 { 3471 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3472 struct mlxsw_sp_port *mlxsw_sp_port; 3473 u8 cur_width, base_port; 3474 unsigned int count; 3475 int i; 3476 3477 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3478 if (!mlxsw_sp_port) { 3479 
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3480 local_port); 3481 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 3482 return -EINVAL; 3483 } 3484 3485 if (!mlxsw_sp_port->split) { 3486 netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); 3487 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 3488 return -EINVAL; 3489 } 3490 3491 cur_width = mlxsw_sp_port->mapping.width; 3492 count = cur_width == 1 ? 4 : 2; 3493 3494 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3495 3496 /* Determine which ports to remove. */ 3497 if (count == 2 && local_port >= base_port + 2) 3498 base_port = base_port + 2; 3499 3500 for (i = 0; i < count; i++) 3501 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3502 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3503 3504 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3505 3506 return 0; 3507 } 3508 3509 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 3510 char *pude_pl, void *priv) 3511 { 3512 struct mlxsw_sp *mlxsw_sp = priv; 3513 struct mlxsw_sp_port *mlxsw_sp_port; 3514 enum mlxsw_reg_pude_oper_status status; 3515 u8 local_port; 3516 3517 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 3518 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3519 if (!mlxsw_sp_port) 3520 return; 3521 3522 status = mlxsw_reg_pude_oper_status_get(pude_pl); 3523 if (status == MLXSW_PORT_OPER_STATUS_UP) { 3524 netdev_info(mlxsw_sp_port->dev, "link up\n"); 3525 netif_carrier_on(mlxsw_sp_port->dev); 3526 } else { 3527 netdev_info(mlxsw_sp_port->dev, "link down\n"); 3528 netif_carrier_off(mlxsw_sp_port->dev); 3529 } 3530 } 3531 3532 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 3533 u8 local_port, void *priv) 3534 { 3535 struct mlxsw_sp *mlxsw_sp = priv; 3536 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3537 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 3538 3539 if (unlikely(!mlxsw_sp_port)) { 3540 
dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 3541 local_port); 3542 return; 3543 } 3544 3545 skb->dev = mlxsw_sp_port->dev; 3546 3547 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 3548 u64_stats_update_begin(&pcpu_stats->syncp); 3549 pcpu_stats->rx_packets++; 3550 pcpu_stats->rx_bytes += skb->len; 3551 u64_stats_update_end(&pcpu_stats->syncp); 3552 3553 skb->protocol = eth_type_trans(skb, skb->dev); 3554 netif_receive_skb(skb); 3555 } 3556 3557 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 3558 void *priv) 3559 { 3560 skb->offload_fwd_mark = 1; 3561 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3562 } 3563 3564 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 3565 u8 local_port, void *priv) 3566 { 3567 skb->offload_l3_fwd_mark = 1; 3568 skb->offload_fwd_mark = 1; 3569 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3570 } 3571 3572 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 3573 void *priv) 3574 { 3575 struct mlxsw_sp *mlxsw_sp = priv; 3576 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3577 struct psample_group *psample_group; 3578 u32 size; 3579 3580 if (unlikely(!mlxsw_sp_port)) { 3581 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 3582 local_port); 3583 goto out; 3584 } 3585 if (unlikely(!mlxsw_sp_port->sample)) { 3586 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 3587 local_port); 3588 goto out; 3589 } 3590 3591 size = mlxsw_sp_port->sample->truncate ? 
3592 mlxsw_sp_port->sample->trunc_size : skb->len; 3593 3594 rcu_read_lock(); 3595 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 3596 if (!psample_group) 3597 goto out_unlock; 3598 psample_sample_packet(psample_group, skb, size, 3599 mlxsw_sp_port->dev->ifindex, 0, 3600 mlxsw_sp_port->sample->rate); 3601 out_unlock: 3602 rcu_read_unlock(); 3603 out: 3604 consume_skb(skb); 3605 } 3606 3607 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3608 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 3609 _is_ctrl, SP_##_trap_group, DISCARD) 3610 3611 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3612 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 3613 _is_ctrl, SP_##_trap_group, DISCARD) 3614 3615 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3616 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 3617 _is_ctrl, SP_##_trap_group, DISCARD) 3618 3619 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 3620 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 3621 3622 static const struct mlxsw_listener mlxsw_sp_listener[] = { 3623 /* Events */ 3624 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 3625 /* L2 traps */ 3626 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 3627 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 3628 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true), 3629 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 3630 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 3631 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 3632 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 3633 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 3634 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 3635 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 3636 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 3637 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), 
3638 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, 3639 false), 3640 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 3641 false), 3642 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, 3643 false), 3644 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 3645 false), 3646 /* L3 traps */ 3647 MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3648 MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3649 MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false), 3650 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), 3651 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 3652 false), 3653 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), 3654 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 3655 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), 3656 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, 3657 false), 3658 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), 3659 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), 3660 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), 3661 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), 3662 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), 3663 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), 3664 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 3665 false), 3666 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 3667 false), 3668 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 3669 false), 3670 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 3671 false), 3672 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), 3673 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 3674 false), 3675 MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false), 3676 
MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false), 3677 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), 3678 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), 3679 MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3680 MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false), 3681 MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), 3682 MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), 3683 /* PKT Sample trap */ 3684 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 3685 false, SP_IP2ME, DISCARD), 3686 /* ACL trap */ 3687 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), 3688 /* Multicast Router Traps */ 3689 MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), 3690 MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false), 3691 MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), 3692 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 3693 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 3694 /* NVE traps */ 3695 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false), 3696 }; 3697 3698 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 3699 { 3700 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 3701 enum mlxsw_reg_qpcr_ir_units ir_units; 3702 int max_cpu_policers; 3703 bool is_bytes; 3704 u8 burst_size; 3705 u32 rate; 3706 int i, err; 3707 3708 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 3709 return -EIO; 3710 3711 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3712 3713 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 3714 for (i = 0; i < max_cpu_policers; i++) { 3715 is_bytes = false; 3716 switch (i) { 3717 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3718 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3719 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3720 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3721 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 3722 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 3723 case 
MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: 3724 rate = 128; 3725 burst_size = 7; 3726 break; 3727 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3728 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 3729 rate = 16 * 1024; 3730 burst_size = 10; 3731 break; 3732 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 3733 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3734 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3735 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 3736 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3737 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3738 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 3739 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 3740 rate = 1024; 3741 burst_size = 7; 3742 break; 3743 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3744 rate = 4 * 1024; 3745 burst_size = 4; 3746 break; 3747 default: 3748 continue; 3749 } 3750 3751 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 3752 burst_size); 3753 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 3754 if (err) 3755 return err; 3756 } 3757 3758 return 0; 3759 } 3760 3761 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 3762 { 3763 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3764 enum mlxsw_reg_htgt_trap_group i; 3765 int max_cpu_policers; 3766 int max_trap_groups; 3767 u8 priority, tc; 3768 u16 policer_id; 3769 int err; 3770 3771 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 3772 return -EIO; 3773 3774 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 3775 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3776 3777 for (i = 0; i < max_trap_groups; i++) { 3778 policer_id = i; 3779 switch (i) { 3780 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3781 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3782 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3783 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3784 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 3785 priority = 5; 3786 tc = 5; 3787 break; 3788 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 3789 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3790 priority = 4; 
3791 tc = 4; 3792 break; 3793 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3794 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3795 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 3796 priority = 3; 3797 tc = 3; 3798 break; 3799 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3800 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 3801 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 3802 priority = 2; 3803 tc = 2; 3804 break; 3805 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 3806 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3807 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3808 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 3809 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: 3810 priority = 1; 3811 tc = 1; 3812 break; 3813 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 3814 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 3815 tc = MLXSW_REG_HTGT_DEFAULT_TC; 3816 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 3817 break; 3818 default: 3819 continue; 3820 } 3821 3822 if (max_cpu_policers <= policer_id && 3823 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 3824 return -EIO; 3825 3826 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 3827 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3828 if (err) 3829 return err; 3830 } 3831 3832 return 0; 3833 } 3834 3835 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 3836 { 3837 int i; 3838 int err; 3839 3840 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 3841 if (err) 3842 return err; 3843 3844 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 3845 if (err) 3846 return err; 3847 3848 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3849 err = mlxsw_core_trap_register(mlxsw_sp->core, 3850 &mlxsw_sp_listener[i], 3851 mlxsw_sp); 3852 if (err) 3853 goto err_listener_register; 3854 3855 } 3856 return 0; 3857 3858 err_listener_register: 3859 for (i--; i >= 0; i--) { 3860 mlxsw_core_trap_unregister(mlxsw_sp->core, 3861 &mlxsw_sp_listener[i], 3862 mlxsw_sp); 3863 } 3864 return err; 3865 } 3866 3867 static void mlxsw_sp_traps_fini(struct mlxsw_sp 
*mlxsw_sp) 3868 { 3869 int i; 3870 3871 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3872 mlxsw_core_trap_unregister(mlxsw_sp->core, 3873 &mlxsw_sp_listener[i], 3874 mlxsw_sp); 3875 } 3876 } 3877 3878 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 3879 { 3880 char slcr_pl[MLXSW_REG_SLCR_LEN]; 3881 u32 seed; 3882 int err; 3883 3884 get_random_bytes(&seed, sizeof(seed)); 3885 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 3886 MLXSW_REG_SLCR_LAG_HASH_DMAC | 3887 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 3888 MLXSW_REG_SLCR_LAG_HASH_VLANID | 3889 MLXSW_REG_SLCR_LAG_HASH_SIP | 3890 MLXSW_REG_SLCR_LAG_HASH_DIP | 3891 MLXSW_REG_SLCR_LAG_HASH_SPORT | 3892 MLXSW_REG_SLCR_LAG_HASH_DPORT | 3893 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 3894 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 3895 if (err) 3896 return err; 3897 3898 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 3899 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 3900 return -EIO; 3901 3902 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 3903 sizeof(struct mlxsw_sp_upper), 3904 GFP_KERNEL); 3905 if (!mlxsw_sp->lags) 3906 return -ENOMEM; 3907 3908 return 0; 3909 } 3910 3911 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 3912 { 3913 kfree(mlxsw_sp->lags); 3914 } 3915 3916 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 3917 { 3918 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3919 3920 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 3921 MLXSW_REG_HTGT_INVALID_POLICER, 3922 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 3923 MLXSW_REG_HTGT_DEFAULT_TC); 3924 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3925 } 3926 3927 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 3928 unsigned long event, void *ptr); 3929 3930 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 3931 const struct mlxsw_bus_info *mlxsw_bus_info) 3932 { 3933 struct mlxsw_sp *mlxsw_sp = 
mlxsw_core_driver_priv(mlxsw_core); 3934 int err; 3935 3936 mlxsw_sp->core = mlxsw_core; 3937 mlxsw_sp->bus_info = mlxsw_bus_info; 3938 3939 err = mlxsw_sp_fw_rev_validate(mlxsw_sp); 3940 if (err) 3941 return err; 3942 3943 err = mlxsw_sp_base_mac_get(mlxsw_sp); 3944 if (err) { 3945 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 3946 return err; 3947 } 3948 3949 err = mlxsw_sp_kvdl_init(mlxsw_sp); 3950 if (err) { 3951 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 3952 return err; 3953 } 3954 3955 err = mlxsw_sp_fids_init(mlxsw_sp); 3956 if (err) { 3957 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 3958 goto err_fids_init; 3959 } 3960 3961 err = mlxsw_sp_traps_init(mlxsw_sp); 3962 if (err) { 3963 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 3964 goto err_traps_init; 3965 } 3966 3967 err = mlxsw_sp_buffers_init(mlxsw_sp); 3968 if (err) { 3969 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 3970 goto err_buffers_init; 3971 } 3972 3973 err = mlxsw_sp_lag_init(mlxsw_sp); 3974 if (err) { 3975 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 3976 goto err_lag_init; 3977 } 3978 3979 /* Initialize SPAN before router and switchdev, so that those components 3980 * can call mlxsw_sp_span_respin(). 
3981 */ 3982 err = mlxsw_sp_span_init(mlxsw_sp); 3983 if (err) { 3984 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 3985 goto err_span_init; 3986 } 3987 3988 err = mlxsw_sp_switchdev_init(mlxsw_sp); 3989 if (err) { 3990 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 3991 goto err_switchdev_init; 3992 } 3993 3994 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 3995 if (err) { 3996 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 3997 goto err_counter_pool_init; 3998 } 3999 4000 err = mlxsw_sp_afa_init(mlxsw_sp); 4001 if (err) { 4002 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 4003 goto err_afa_init; 4004 } 4005 4006 err = mlxsw_sp_nve_init(mlxsw_sp); 4007 if (err) { 4008 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 4009 goto err_nve_init; 4010 } 4011 4012 err = mlxsw_sp_acl_init(mlxsw_sp); 4013 if (err) { 4014 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 4015 goto err_acl_init; 4016 } 4017 4018 err = mlxsw_sp_router_init(mlxsw_sp); 4019 if (err) { 4020 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 4021 goto err_router_init; 4022 } 4023 4024 /* Initialize netdevice notifier after router and SPAN is initialized, 4025 * so that the event handler can use router structures and call SPAN 4026 * respin. 
4027 */ 4028 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 4029 err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb); 4030 if (err) { 4031 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 4032 goto err_netdev_notifier; 4033 } 4034 4035 err = mlxsw_sp_dpipe_init(mlxsw_sp); 4036 if (err) { 4037 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 4038 goto err_dpipe_init; 4039 } 4040 4041 err = mlxsw_sp_ports_create(mlxsw_sp); 4042 if (err) { 4043 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 4044 goto err_ports_create; 4045 } 4046 4047 return 0; 4048 4049 err_ports_create: 4050 mlxsw_sp_dpipe_fini(mlxsw_sp); 4051 err_dpipe_init: 4052 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 4053 err_netdev_notifier: 4054 mlxsw_sp_router_fini(mlxsw_sp); 4055 err_router_init: 4056 mlxsw_sp_acl_fini(mlxsw_sp); 4057 err_acl_init: 4058 mlxsw_sp_nve_fini(mlxsw_sp); 4059 err_nve_init: 4060 mlxsw_sp_afa_fini(mlxsw_sp); 4061 err_afa_init: 4062 mlxsw_sp_counter_pool_fini(mlxsw_sp); 4063 err_counter_pool_init: 4064 mlxsw_sp_switchdev_fini(mlxsw_sp); 4065 err_switchdev_init: 4066 mlxsw_sp_span_fini(mlxsw_sp); 4067 err_span_init: 4068 mlxsw_sp_lag_fini(mlxsw_sp); 4069 err_lag_init: 4070 mlxsw_sp_buffers_fini(mlxsw_sp); 4071 err_buffers_init: 4072 mlxsw_sp_traps_fini(mlxsw_sp); 4073 err_traps_init: 4074 mlxsw_sp_fids_fini(mlxsw_sp); 4075 err_fids_init: 4076 mlxsw_sp_kvdl_fini(mlxsw_sp); 4077 return err; 4078 } 4079 4080 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 4081 const struct mlxsw_bus_info *mlxsw_bus_info) 4082 { 4083 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4084 4085 mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev; 4086 mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME; 4087 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 4088 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 4089 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 4090 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 4091 
mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 4092 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 4093 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 4094 4095 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); 4096 } 4097 4098 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 4099 const struct mlxsw_bus_info *mlxsw_bus_info) 4100 { 4101 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4102 4103 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 4104 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 4105 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 4106 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 4107 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 4108 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 4109 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 4110 4111 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); 4112 } 4113 4114 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 4115 { 4116 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4117 4118 mlxsw_sp_ports_remove(mlxsw_sp); 4119 mlxsw_sp_dpipe_fini(mlxsw_sp); 4120 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 4121 mlxsw_sp_router_fini(mlxsw_sp); 4122 mlxsw_sp_acl_fini(mlxsw_sp); 4123 mlxsw_sp_nve_fini(mlxsw_sp); 4124 mlxsw_sp_afa_fini(mlxsw_sp); 4125 mlxsw_sp_counter_pool_fini(mlxsw_sp); 4126 mlxsw_sp_switchdev_fini(mlxsw_sp); 4127 mlxsw_sp_span_fini(mlxsw_sp); 4128 mlxsw_sp_lag_fini(mlxsw_sp); 4129 mlxsw_sp_buffers_fini(mlxsw_sp); 4130 mlxsw_sp_traps_fini(mlxsw_sp); 4131 mlxsw_sp_fids_fini(mlxsw_sp); 4132 mlxsw_sp_kvdl_fini(mlxsw_sp); 4133 } 4134 4135 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated 4136 * 802.1Q FIDs 4137 */ 4138 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 4139 VLAN_VID_MASK - 1) 4140 4141 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 4142 .used_max_mid = 1, 4143 .max_mid = MLXSW_SP_MID_MAX, 4144 .used_flood_tables = 1, 4145 .used_flood_mode = 1, 4146 .flood_mode = 3, 4147 
.max_fid_flood_tables = 3, 4148 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 4149 .used_max_ib_mc = 1, 4150 .max_ib_mc = 0, 4151 .used_max_pkey = 1, 4152 .max_pkey = 0, 4153 .used_kvd_sizes = 1, 4154 .kvd_hash_single_parts = 59, 4155 .kvd_hash_double_parts = 41, 4156 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 4157 .swid_config = { 4158 { 4159 .used_type = 1, 4160 .type = MLXSW_PORT_SWID_TYPE_ETH, 4161 } 4162 }, 4163 }; 4164 4165 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 4166 .used_max_mid = 1, 4167 .max_mid = MLXSW_SP_MID_MAX, 4168 .used_flood_tables = 1, 4169 .used_flood_mode = 1, 4170 .flood_mode = 3, 4171 .max_fid_flood_tables = 3, 4172 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 4173 .used_max_ib_mc = 1, 4174 .max_ib_mc = 0, 4175 .used_max_pkey = 1, 4176 .max_pkey = 0, 4177 .swid_config = { 4178 { 4179 .used_type = 1, 4180 .type = MLXSW_PORT_SWID_TYPE_ETH, 4181 } 4182 }, 4183 }; 4184 4185 static void 4186 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 4187 struct devlink_resource_size_params *kvd_size_params, 4188 struct devlink_resource_size_params *linear_size_params, 4189 struct devlink_resource_size_params *hash_double_size_params, 4190 struct devlink_resource_size_params *hash_single_size_params) 4191 { 4192 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 4193 KVD_SINGLE_MIN_SIZE); 4194 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 4195 KVD_DOUBLE_MIN_SIZE); 4196 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 4197 u32 linear_size_min = 0; 4198 4199 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 4200 MLXSW_SP_KVD_GRANULARITY, 4201 DEVLINK_RESOURCE_UNIT_ENTRY); 4202 devlink_resource_size_params_init(linear_size_params, linear_size_min, 4203 kvd_size - single_size_min - 4204 double_size_min, 4205 MLXSW_SP_KVD_GRANULARITY, 4206 DEVLINK_RESOURCE_UNIT_ENTRY); 4207 devlink_resource_size_params_init(hash_double_size_params, 4208 double_size_min, 
4209 kvd_size - single_size_min - 4210 linear_size_min, 4211 MLXSW_SP_KVD_GRANULARITY, 4212 DEVLINK_RESOURCE_UNIT_ENTRY); 4213 devlink_resource_size_params_init(hash_single_size_params, 4214 single_size_min, 4215 kvd_size - double_size_min - 4216 linear_size_min, 4217 MLXSW_SP_KVD_GRANULARITY, 4218 DEVLINK_RESOURCE_UNIT_ENTRY); 4219 } 4220 4221 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 4222 { 4223 struct devlink *devlink = priv_to_devlink(mlxsw_core); 4224 struct devlink_resource_size_params hash_single_size_params; 4225 struct devlink_resource_size_params hash_double_size_params; 4226 struct devlink_resource_size_params linear_size_params; 4227 struct devlink_resource_size_params kvd_size_params; 4228 u32 kvd_size, single_size, double_size, linear_size; 4229 const struct mlxsw_config_profile *profile; 4230 int err; 4231 4232 profile = &mlxsw_sp1_config_profile; 4233 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 4234 return -EIO; 4235 4236 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 4237 &linear_size_params, 4238 &hash_double_size_params, 4239 &hash_single_size_params); 4240 4241 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 4242 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 4243 kvd_size, MLXSW_SP_RESOURCE_KVD, 4244 DEVLINK_RESOURCE_ID_PARENT_TOP, 4245 &kvd_size_params); 4246 if (err) 4247 return err; 4248 4249 linear_size = profile->kvd_linear_size; 4250 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 4251 linear_size, 4252 MLXSW_SP_RESOURCE_KVD_LINEAR, 4253 MLXSW_SP_RESOURCE_KVD, 4254 &linear_size_params); 4255 if (err) 4256 return err; 4257 4258 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 4259 if (err) 4260 return err; 4261 4262 double_size = kvd_size - linear_size; 4263 double_size *= profile->kvd_hash_double_parts; 4264 double_size /= profile->kvd_hash_double_parts + 4265 profile->kvd_hash_single_parts; 4266 double_size = 
rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 4267 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 4268 double_size, 4269 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 4270 MLXSW_SP_RESOURCE_KVD, 4271 &hash_double_size_params); 4272 if (err) 4273 return err; 4274 4275 single_size = kvd_size - double_size - linear_size; 4276 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 4277 single_size, 4278 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 4279 MLXSW_SP_RESOURCE_KVD, 4280 &hash_single_size_params); 4281 if (err) 4282 return err; 4283 4284 return 0; 4285 } 4286 4287 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 4288 { 4289 return mlxsw_sp1_resources_kvd_register(mlxsw_core); 4290 } 4291 4292 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 4293 { 4294 return 0; 4295 } 4296 4297 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 4298 const struct mlxsw_config_profile *profile, 4299 u64 *p_single_size, u64 *p_double_size, 4300 u64 *p_linear_size) 4301 { 4302 struct devlink *devlink = priv_to_devlink(mlxsw_core); 4303 u32 double_size; 4304 int err; 4305 4306 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 4307 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 4308 return -EIO; 4309 4310 /* The hash part is what left of the kvd without the 4311 * linear part. It is split to the single size and 4312 * double size by the parts ratio from the profile. 4313 * Both sizes must be a multiplications of the 4314 * granularity from the profile. In case the user 4315 * provided the sizes they are obtained via devlink. 
4316 */ 4317 err = devlink_resource_size_get(devlink, 4318 MLXSW_SP_RESOURCE_KVD_LINEAR, 4319 p_linear_size); 4320 if (err) 4321 *p_linear_size = profile->kvd_linear_size; 4322 4323 err = devlink_resource_size_get(devlink, 4324 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 4325 p_double_size); 4326 if (err) { 4327 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 4328 *p_linear_size; 4329 double_size *= profile->kvd_hash_double_parts; 4330 double_size /= profile->kvd_hash_double_parts + 4331 profile->kvd_hash_single_parts; 4332 *p_double_size = rounddown(double_size, 4333 MLXSW_SP_KVD_GRANULARITY); 4334 } 4335 4336 err = devlink_resource_size_get(devlink, 4337 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 4338 p_single_size); 4339 if (err) 4340 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 4341 *p_double_size - *p_linear_size; 4342 4343 /* Check results are legal. */ 4344 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 4345 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 4346 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 4347 return -EIO; 4348 4349 return 0; 4350 } 4351 4352 static int 4353 mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id, 4354 union devlink_param_value val, 4355 struct netlink_ext_ack *extack) 4356 { 4357 if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) && 4358 (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) { 4359 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'"); 4360 return -EINVAL; 4361 } 4362 4363 return 0; 4364 } 4365 4366 static const struct devlink_param mlxsw_sp_devlink_params[] = { 4367 DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, 4368 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), 4369 NULL, NULL, 4370 mlxsw_sp_devlink_param_fw_load_policy_validate), 4371 }; 4372 4373 static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core) 4374 { 4375 struct devlink *devlink = priv_to_devlink(mlxsw_core); 4376 union 
devlink_param_value value; 4377 int err; 4378 4379 err = devlink_params_register(devlink, mlxsw_sp_devlink_params, 4380 ARRAY_SIZE(mlxsw_sp_devlink_params)); 4381 if (err) 4382 return err; 4383 4384 value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER; 4385 devlink_param_driverinit_value_set(devlink, 4386 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 4387 value); 4388 return 0; 4389 } 4390 4391 static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core) 4392 { 4393 devlink_params_unregister(priv_to_devlink(mlxsw_core), 4394 mlxsw_sp_devlink_params, 4395 ARRAY_SIZE(mlxsw_sp_devlink_params)); 4396 } 4397 4398 static struct mlxsw_driver mlxsw_sp1_driver = { 4399 .kind = mlxsw_sp1_driver_name, 4400 .priv_size = sizeof(struct mlxsw_sp), 4401 .init = mlxsw_sp1_init, 4402 .fini = mlxsw_sp_fini, 4403 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 4404 .port_split = mlxsw_sp_port_split, 4405 .port_unsplit = mlxsw_sp_port_unsplit, 4406 .sb_pool_get = mlxsw_sp_sb_pool_get, 4407 .sb_pool_set = mlxsw_sp_sb_pool_set, 4408 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4409 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4410 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4411 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4412 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4413 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4414 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4415 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4416 .txhdr_construct = mlxsw_sp_txhdr_construct, 4417 .resources_register = mlxsw_sp1_resources_register, 4418 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 4419 .params_register = mlxsw_sp_params_register, 4420 .params_unregister = mlxsw_sp_params_unregister, 4421 .txhdr_len = MLXSW_TXHDR_LEN, 4422 .profile = &mlxsw_sp1_config_profile, 4423 .res_query_enabled = true, 4424 }; 4425 4426 static struct mlxsw_driver mlxsw_sp2_driver = { 4427 .kind = mlxsw_sp2_driver_name, 4428 .priv_size = sizeof(struct mlxsw_sp), 4429 
.init = mlxsw_sp2_init, 4430 .fini = mlxsw_sp_fini, 4431 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 4432 .port_split = mlxsw_sp_port_split, 4433 .port_unsplit = mlxsw_sp_port_unsplit, 4434 .sb_pool_get = mlxsw_sp_sb_pool_get, 4435 .sb_pool_set = mlxsw_sp_sb_pool_set, 4436 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4437 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4438 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4439 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4440 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4441 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4442 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4443 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4444 .txhdr_construct = mlxsw_sp_txhdr_construct, 4445 .resources_register = mlxsw_sp2_resources_register, 4446 .params_register = mlxsw_sp_params_register, 4447 .params_unregister = mlxsw_sp_params_unregister, 4448 .txhdr_len = MLXSW_TXHDR_LEN, 4449 .profile = &mlxsw_sp2_config_profile, 4450 .res_query_enabled = true, 4451 }; 4452 4453 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 4454 { 4455 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 4456 } 4457 4458 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) 4459 { 4460 struct mlxsw_sp_port **p_mlxsw_sp_port = data; 4461 int ret = 0; 4462 4463 if (mlxsw_sp_port_dev_check(lower_dev)) { 4464 *p_mlxsw_sp_port = netdev_priv(lower_dev); 4465 ret = 1; 4466 } 4467 4468 return ret; 4469 } 4470 4471 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 4472 { 4473 struct mlxsw_sp_port *mlxsw_sp_port; 4474 4475 if (mlxsw_sp_port_dev_check(dev)) 4476 return netdev_priv(dev); 4477 4478 mlxsw_sp_port = NULL; 4479 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); 4480 4481 return mlxsw_sp_port; 4482 } 4483 4484 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 4485 { 4486 struct mlxsw_sp_port *mlxsw_sp_port; 4487 4488 
mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 4489 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; 4490 } 4491 4492 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 4493 { 4494 struct mlxsw_sp_port *mlxsw_sp_port; 4495 4496 if (mlxsw_sp_port_dev_check(dev)) 4497 return netdev_priv(dev); 4498 4499 mlxsw_sp_port = NULL; 4500 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 4501 &mlxsw_sp_port); 4502 4503 return mlxsw_sp_port; 4504 } 4505 4506 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 4507 { 4508 struct mlxsw_sp_port *mlxsw_sp_port; 4509 4510 rcu_read_lock(); 4511 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 4512 if (mlxsw_sp_port) 4513 dev_hold(mlxsw_sp_port->dev); 4514 rcu_read_unlock(); 4515 return mlxsw_sp_port; 4516 } 4517 4518 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 4519 { 4520 dev_put(mlxsw_sp_port->dev); 4521 } 4522 4523 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4524 { 4525 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4526 4527 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 4528 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4529 } 4530 4531 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4532 { 4533 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4534 4535 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 4536 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4537 } 4538 4539 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4540 u16 lag_id, u8 port_index) 4541 { 4542 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4543 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4544 4545 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 4546 lag_id, port_index); 4547 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4548 } 4549 4550 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4551 u16 lag_id) 4552 { 4553 
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4554 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4555 4556 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 4557 lag_id); 4558 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4559 } 4560 4561 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 4562 u16 lag_id) 4563 { 4564 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4565 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4566 4567 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 4568 lag_id); 4569 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4570 } 4571 4572 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 4573 u16 lag_id) 4574 { 4575 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4576 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4577 4578 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 4579 lag_id); 4580 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4581 } 4582 4583 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4584 struct net_device *lag_dev, 4585 u16 *p_lag_id) 4586 { 4587 struct mlxsw_sp_upper *lag; 4588 int free_lag_id = -1; 4589 u64 max_lag; 4590 int i; 4591 4592 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); 4593 for (i = 0; i < max_lag; i++) { 4594 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 4595 if (lag->ref_count) { 4596 if (lag->dev == lag_dev) { 4597 *p_lag_id = i; 4598 return 0; 4599 } 4600 } else if (free_lag_id < 0) { 4601 free_lag_id = i; 4602 } 4603 } 4604 if (free_lag_id < 0) 4605 return -EBUSY; 4606 *p_lag_id = free_lag_id; 4607 return 0; 4608 } 4609 4610 static bool 4611 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 4612 struct net_device *lag_dev, 4613 struct netdev_lag_upper_info *lag_upper_info, 4614 struct netlink_ext_ack *extack) 4615 { 4616 u16 lag_id; 4617 4618 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 4619 
NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 4620 return false; 4621 } 4622 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 4623 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 4624 return false; 4625 } 4626 return true; 4627 } 4628 4629 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4630 u16 lag_id, u8 *p_port_index) 4631 { 4632 u64 max_lag_members; 4633 int i; 4634 4635 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 4636 MAX_LAG_MEMBERS); 4637 for (i = 0; i < max_lag_members; i++) { 4638 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 4639 *p_port_index = i; 4640 return 0; 4641 } 4642 } 4643 return -EBUSY; 4644 } 4645 4646 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 4647 struct net_device *lag_dev) 4648 { 4649 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4650 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 4651 struct mlxsw_sp_upper *lag; 4652 u16 lag_id; 4653 u8 port_index; 4654 int err; 4655 4656 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 4657 if (err) 4658 return err; 4659 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4660 if (!lag->ref_count) { 4661 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 4662 if (err) 4663 return err; 4664 lag->dev = lag_dev; 4665 } 4666 4667 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 4668 if (err) 4669 return err; 4670 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 4671 if (err) 4672 goto err_col_port_add; 4673 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id); 4674 if (err) 4675 goto err_col_port_enable; 4676 4677 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 4678 mlxsw_sp_port->local_port); 4679 mlxsw_sp_port->lag_id = lag_id; 4680 mlxsw_sp_port->lagged = 1; 4681 lag->ref_count++; 4682 4683 /* Port is no longer usable as a router interface */ 4684 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1); 4685 if 
(mlxsw_sp_port_vlan->fid) 4686 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 4687 4688 return 0; 4689 4690 err_col_port_enable: 4691 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4692 err_col_port_add: 4693 if (!lag->ref_count) 4694 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4695 return err; 4696 } 4697 4698 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 4699 struct net_device *lag_dev) 4700 { 4701 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4702 u16 lag_id = mlxsw_sp_port->lag_id; 4703 struct mlxsw_sp_upper *lag; 4704 4705 if (!mlxsw_sp_port->lagged) 4706 return; 4707 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4708 WARN_ON(lag->ref_count == 0); 4709 4710 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); 4711 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4712 4713 /* Any VLANs configured on the port are no longer valid */ 4714 mlxsw_sp_port_vlan_flush(mlxsw_sp_port); 4715 4716 if (lag->ref_count == 1) 4717 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4718 4719 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4720 mlxsw_sp_port->local_port); 4721 mlxsw_sp_port->lagged = 0; 4722 lag->ref_count--; 4723 4724 mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); 4725 /* Make sure untagged frames are allowed to ingress */ 4726 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); 4727 } 4728 4729 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4730 u16 lag_id) 4731 { 4732 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4733 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4734 4735 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 4736 mlxsw_sp_port->local_port); 4737 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4738 } 4739 4740 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4741 u16 lag_id) 4742 { 4743 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4744 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4745 4746 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 4747 
mlxsw_sp_port->local_port); 4748 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4749 } 4750 4751 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port, 4752 bool lag_tx_enabled) 4753 { 4754 if (lag_tx_enabled) 4755 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, 4756 mlxsw_sp_port->lag_id); 4757 else 4758 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 4759 mlxsw_sp_port->lag_id); 4760 } 4761 4762 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 4763 struct netdev_lag_lower_state_info *info) 4764 { 4765 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); 4766 } 4767 4768 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 4769 bool enable) 4770 { 4771 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4772 enum mlxsw_reg_spms_state spms_state; 4773 char *spms_pl; 4774 u16 vid; 4775 int err; 4776 4777 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : 4778 MLXSW_REG_SPMS_STATE_DISCARDING; 4779 4780 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 4781 if (!spms_pl) 4782 return -ENOMEM; 4783 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 4784 4785 for (vid = 0; vid < VLAN_N_VID; vid++) 4786 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 4787 4788 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 4789 kfree(spms_pl); 4790 return err; 4791 } 4792 4793 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4794 { 4795 u16 vid = 1; 4796 int err; 4797 4798 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 4799 if (err) 4800 return err; 4801 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 4802 if (err) 4803 goto err_port_stp_set; 4804 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, 4805 true, false); 4806 if (err) 4807 goto err_port_vlan_set; 4808 4809 for (; vid <= VLAN_N_VID - 1; vid++) { 4810 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4811 vid, false); 4812 if (err) 4813 goto 
err_vid_learning_set; 4814 } 4815 4816 return 0; 4817 4818 err_vid_learning_set: 4819 for (vid--; vid >= 1; vid--) 4820 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 4821 err_port_vlan_set: 4822 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4823 err_port_stp_set: 4824 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4825 return err; 4826 } 4827 4828 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4829 { 4830 u16 vid; 4831 4832 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 4833 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4834 vid, true); 4835 4836 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, 4837 false, false); 4838 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4839 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4840 } 4841 4842 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 4843 { 4844 unsigned int num_vxlans = 0; 4845 struct net_device *dev; 4846 struct list_head *iter; 4847 4848 netdev_for_each_lower_dev(br_dev, dev, iter) { 4849 if (netif_is_vxlan(dev)) 4850 num_vxlans++; 4851 } 4852 4853 return num_vxlans > 1; 4854 } 4855 4856 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 4857 { 4858 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 4859 struct net_device *dev; 4860 struct list_head *iter; 4861 4862 netdev_for_each_lower_dev(br_dev, dev, iter) { 4863 u16 pvid; 4864 int err; 4865 4866 if (!netif_is_vxlan(dev)) 4867 continue; 4868 4869 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 4870 if (err || !pvid) 4871 continue; 4872 4873 if (test_and_set_bit(pvid, vlans)) 4874 return false; 4875 } 4876 4877 return true; 4878 } 4879 4880 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 4881 struct netlink_ext_ack *extack) 4882 { 4883 if (br_multicast_enabled(br_dev)) { 4884 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 4885 return false; 4886 } 4887 4888 if (!br_vlan_enabled(br_dev) && 4889 
mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 4890 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 4891 return false; 4892 } 4893 4894 if (br_vlan_enabled(br_dev) && 4895 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 4896 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 4897 return false; 4898 } 4899 4900 return true; 4901 } 4902 4903 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 4904 struct net_device *dev, 4905 unsigned long event, void *ptr) 4906 { 4907 struct netdev_notifier_changeupper_info *info; 4908 struct mlxsw_sp_port *mlxsw_sp_port; 4909 struct netlink_ext_ack *extack; 4910 struct net_device *upper_dev; 4911 struct mlxsw_sp *mlxsw_sp; 4912 int err = 0; 4913 4914 mlxsw_sp_port = netdev_priv(dev); 4915 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4916 info = ptr; 4917 extack = netdev_notifier_info_to_extack(&info->info); 4918 4919 switch (event) { 4920 case NETDEV_PRECHANGEUPPER: 4921 upper_dev = info->upper_dev; 4922 if (!is_vlan_dev(upper_dev) && 4923 !netif_is_lag_master(upper_dev) && 4924 !netif_is_bridge_master(upper_dev) && 4925 !netif_is_ovs_master(upper_dev) && 4926 !netif_is_macvlan(upper_dev)) { 4927 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4928 return -EINVAL; 4929 } 4930 if (!info->linking) 4931 break; 4932 if (netif_is_bridge_master(upper_dev) && 4933 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 4934 mlxsw_sp_bridge_has_vxlan(upper_dev) && 4935 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4936 return -EOPNOTSUPP; 4937 if (netdev_has_any_upper_dev(upper_dev) && 4938 (!netif_is_bridge_master(upper_dev) || 4939 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4940 upper_dev))) { 4941 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 4942 return -EINVAL; 4943 } 4944 if (netif_is_lag_master(upper_dev) && 4945 
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 4946 info->upper_info, extack)) 4947 return -EINVAL; 4948 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 4949 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 4950 return -EINVAL; 4951 } 4952 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 4953 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 4954 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 4955 return -EINVAL; 4956 } 4957 if (netif_is_macvlan(upper_dev) && 4958 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) { 4959 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4960 return -EOPNOTSUPP; 4961 } 4962 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 4963 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); 4964 return -EINVAL; 4965 } 4966 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 4967 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 4968 return -EINVAL; 4969 } 4970 if (is_vlan_dev(upper_dev) && 4971 vlan_dev_vlan_id(upper_dev) == 1) { 4972 NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic"); 4973 return -EINVAL; 4974 } 4975 break; 4976 case NETDEV_CHANGEUPPER: 4977 upper_dev = info->upper_dev; 4978 if (netif_is_bridge_master(upper_dev)) { 4979 if (info->linking) 4980 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4981 lower_dev, 4982 upper_dev, 4983 extack); 4984 else 4985 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4986 lower_dev, 4987 upper_dev); 4988 } else if (netif_is_lag_master(upper_dev)) { 4989 if (info->linking) 4990 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 4991 upper_dev); 4992 else 4993 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 4994 upper_dev); 4995 } else if (netif_is_ovs_master(upper_dev)) { 4996 if (info->linking) 4997 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 4998 else 4999 
mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 5000 } else if (netif_is_macvlan(upper_dev)) { 5001 if (!info->linking) 5002 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5003 } 5004 break; 5005 } 5006 5007 return err; 5008 } 5009 5010 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 5011 unsigned long event, void *ptr) 5012 { 5013 struct netdev_notifier_changelowerstate_info *info; 5014 struct mlxsw_sp_port *mlxsw_sp_port; 5015 int err; 5016 5017 mlxsw_sp_port = netdev_priv(dev); 5018 info = ptr; 5019 5020 switch (event) { 5021 case NETDEV_CHANGELOWERSTATE: 5022 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 5023 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 5024 info->lower_state_info); 5025 if (err) 5026 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 5027 } 5028 break; 5029 } 5030 5031 return 0; 5032 } 5033 5034 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 5035 struct net_device *port_dev, 5036 unsigned long event, void *ptr) 5037 { 5038 switch (event) { 5039 case NETDEV_PRECHANGEUPPER: 5040 case NETDEV_CHANGEUPPER: 5041 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 5042 event, ptr); 5043 case NETDEV_CHANGELOWERSTATE: 5044 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 5045 ptr); 5046 } 5047 5048 return 0; 5049 } 5050 5051 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 5052 unsigned long event, void *ptr) 5053 { 5054 struct net_device *dev; 5055 struct list_head *iter; 5056 int ret; 5057 5058 netdev_for_each_lower_dev(lag_dev, dev, iter) { 5059 if (mlxsw_sp_port_dev_check(dev)) { 5060 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 5061 ptr); 5062 if (ret) 5063 return ret; 5064 } 5065 } 5066 5067 return 0; 5068 } 5069 5070 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 5071 struct net_device *dev, 5072 unsigned long event, void *ptr, 5073 u16 vid) 5074 { 5075 struct mlxsw_sp_port *mlxsw_sp_port = 
netdev_priv(dev); 5076 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5077 struct netdev_notifier_changeupper_info *info = ptr; 5078 struct netlink_ext_ack *extack; 5079 struct net_device *upper_dev; 5080 int err = 0; 5081 5082 extack = netdev_notifier_info_to_extack(&info->info); 5083 5084 switch (event) { 5085 case NETDEV_PRECHANGEUPPER: 5086 upper_dev = info->upper_dev; 5087 if (!netif_is_bridge_master(upper_dev) && 5088 !netif_is_macvlan(upper_dev)) { 5089 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5090 return -EINVAL; 5091 } 5092 if (!info->linking) 5093 break; 5094 if (netif_is_bridge_master(upper_dev) && 5095 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 5096 mlxsw_sp_bridge_has_vxlan(upper_dev) && 5097 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5098 return -EOPNOTSUPP; 5099 if (netdev_has_any_upper_dev(upper_dev) && 5100 (!netif_is_bridge_master(upper_dev) || 5101 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 5102 upper_dev))) { 5103 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 5104 return -EINVAL; 5105 } 5106 if (netif_is_macvlan(upper_dev) && 5107 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { 5108 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 5109 return -EOPNOTSUPP; 5110 } 5111 break; 5112 case NETDEV_CHANGEUPPER: 5113 upper_dev = info->upper_dev; 5114 if (netif_is_bridge_master(upper_dev)) { 5115 if (info->linking) 5116 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 5117 vlan_dev, 5118 upper_dev, 5119 extack); 5120 else 5121 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 5122 vlan_dev, 5123 upper_dev); 5124 } else if (netif_is_macvlan(upper_dev)) { 5125 if (!info->linking) 5126 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5127 } else { 5128 err = -EINVAL; 5129 WARN_ON(1); 5130 } 5131 break; 5132 } 5133 5134 return err; 5135 } 5136 5137 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device 
*vlan_dev, 5138 struct net_device *lag_dev, 5139 unsigned long event, 5140 void *ptr, u16 vid) 5141 { 5142 struct net_device *dev; 5143 struct list_head *iter; 5144 int ret; 5145 5146 netdev_for_each_lower_dev(lag_dev, dev, iter) { 5147 if (mlxsw_sp_port_dev_check(dev)) { 5148 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 5149 event, ptr, 5150 vid); 5151 if (ret) 5152 return ret; 5153 } 5154 } 5155 5156 return 0; 5157 } 5158 5159 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 5160 unsigned long event, void *ptr) 5161 { 5162 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 5163 u16 vid = vlan_dev_vlan_id(vlan_dev); 5164 5165 if (mlxsw_sp_port_dev_check(real_dev)) 5166 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 5167 event, ptr, vid); 5168 else if (netif_is_lag_master(real_dev)) 5169 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 5170 real_dev, event, 5171 ptr, vid); 5172 5173 return 0; 5174 } 5175 5176 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 5177 unsigned long event, void *ptr) 5178 { 5179 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); 5180 struct netdev_notifier_changeupper_info *info = ptr; 5181 struct netlink_ext_ack *extack; 5182 struct net_device *upper_dev; 5183 5184 if (!mlxsw_sp) 5185 return 0; 5186 5187 extack = netdev_notifier_info_to_extack(&info->info); 5188 5189 switch (event) { 5190 case NETDEV_PRECHANGEUPPER: 5191 upper_dev = info->upper_dev; 5192 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { 5193 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5194 return -EOPNOTSUPP; 5195 } 5196 if (!info->linking) 5197 break; 5198 if (netif_is_macvlan(upper_dev) && 5199 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) { 5200 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 5201 return -EOPNOTSUPP; 5202 } 5203 break; 5204 case NETDEV_CHANGEUPPER: 5205 upper_dev = info->upper_dev; 5206 if 
(info->linking) 5207 break; 5208 if (is_vlan_dev(upper_dev)) 5209 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 5210 if (netif_is_macvlan(upper_dev)) 5211 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5212 break; 5213 } 5214 5215 return 0; 5216 } 5217 5218 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 5219 unsigned long event, void *ptr) 5220 { 5221 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 5222 struct netdev_notifier_changeupper_info *info = ptr; 5223 struct netlink_ext_ack *extack; 5224 5225 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 5226 return 0; 5227 5228 extack = netdev_notifier_info_to_extack(&info->info); 5229 5230 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ 5231 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5232 5233 return -EOPNOTSUPP; 5234 } 5235 5236 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) 5237 { 5238 struct netdev_notifier_changeupper_info *info = ptr; 5239 5240 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) 5241 return false; 5242 return netif_is_l3_master(info->upper_dev); 5243 } 5244 5245 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 5246 struct net_device *dev, 5247 unsigned long event, void *ptr) 5248 { 5249 struct netdev_notifier_changeupper_info *cu_info; 5250 struct netdev_notifier_info *info = ptr; 5251 struct netlink_ext_ack *extack; 5252 struct net_device *upper_dev; 5253 5254 extack = netdev_notifier_info_to_extack(info); 5255 5256 switch (event) { 5257 case NETDEV_CHANGEUPPER: 5258 cu_info = container_of(info, 5259 struct netdev_notifier_changeupper_info, 5260 info); 5261 upper_dev = cu_info->upper_dev; 5262 if (!netif_is_bridge_master(upper_dev)) 5263 return 0; 5264 if (!mlxsw_sp_lower_get(upper_dev)) 5265 return 0; 5266 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5267 return -EOPNOTSUPP; 5268 if (cu_info->linking) { 5269 if (!netif_running(dev)) 5270 return 0; 5271 
/* When the bridge is VLAN-aware, the VNI of the VxLAN 5272 * device needs to be mapped to a VLAN, but at this 5273 * point no VLANs are configured on the VxLAN device 5274 */ 5275 if (br_vlan_enabled(upper_dev)) 5276 return 0; 5277 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 5278 dev, 0, extack); 5279 } else { 5280 /* VLANs were already flushed, which triggered the 5281 * necessary cleanup 5282 */ 5283 if (br_vlan_enabled(upper_dev)) 5284 return 0; 5285 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5286 } 5287 break; 5288 case NETDEV_PRE_UP: 5289 upper_dev = netdev_master_upper_dev_get(dev); 5290 if (!upper_dev) 5291 return 0; 5292 if (!netif_is_bridge_master(upper_dev)) 5293 return 0; 5294 if (!mlxsw_sp_lower_get(upper_dev)) 5295 return 0; 5296 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 5297 extack); 5298 case NETDEV_DOWN: 5299 upper_dev = netdev_master_upper_dev_get(dev); 5300 if (!upper_dev) 5301 return 0; 5302 if (!netif_is_bridge_master(upper_dev)) 5303 return 0; 5304 if (!mlxsw_sp_lower_get(upper_dev)) 5305 return 0; 5306 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5307 break; 5308 } 5309 5310 return 0; 5311 } 5312 5313 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 5314 unsigned long event, void *ptr) 5315 { 5316 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 5317 struct mlxsw_sp_span_entry *span_entry; 5318 struct mlxsw_sp *mlxsw_sp; 5319 int err = 0; 5320 5321 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 5322 if (event == NETDEV_UNREGISTER) { 5323 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 5324 if (span_entry) 5325 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 5326 } 5327 mlxsw_sp_span_respin(mlxsw_sp); 5328 5329 if (netif_is_vxlan(dev)) 5330 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 5331 if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) 5332 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, 5333 event, ptr); 5334 else if 
(mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) 5335 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, 5336 event, ptr); 5337 else if (event == NETDEV_PRE_CHANGEADDR || 5338 event == NETDEV_CHANGEADDR || 5339 event == NETDEV_CHANGEMTU) 5340 err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); 5341 else if (mlxsw_sp_is_vrf_event(event, ptr)) 5342 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); 5343 else if (mlxsw_sp_port_dev_check(dev)) 5344 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); 5345 else if (netif_is_lag_master(dev)) 5346 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 5347 else if (is_vlan_dev(dev)) 5348 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 5349 else if (netif_is_bridge_master(dev)) 5350 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); 5351 else if (netif_is_macvlan(dev)) 5352 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 5353 5354 return notifier_from_errno(err); 5355 } 5356 5357 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { 5358 .notifier_call = mlxsw_sp_inetaddr_valid_event, 5359 }; 5360 5361 static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = { 5362 .notifier_call = mlxsw_sp_inetaddr_event, 5363 }; 5364 5365 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { 5366 .notifier_call = mlxsw_sp_inet6addr_valid_event, 5367 }; 5368 5369 static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = { 5370 .notifier_call = mlxsw_sp_inet6addr_event, 5371 }; 5372 5373 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 5374 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 5375 {0, }, 5376 }; 5377 5378 static struct pci_driver mlxsw_sp1_pci_driver = { 5379 .name = mlxsw_sp1_driver_name, 5380 .id_table = mlxsw_sp1_pci_id_table, 5381 }; 5382 5383 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 5384 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 5385 {0, }, 5386 }; 5387 5388 static 
struct pci_driver mlxsw_sp2_pci_driver = { 5389 .name = mlxsw_sp2_driver_name, 5390 .id_table = mlxsw_sp2_pci_id_table, 5391 }; 5392 5393 static int __init mlxsw_sp_module_init(void) 5394 { 5395 int err; 5396 5397 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5398 register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 5399 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5400 register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); 5401 5402 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 5403 if (err) 5404 goto err_sp1_core_driver_register; 5405 5406 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 5407 if (err) 5408 goto err_sp2_core_driver_register; 5409 5410 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 5411 if (err) 5412 goto err_sp1_pci_driver_register; 5413 5414 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 5415 if (err) 5416 goto err_sp2_pci_driver_register; 5417 5418 return 0; 5419 5420 err_sp2_pci_driver_register: 5421 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5422 err_sp1_pci_driver_register: 5423 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5424 err_sp2_core_driver_register: 5425 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5426 err_sp1_core_driver_register: 5427 unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); 5428 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5429 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 5430 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5431 return err; 5432 } 5433 5434 static void __exit mlxsw_sp_module_exit(void) 5435 { 5436 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5437 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5438 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5439 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5440 unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); 5441 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5442 
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 5443 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5444 } 5445 5446 module_init(mlxsw_sp_module_init); 5447 module_exit(mlxsw_sp_module_exit); 5448 5449 MODULE_LICENSE("Dual BSD/GPL"); 5450 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 5451 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 5452 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 5453 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 5454 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 5455