// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"
#include "../mlxfw/mlxfw.h"

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2007
#define MLXSW_SP1_FWREV_SUBMINOR 1168
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2007
#define MLXSW_SP2_FWREV_SUBMINOR 1168

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP2_FWREV_MINOR,
	.subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP2_FWREV_MINOR) \
	"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2007
#define MLXSW_SP3_FWREV_SUBMINOR 1168

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP3_FWREV_MINOR,
	.subminor = MLXSW_SP3_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP3_FWREV_MINOR) \
	"." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";

static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

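/* Tx header fields. Each MLXSW_ITEM32() below generates set/get
 * accessors for one field of the MLXSW_TXHDR_LEN-sized header that
 * mlxsw_sp_txhdr_construct() pushes in front of every transmitted
 * frame and that the device consumes on its way out.
 */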
/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

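/* The callbacks above back the generic mlxfw flashing state machine:
 * lock the FSM, then for each component of the .mfa2 file issue an
 * update, download it in MCDA-sized blocks and verify it, and finally
 * activate the new image and release the handle. Cancel is used to
 * abort a failed flow.
 */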
static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query = mlxsw_sp_component_query,
	.fsm_lock = mlxsw_sp_fsm_lock,
	.fsm_component_update = mlxsw_sp_fsm_component_update,
	.fsm_block_download = mlxsw_sp_fsm_block_download,
	.fsm_component_verify = mlxsw_sp_fsm_component_verify,
	.fsm_activate = mlxsw_sp_fsm_activate,
	.fsm_query_state = mlxsw_sp_fsm_query_state,
	.fsm_cancel = mlxsw_sp_fsm_cancel,
	.fsm_release = mlxsw_sp_fsm_release,
};

static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
			.devlink = priv_to_devlink(mlxsw_sp->core),
		},
		.mlxsw_sp = mlxsw_sp
	};
	int err;

	mlxsw_core_fw_flash_start(mlxsw_sp->core);
	err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
				   firmware, extack);
	mlxsw_core_fw_flash_end(mlxsw_sp->core);

	return err;
}

static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
	const char *fw_filename = mlxsw_sp->fw_filename;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !fw_filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
						 &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
		return 0;

	dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
		rev->major, rev->minor, rev->subminor, req_rev->major,
		req_rev->minor, req_rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
		 fw_filename);

	err = request_firmware_direct(&firmware, fw_filename,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			fw_filename);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}

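/* Entry point for user-requested flashing, reached through devlink.
 * Only a monolithic firmware file is supported; per-component updates
 * are rejected with -EOPNOTSUPP.
 */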
static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
				 const char *file_name, const char *component,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	const struct firmware *firmware;
	int err;

	if (component)
		return -EOPNOTSUPP;

	err = request_firmware_direct(&firmware, file_name,
				      mlxsw_sp->bus_info->dev);
	if (err)
		return err;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
	release_firmware(firmware);

	return err;
}

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

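/* Build a control-packet Tx header. The FID fields stay clear since
 * control packets are directed to a specific egress port by
 * 'port_mid' alone, without a forwarding lookup.
 */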
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	bool separate_rxtx;
	u8 module;
	u8 width;
	int err;
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

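/* Transmit path. The Tx header pushed below is consumed by the
 * device, so its length is excluded from the byte count, and 'len' is
 * sampled before the skb is handed to the core because a successfully
 * queued skb may be freed before the stats are updated.
 */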
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

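/* Worked example (hypothetical numbers): for a PFC delay allowance of
 * 32768 bit times, the delay is first rounded up to 4096 bytes and
 * converted to cells, doubled by MLXSW_SP_CELL_FACTOR to absorb
 * per-cell overhead, and one MTU worth of cells is added for a frame
 * that may already be in flight when the PFC frame arrives.
 */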
/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	u32 taken_headroom_cells = 0;
	u32 max_headroom_cells;
	int i, j, err;

	max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		u16 thres_cells;
		u16 delay_cells;
		u16 total_cells;
		bool lossy;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
							pfc, pause_en);
		delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
		total_cells = thres_cells + delay_cells;

		taken_headroom_cells += total_cells;
		if (taken_headroom_cells > max_headroom_cells)
			return -ENOBUFS;

		mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
				     thres_cells, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
			       int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

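/* Fold the per-CPU software counters into 'stats'. The u64_stats
 * seqcount retry loop keeps the 64-bit reads coherent on 32-bit
 * architectures; tx_dropped is a plain u32 and needs no such
 * protection.
 */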
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

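/* Program a [vid_begin, vid_end] range in chunks, since a single SPVM
 * transaction carries at most MLXSW_REG_SPVM_REC_MAX_COUNT records.
 */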
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

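/* Generic feature toggle: invoke the handler only when the requested
 * state differs from dev->features and sync the feature bit on
 * success.
 */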
typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to supported speeds. */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap, mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

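/* The resulting scheduling hierarchy per port, with one subgroup per
 * TC and TC i+8 carrying the multicast traffic of TC i:
 *
 *   port --- group 0 --- subgroup i --- TC i
 *                                   \-- TC i+8 (DWRR, weight 100)
 */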
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

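/* Create a single front-panel port: allocate and register the netdev
 * and bring the port through its initialization sequence. Every step
 * has a matching unwind label in reverse order at the bottom.
 */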
1828 */ 1829 dev->needed_headroom = MLXSW_TXHDR_LEN; 1830 1831 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 1832 if (err) { 1833 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 1834 mlxsw_sp_port->local_port); 1835 goto err_port_system_port_mapping_set; 1836 } 1837 1838 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port); 1839 if (err) { 1840 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 1841 mlxsw_sp_port->local_port); 1842 goto err_port_speed_by_width_set; 1843 } 1844 1845 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 1846 if (err) { 1847 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 1848 mlxsw_sp_port->local_port); 1849 goto err_port_mtu_set; 1850 } 1851 1852 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1853 if (err) 1854 goto err_port_admin_status_set; 1855 1856 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 1857 if (err) { 1858 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 1859 mlxsw_sp_port->local_port); 1860 goto err_port_buffers_init; 1861 } 1862 1863 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 1864 if (err) { 1865 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 1866 mlxsw_sp_port->local_port); 1867 goto err_port_ets_init; 1868 } 1869 1870 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 1871 if (err) { 1872 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 1873 mlxsw_sp_port->local_port); 1874 goto err_port_tc_mc_mode; 1875 } 1876 1877 /* ETS and buffers must be initialized before DCB. */ 1878 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 1879 if (err) { 1880 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 1881 mlxsw_sp_port->local_port); 1882 goto err_port_dcb_init; 1883 } 1884 1885 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 1886 if (err) { 1887 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 1888 mlxsw_sp_port->local_port); 1889 goto err_port_fids_init; 1890 } 1891 1892 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 1893 if (err) { 1894 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 1895 mlxsw_sp_port->local_port); 1896 goto err_port_qdiscs_init; 1897 } 1898 1899 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false, 1900 false); 1901 if (err) { 1902 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n", 1903 mlxsw_sp_port->local_port); 1904 goto err_port_vlan_clear; 1905 } 1906 1907 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 1908 if (err) { 1909 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 1910 mlxsw_sp_port->local_port); 1911 goto err_port_nve_init; 1912 } 1913 1914 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 1915 if (err) { 1916 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", 1917 mlxsw_sp_port->local_port); 1918 goto err_port_pvid_set; 1919 } 1920 1921 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 1922 MLXSW_SP_DEFAULT_VID); 1923 if (IS_ERR(mlxsw_sp_port_vlan)) { 1924 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 1925 mlxsw_sp_port->local_port); 1926 err = PTR_ERR(mlxsw_sp_port_vlan); 1927 goto err_port_vlan_create; 1928 } 1929 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; 1930 1931 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, 1932 mlxsw_sp->ptp_ops->shaper_work); 1933 INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw, 1934 
mlxsw_sp_span_speed_update_work); 1935 1936 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 1937 err = register_netdev(dev); 1938 if (err) { 1939 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 1940 mlxsw_sp_port->local_port); 1941 goto err_register_netdev; 1942 } 1943 1944 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 1945 mlxsw_sp_port, dev); 1946 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 1947 return 0; 1948 1949 err_register_netdev: 1950 mlxsw_sp->ports[local_port] = NULL; 1951 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1952 err_port_vlan_create: 1953 err_port_pvid_set: 1954 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 1955 err_port_nve_init: 1956 err_port_vlan_clear: 1957 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 1958 err_port_qdiscs_init: 1959 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 1960 err_port_fids_init: 1961 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 1962 err_port_dcb_init: 1963 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 1964 err_port_tc_mc_mode: 1965 err_port_ets_init: 1966 err_port_buffers_init: 1967 err_port_admin_status_set: 1968 err_port_mtu_set: 1969 err_port_speed_by_width_set: 1970 err_port_system_port_mapping_set: 1971 err_dev_addr_init: 1972 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 1973 err_port_swid_set: 1974 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 1975 err_port_module_map: 1976 free_percpu(mlxsw_sp_port->pcpu_stats); 1977 err_alloc_stats: 1978 free_netdev(dev); 1979 err_alloc_etherdev: 1980 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 1981 return err; 1982 } 1983 1984 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 1985 { 1986 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1987 1988 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 1989 cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw); 1990 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); 1991 mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 1992 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 1993 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 1994 mlxsw_sp->ports[local_port] = NULL; 1995 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 1996 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 1997 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 1998 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 1999 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 2000 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 2001 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 2002 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 2003 free_percpu(mlxsw_sp_port->pcpu_stats); 2004 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 2005 free_netdev(mlxsw_sp_port->dev); 2006 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 2007 } 2008 2009 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) 2010 { 2011 struct mlxsw_sp_port *mlxsw_sp_port; 2012 int err; 2013 2014 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL); 2015 if (!mlxsw_sp_port) 2016 return -ENOMEM; 2017 2018 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 2019 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT; 2020 2021 err = mlxsw_core_cpu_port_init(mlxsw_sp->core, 2022 mlxsw_sp_port, 2023 mlxsw_sp->base_mac, 2024 sizeof(mlxsw_sp->base_mac)); 2025 if (err) { 2026 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n"); 2027 goto err_core_cpu_port_init; 2028 } 2029 2030 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port; 2031 return 0; 2032 2033 
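/* Error path: mlxsw_core_cpu_port_init() failed before any other state
 * was installed, so only the allocation itself needs undoing.
 */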
err_core_cpu_port_init: 2034 kfree(mlxsw_sp_port); 2035 return err; 2036 } 2037 2038 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) 2039 { 2040 struct mlxsw_sp_port *mlxsw_sp_port = 2041 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; 2042 2043 mlxsw_core_cpu_port_fini(mlxsw_sp->core); 2044 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL; 2045 kfree(mlxsw_sp_port); 2046 } 2047 2048 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 2049 { 2050 return mlxsw_sp->ports[local_port] != NULL; 2051 } 2052 2053 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 2054 { 2055 int i; 2056 2057 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 2058 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2059 mlxsw_sp_port_remove(mlxsw_sp, i); 2060 mlxsw_sp_cpu_port_remove(mlxsw_sp); 2061 kfree(mlxsw_sp->ports); 2062 mlxsw_sp->ports = NULL; 2063 } 2064 2065 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 2066 { 2067 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2068 struct mlxsw_sp_port_mapping *port_mapping; 2069 size_t alloc_size; 2070 int i; 2071 int err; 2072 2073 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 2074 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 2075 if (!mlxsw_sp->ports) 2076 return -ENOMEM; 2077 2078 err = mlxsw_sp_cpu_port_create(mlxsw_sp); 2079 if (err) 2080 goto err_cpu_port_create; 2081 2082 for (i = 1; i < max_ports; i++) { 2083 port_mapping = mlxsw_sp->port_mapping[i]; 2084 if (!port_mapping) 2085 continue; 2086 err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping); 2087 if (err) 2088 goto err_port_create; 2089 } 2090 return 0; 2091 2092 err_port_create: 2093 for (i--; i >= 1; i--) 2094 if (mlxsw_sp_port_created(mlxsw_sp, i)) 2095 mlxsw_sp_port_remove(mlxsw_sp, i); 2096 mlxsw_sp_cpu_port_remove(mlxsw_sp); 2097 err_cpu_port_create: 2098 kfree(mlxsw_sp->ports); 2099 mlxsw_sp->ports = NULL; 2100 return err; 2101 } 2102 2103 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) 2104 { 2105 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2106 struct mlxsw_sp_port_mapping port_mapping; 2107 int i; 2108 int err; 2109 2110 mlxsw_sp->port_mapping = kcalloc(max_ports, 2111 sizeof(struct mlxsw_sp_port_mapping *), 2112 GFP_KERNEL); 2113 if (!mlxsw_sp->port_mapping) 2114 return -ENOMEM; 2115 2116 for (i = 1; i < max_ports; i++) { 2117 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping); 2118 if (err) 2119 goto err_port_module_info_get; 2120 if (!port_mapping.width) 2121 continue; 2122 2123 mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping, 2124 sizeof(port_mapping), 2125 GFP_KERNEL); 2126 if (!mlxsw_sp->port_mapping[i]) { 2127 err = -ENOMEM; 2128 goto err_port_module_info_dup; 2129 } 2130 } 2131 return 0; 2132 2133 err_port_module_info_get: 2134 err_port_module_info_dup: 2135 for (i--; i >= 1; i--) 2136 kfree(mlxsw_sp->port_mapping[i]); 2137 kfree(mlxsw_sp->port_mapping); 2138 return err; 2139 } 2140 2141 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) 2142 { 2143 int i; 2144 2145 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 2146 kfree(mlxsw_sp->port_mapping[i]); 2147 kfree(mlxsw_sp->port_mapping); 2148 } 2149 2150 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width) 2151 { 2152 u8 offset = (local_port - 1) % max_width; 2153 2154 return local_port - offset; 2155 } 2156 2157 static int 2158 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 2159 struct mlxsw_sp_port_mapping *port_mapping, 2160 
unsigned int count, u8 offset) 2161 { 2162 struct mlxsw_sp_port_mapping split_port_mapping; 2163 int err, i; 2164 2165 split_port_mapping = *port_mapping; 2166 split_port_mapping.width /= count; 2167 for (i = 0; i < count; i++) { 2168 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset, 2169 base_port, &split_port_mapping); 2170 if (err) 2171 goto err_port_create; 2172 split_port_mapping.lane += split_port_mapping.width; 2173 } 2174 2175 return 0; 2176 2177 err_port_create: 2178 for (i--; i >= 0; i--) 2179 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 2180 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 2181 return err; 2182 } 2183 2184 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 2185 u8 base_port, 2186 unsigned int count, u8 offset) 2187 { 2188 struct mlxsw_sp_port_mapping *port_mapping; 2189 int i; 2190 2191 /* Go over original unsplit ports in the gap and recreate them. */ 2192 for (i = 0; i < count * offset; i++) { 2193 port_mapping = mlxsw_sp->port_mapping[base_port + i]; 2194 if (!port_mapping) 2195 continue; 2196 mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping); 2197 } 2198 } 2199 2200 static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core, 2201 unsigned int count, 2202 unsigned int max_width) 2203 { 2204 enum mlxsw_res_id local_ports_in_x_res_id; 2205 int split_width = max_width / count; 2206 2207 if (split_width == 1) 2208 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X; 2209 else if (split_width == 2) 2210 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X; 2211 else if (split_width == 4) 2212 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X; 2213 else 2214 return -EINVAL; 2215 2216 if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id)) 2217 return -EINVAL; 2218 return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id); 2219 } 2220 2221 static struct mlxsw_sp_port * 2222 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) 2223 { 2224 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) 2225 return mlxsw_sp->ports[local_port]; 2226 return NULL; 2227 } 2228 2229 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 2230 unsigned int count, 2231 struct netlink_ext_ack *extack) 2232 { 2233 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2234 struct mlxsw_sp_port_mapping port_mapping; 2235 struct mlxsw_sp_port *mlxsw_sp_port; 2236 int max_width; 2237 u8 base_port; 2238 int offset; 2239 int i; 2240 int err; 2241 2242 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2243 if (!mlxsw_sp_port) { 2244 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2245 local_port); 2246 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2247 return -EINVAL; 2248 } 2249 2250 max_width = mlxsw_core_module_max_width(mlxsw_core, 2251 mlxsw_sp_port->mapping.module); 2252 if (max_width < 0) { 2253 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); 2254 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); 2255 return max_width; 2256 } 2257 2258 /* A port that is not using its module's maximal width is already split and cannot be split further. 
*/ 2259 if (mlxsw_sp_port->mapping.width != max_width) { 2260 netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n"); 2261 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split"); 2262 return -EINVAL; 2263 } 2264 2265 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 2266 if (offset < 0) { 2267 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 2268 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 2269 return -EINVAL; 2270 } 2271 2272 /* The local port and the base port may differ only when a maximal 2273 * split is being done. 2274 */ 2275 base_port = count == max_width ? 2276 mlxsw_sp_cluster_base_port_get(local_port, max_width) : 2277 local_port; 2278 2279 for (i = 0; i < count * offset; i++) { 2280 /* The base port is expected to exist, and so is the port in 2281 * the middle in case of a maximal split count. 2282 */ 2283 if (i == 0 || (count == max_width && i == count / 2)) 2284 continue; 2285 2286 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) { 2287 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 2288 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 2289 return -EINVAL; 2290 } 2291 } 2292 2293 port_mapping = mlxsw_sp_port->mapping; 2294 2295 for (i = 0; i < count; i++) 2296 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 2297 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 2298 2299 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping, 2300 count, offset); 2301 if (err) { 2302 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2303 goto err_port_split_create; 2304 } 2305 2306 return 0; 2307 2308 err_port_split_create: 2309 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 2310 return err; 2311 } 2312 2313 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 2314 struct netlink_ext_ack *extack) 2315 { 2316 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2317 struct mlxsw_sp_port *mlxsw_sp_port; 2318 unsigned int count; 2319 int max_width; 2320 u8 base_port; 2321 int offset; 2322 int i; 2323 2324 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2325 if (!mlxsw_sp_port) { 2326 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2327 local_port); 2328 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2329 return -EINVAL; 2330 } 2331 2332 if (!mlxsw_sp_port->split) { 2333 netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); 2334 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 2335 return -EINVAL; 2336 } 2337 2338 max_width = mlxsw_core_module_max_width(mlxsw_core, 2339 mlxsw_sp_port->mapping.module); 2340 if (max_width < 0) { 2341 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); 2342 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); 2343 return max_width; 2344 } 2345 2346 count = max_width / mlxsw_sp_port->mapping.width; 2347 2348 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 2349 if (WARN_ON(offset < 0)) { 2350 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 2351 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 2352 return -EINVAL; 2353 } 2354 2355 base_port = mlxsw_sp_port->split_base_local_port; 2356 2357 for (i = 0; i < count; i++) 2358 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 2359 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 2360 2361 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 2362 2363 return 0; 2364 } 2365 2366 
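/* A port that went down no longer holds queued packets, so wipe the
 * per-queue backlog values cached by the periodic stats work; otherwise
 * stale backlog could keep being reported while the port is down.
 */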
static void 2367 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) 2368 { 2369 int i; 2370 2371 for (i = 0; i < TC_MAX_QUEUE; i++) 2372 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; 2373 } 2374 2375 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 2376 char *pude_pl, void *priv) 2377 { 2378 struct mlxsw_sp *mlxsw_sp = priv; 2379 struct mlxsw_sp_port *mlxsw_sp_port; 2380 enum mlxsw_reg_pude_oper_status status; 2381 u8 local_port; 2382 2383 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 2384 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2385 if (!mlxsw_sp_port) 2386 return; 2387 2388 status = mlxsw_reg_pude_oper_status_get(pude_pl); 2389 if (status == MLXSW_PORT_OPER_STATUS_UP) { 2390 netdev_info(mlxsw_sp_port->dev, "link up\n"); 2391 netif_carrier_on(mlxsw_sp_port->dev); 2392 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 2393 mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0); 2394 } else { 2395 netdev_info(mlxsw_sp_port->dev, "link down\n"); 2396 netif_carrier_off(mlxsw_sp_port->dev); 2397 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); 2398 } 2399 } 2400 2401 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 2402 char *mtpptr_pl, bool ingress) 2403 { 2404 u8 local_port; 2405 u8 num_rec; 2406 int i; 2407 2408 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 2409 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 2410 for (i = 0; i < num_rec; i++) { 2411 u8 domain_number; 2412 u8 message_type; 2413 u16 sequence_id; 2414 u64 timestamp; 2415 2416 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 2417 &domain_number, &sequence_id, 2418 &timestamp); 2419 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 2420 message_type, domain_number, 2421 sequence_id, timestamp); 2422 } 2423 } 2424 2425 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 2426 char *mtpptr_pl, void *priv) 2427 { 2428 struct mlxsw_sp *mlxsw_sp = priv; 2429 2430 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 2431 } 2432 2433 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 2434 char *mtpptr_pl, void *priv) 2435 { 2436 struct mlxsw_sp *mlxsw_sp = priv; 2437 2438 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 2439 } 2440 2441 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 2442 u8 local_port, void *priv) 2443 { 2444 struct mlxsw_sp *mlxsw_sp = priv; 2445 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2446 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 2447 2448 if (unlikely(!mlxsw_sp_port)) { 2449 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 2450 local_port); 2451 return; 2452 } 2453 2454 skb->dev = mlxsw_sp_port->dev; 2455 2456 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 2457 u64_stats_update_begin(&pcpu_stats->syncp); 2458 pcpu_stats->rx_packets++; 2459 pcpu_stats->rx_bytes += skb->len; 2460 u64_stats_update_end(&pcpu_stats->syncp); 2461 2462 skb->protocol = eth_type_trans(skb, skb->dev); 2463 netif_receive_skb(skb); 2464 } 2465 2466 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 2467 void *priv) 2468 { 2469 skb->offload_fwd_mark = 1; 2470 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2471 } 2472 2473 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 2474 u8 local_port, void *priv) 2475 { 2476 skb->offload_l3_fwd_mark = 1; 2477 skb->offload_fwd_mark = 1; 2478 return 
mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2479 } 2480 2481 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 2482 u8 local_port) 2483 { 2484 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 2485 } 2486 2487 void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 2488 u8 local_port) 2489 { 2490 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2491 struct mlxsw_sp_port_sample *sample; 2492 u32 size; 2493 2494 if (unlikely(!mlxsw_sp_port)) { 2495 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 2496 local_port); 2497 goto out; 2498 } 2499 2500 rcu_read_lock(); 2501 sample = rcu_dereference(mlxsw_sp_port->sample); 2502 if (!sample) 2503 goto out_unlock; 2504 size = sample->truncate ? sample->trunc_size : skb->len; 2505 psample_sample_packet(sample->psample_group, skb, size, 2506 mlxsw_sp_port->dev->ifindex, 0, sample->rate); 2507 out_unlock: 2508 rcu_read_unlock(); 2509 out: 2510 consume_skb(skb); 2511 } 2512 2513 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2514 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 2515 _is_ctrl, SP_##_trap_group, DISCARD) 2516 2517 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2518 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 2519 _is_ctrl, SP_##_trap_group, DISCARD) 2520 2521 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2522 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 2523 _is_ctrl, SP_##_trap_group, DISCARD) 2524 2525 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 2526 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 2527 2528 static const struct mlxsw_listener mlxsw_sp_listener[] = { 2529 /* Events */ 2530 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 2531 /* L2 traps */ 2532 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false), 2533 /* L3 traps */ 2534 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 2535 false), 2536 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 2537 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 2538 false), 2539 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 2540 ROUTER_EXP, false), 2541 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 2542 ROUTER_EXP, false), 2543 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 2544 ROUTER_EXP, false), 2545 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 2546 ROUTER_EXP, false), 2547 /* Multicast Router Traps */ 2548 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 2549 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 2550 /* NVE traps */ 2551 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false), 2552 }; 2553 2554 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 2555 /* Events */ 2556 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 2557 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 2558 }; 2559 2560 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 2561 { 2562 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2563 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 2564 enum mlxsw_reg_qpcr_ir_units ir_units; 2565 int max_cpu_policers; 2566 bool is_bytes; 2567 u8 burst_size; 2568 u32 rate; 2569 int i, err; 2570 2571 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 2572 return -EIO; 2573 2574 max_cpu_policers 
= MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2575 2576 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 2577 for (i = 0; i < max_cpu_policers; i++) { 2578 is_bytes = false; 2579 switch (i) { 2580 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2581 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2582 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2583 rate = 1024; 2584 burst_size = 7; 2585 break; 2586 default: 2587 continue; 2588 } 2589 2590 __set_bit(i, mlxsw_sp->trap->policers_usage); 2591 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 2592 burst_size); 2593 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 2594 if (err) 2595 return err; 2596 } 2597 2598 return 0; 2599 } 2600 2601 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 2602 { 2603 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2604 enum mlxsw_reg_htgt_trap_group i; 2605 int max_cpu_policers; 2606 int max_trap_groups; 2607 u8 priority, tc; 2608 u16 policer_id; 2609 int err; 2610 2611 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 2612 return -EIO; 2613 2614 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 2615 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2616 2617 for (i = 0; i < max_trap_groups; i++) { 2618 policer_id = i; 2619 switch (i) { 2620 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2621 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2622 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2623 priority = 1; 2624 tc = 1; 2625 break; 2626 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 2627 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 2628 tc = MLXSW_REG_HTGT_DEFAULT_TC; 2629 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 2630 break; 2631 default: 2632 continue; 2633 } 2634 2635 if (max_cpu_policers <= policer_id && 2636 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 2637 return -EIO; 2638 2639 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 2640 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2641 if (err) 2642 return err; 2643 } 2644 2645 return 0; 2646 } 2647 2648 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp, 2649 const struct mlxsw_listener listeners[], 2650 size_t listeners_count) 2651 { 2652 int i; 2653 int err; 2654 2655 for (i = 0; i < listeners_count; i++) { 2656 err = mlxsw_core_trap_register(mlxsw_sp->core, 2657 &listeners[i], 2658 mlxsw_sp); 2659 if (err) 2660 goto err_listener_register; 2661 2662 } 2663 return 0; 2664 2665 err_listener_register: 2666 for (i--; i >= 0; i--) { 2667 mlxsw_core_trap_unregister(mlxsw_sp->core, 2668 &listeners[i], 2669 mlxsw_sp); 2670 } 2671 return err; 2672 } 2673 2674 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp, 2675 const struct mlxsw_listener listeners[], 2676 size_t listeners_count) 2677 { 2678 int i; 2679 2680 for (i = 0; i < listeners_count; i++) { 2681 mlxsw_core_trap_unregister(mlxsw_sp->core, 2682 &listeners[i], 2683 mlxsw_sp); 2684 } 2685 } 2686 2687 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 2688 { 2689 struct mlxsw_sp_trap *trap; 2690 u64 max_policers; 2691 int err; 2692 2693 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS)) 2694 return -EIO; 2695 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS); 2696 trap = kzalloc(struct_size(trap, policers_usage, 2697 BITS_TO_LONGS(max_policers)), GFP_KERNEL); 2698 if (!trap) 2699 return -ENOMEM; 2700 trap->max_policers = max_policers; 2701 mlxsw_sp->trap = trap; 2702 2703 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 2704 if (err) 2705 goto err_cpu_policers_set; 2706 2707 err = 
mlxsw_sp_trap_groups_set(mlxsw_sp->core); 2708 if (err) 2709 goto err_trap_groups_set; 2710 2711 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener, 2712 ARRAY_SIZE(mlxsw_sp_listener)); 2713 if (err) 2714 goto err_traps_register; 2715 2716 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners, 2717 mlxsw_sp->listeners_count); 2718 if (err) 2719 goto err_extra_traps_init; 2720 2721 return 0; 2722 2723 err_extra_traps_init: 2724 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 2725 ARRAY_SIZE(mlxsw_sp_listener)); 2726 err_traps_register: 2727 err_trap_groups_set: 2728 err_cpu_policers_set: 2729 kfree(trap); 2730 return err; 2731 } 2732 2733 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 2734 { 2735 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners, 2736 mlxsw_sp->listeners_count); 2737 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 2738 ARRAY_SIZE(mlxsw_sp_listener)); 2739 kfree(mlxsw_sp->trap); 2740 } 2741 2742 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 2743 2744 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2745 { 2746 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2747 u32 seed; 2748 int err; 2749 2750 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 2751 MLXSW_SP_LAG_SEED_INIT); 2752 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 2753 MLXSW_REG_SLCR_LAG_HASH_DMAC | 2754 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 2755 MLXSW_REG_SLCR_LAG_HASH_VLANID | 2756 MLXSW_REG_SLCR_LAG_HASH_SIP | 2757 MLXSW_REG_SLCR_LAG_HASH_DIP | 2758 MLXSW_REG_SLCR_LAG_HASH_SPORT | 2759 MLXSW_REG_SLCR_LAG_HASH_DPORT | 2760 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 2761 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2762 if (err) 2763 return err; 2764 2765 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 2766 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 2767 return -EIO; 2768 2769 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 2770 sizeof(struct mlxsw_sp_upper), 2771 GFP_KERNEL); 2772 if (!mlxsw_sp->lags) 2773 return -ENOMEM; 2774 2775 return 0; 2776 } 2777 2778 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 2779 { 2780 kfree(mlxsw_sp->lags); 2781 } 2782 2783 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 2784 { 2785 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2786 2787 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 2788 MLXSW_REG_HTGT_INVALID_POLICER, 2789 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2790 MLXSW_REG_HTGT_DEFAULT_TC); 2791 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2792 } 2793 2794 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 2795 .clock_init = mlxsw_sp1_ptp_clock_init, 2796 .clock_fini = mlxsw_sp1_ptp_clock_fini, 2797 .init = mlxsw_sp1_ptp_init, 2798 .fini = mlxsw_sp1_ptp_fini, 2799 .receive = mlxsw_sp1_ptp_receive, 2800 .transmitted = mlxsw_sp1_ptp_transmitted, 2801 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 2802 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, 2803 .shaper_work = mlxsw_sp1_ptp_shaper_work, 2804 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 2805 .get_stats_count = mlxsw_sp1_get_stats_count, 2806 .get_stats_strings = mlxsw_sp1_get_stats_strings, 2807 .get_stats = mlxsw_sp1_get_stats, 2808 }; 2809 2810 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 2811 .clock_init = mlxsw_sp2_ptp_clock_init, 2812 .clock_fini = mlxsw_sp2_ptp_clock_fini, 2813 .init = mlxsw_sp2_ptp_init, 2814 .fini = mlxsw_sp2_ptp_fini, 2815 .receive = mlxsw_sp2_ptp_receive, 2816 .transmitted = mlxsw_sp2_ptp_transmitted, 2817 .hwtstamp_get = 
mlxsw_sp2_ptp_hwtstamp_get, 2818 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 2819 .shaper_work = mlxsw_sp2_ptp_shaper_work, 2820 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 2821 .get_stats_count = mlxsw_sp2_get_stats_count, 2822 .get_stats_strings = mlxsw_sp2_get_stats_strings, 2823 .get_stats = mlxsw_sp2_get_stats, 2824 }; 2825 2826 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 2827 unsigned long event, void *ptr); 2828 2829 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 2830 const struct mlxsw_bus_info *mlxsw_bus_info, 2831 struct netlink_ext_ack *extack) 2832 { 2833 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2834 int err; 2835 2836 mlxsw_sp->core = mlxsw_core; 2837 mlxsw_sp->bus_info = mlxsw_bus_info; 2838 2839 err = mlxsw_sp_fw_rev_validate(mlxsw_sp); 2840 if (err) 2841 return err; 2842 2843 mlxsw_core_emad_string_tlv_enable(mlxsw_core); 2844 2845 err = mlxsw_sp_base_mac_get(mlxsw_sp); 2846 if (err) { 2847 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 2848 return err; 2849 } 2850 2851 err = mlxsw_sp_kvdl_init(mlxsw_sp); 2852 if (err) { 2853 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 2854 return err; 2855 } 2856 2857 err = mlxsw_sp_fids_init(mlxsw_sp); 2858 if (err) { 2859 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 2860 goto err_fids_init; 2861 } 2862 2863 err = mlxsw_sp_policers_init(mlxsw_sp); 2864 if (err) { 2865 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n"); 2866 goto err_policers_init; 2867 } 2868 2869 err = mlxsw_sp_traps_init(mlxsw_sp); 2870 if (err) { 2871 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 2872 goto err_traps_init; 2873 } 2874 2875 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 2876 if (err) { 2877 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 2878 goto err_devlink_traps_init; 2879 } 2880 2881 err = mlxsw_sp_buffers_init(mlxsw_sp); 2882 if (err) { 2883 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 2884 goto err_buffers_init; 2885 } 2886 2887 err = mlxsw_sp_lag_init(mlxsw_sp); 2888 if (err) { 2889 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 2890 goto err_lag_init; 2891 } 2892 2893 /* Initialize SPAN before router and switchdev, so that those components 2894 * can call mlxsw_sp_span_respin(). 
2895 */ 2896 err = mlxsw_sp_span_init(mlxsw_sp); 2897 if (err) { 2898 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 2899 goto err_span_init; 2900 } 2901 2902 err = mlxsw_sp_switchdev_init(mlxsw_sp); 2903 if (err) { 2904 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 2905 goto err_switchdev_init; 2906 } 2907 2908 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 2909 if (err) { 2910 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 2911 goto err_counter_pool_init; 2912 } 2913 2914 err = mlxsw_sp_afa_init(mlxsw_sp); 2915 if (err) { 2916 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 2917 goto err_afa_init; 2918 } 2919 2920 err = mlxsw_sp_nve_init(mlxsw_sp); 2921 if (err) { 2922 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 2923 goto err_nve_init; 2924 } 2925 2926 err = mlxsw_sp_acl_init(mlxsw_sp); 2927 if (err) { 2928 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 2929 goto err_acl_init; 2930 } 2931 2932 err = mlxsw_sp_router_init(mlxsw_sp, extack); 2933 if (err) { 2934 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 2935 goto err_router_init; 2936 } 2937 2938 if (mlxsw_sp->bus_info->read_frc_capable) { 2939 /* NULL is a valid return value from clock_init */ 2940 mlxsw_sp->clock = 2941 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp, 2942 mlxsw_sp->bus_info->dev); 2943 if (IS_ERR(mlxsw_sp->clock)) { 2944 err = PTR_ERR(mlxsw_sp->clock); 2945 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n"); 2946 goto err_ptp_clock_init; 2947 } 2948 } 2949 2950 if (mlxsw_sp->clock) { 2951 /* NULL is a valid return value from ptp_ops->init */ 2952 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp); 2953 if (IS_ERR(mlxsw_sp->ptp_state)) { 2954 err = PTR_ERR(mlxsw_sp->ptp_state); 2955 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n"); 2956 goto err_ptp_init; 2957 } 2958 } 2959 2960 /* Initialize netdevice notifier after router and SPAN are initialized, 2961 * so that the event handler can use router structures and call SPAN 2962 * respin. 
2963 */ 2964 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 2965 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 2966 &mlxsw_sp->netdevice_nb); 2967 if (err) { 2968 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 2969 goto err_netdev_notifier; 2970 } 2971 2972 err = mlxsw_sp_dpipe_init(mlxsw_sp); 2973 if (err) { 2974 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 2975 goto err_dpipe_init; 2976 } 2977 2978 err = mlxsw_sp_port_module_info_init(mlxsw_sp); 2979 if (err) { 2980 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n"); 2981 goto err_port_module_info_init; 2982 } 2983 2984 err = mlxsw_sp_ports_create(mlxsw_sp); 2985 if (err) { 2986 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 2987 goto err_ports_create; 2988 } 2989 2990 return 0; 2991 2992 err_ports_create: 2993 mlxsw_sp_port_module_info_fini(mlxsw_sp); 2994 err_port_module_info_init: 2995 mlxsw_sp_dpipe_fini(mlxsw_sp); 2996 err_dpipe_init: 2997 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 2998 &mlxsw_sp->netdevice_nb); 2999 err_netdev_notifier: 3000 if (mlxsw_sp->clock) 3001 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3002 err_ptp_init: 3003 if (mlxsw_sp->clock) 3004 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3005 err_ptp_clock_init: 3006 mlxsw_sp_router_fini(mlxsw_sp); 3007 err_router_init: 3008 mlxsw_sp_acl_fini(mlxsw_sp); 3009 err_acl_init: 3010 mlxsw_sp_nve_fini(mlxsw_sp); 3011 err_nve_init: 3012 mlxsw_sp_afa_fini(mlxsw_sp); 3013 err_afa_init: 3014 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3015 err_counter_pool_init: 3016 mlxsw_sp_switchdev_fini(mlxsw_sp); 3017 err_switchdev_init: 3018 mlxsw_sp_span_fini(mlxsw_sp); 3019 err_span_init: 3020 mlxsw_sp_lag_fini(mlxsw_sp); 3021 err_lag_init: 3022 mlxsw_sp_buffers_fini(mlxsw_sp); 3023 err_buffers_init: 3024 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3025 err_devlink_traps_init: 3026 mlxsw_sp_traps_fini(mlxsw_sp); 3027 err_traps_init: 3028 mlxsw_sp_policers_fini(mlxsw_sp); 3029 err_policers_init: 3030 mlxsw_sp_fids_fini(mlxsw_sp); 3031 err_fids_init: 3032 mlxsw_sp_kvdl_fini(mlxsw_sp); 3033 return err; 3034 } 3035 3036 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 3037 const struct mlxsw_bus_info *mlxsw_bus_info, 3038 struct netlink_ext_ack *extack) 3039 { 3040 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3041 3042 mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev; 3043 mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME; 3044 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 3045 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 3046 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 3047 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 3048 mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops; 3049 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 3050 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 3051 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 3052 mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr; 3053 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 3054 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 3055 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; 3056 mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; 3057 mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops; 3058 mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops; 3059 mlxsw_sp->listeners = mlxsw_sp1_listener; 3060 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 3061 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; 3062 3063 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3064 } 3065 3066 static 
int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 3067 const struct mlxsw_bus_info *mlxsw_bus_info, 3068 struct netlink_ext_ack *extack) 3069 { 3070 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3071 3072 mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev; 3073 mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME; 3074 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3075 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3076 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3077 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3078 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3079 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3080 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3081 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3082 mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; 3083 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3084 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3085 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3086 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; 3087 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3088 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3089 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; 3090 3091 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3092 } 3093 3094 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, 3095 const struct mlxsw_bus_info *mlxsw_bus_info, 3096 struct netlink_ext_ack *extack) 3097 { 3098 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3099 3100 mlxsw_sp->req_rev = &mlxsw_sp3_fw_rev; 3101 mlxsw_sp->fw_filename = MLXSW_SP3_FW_FILENAME; 3102 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3103 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3104 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3105 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3106 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3107 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3108 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3109 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3110 mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; 3111 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3112 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3113 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3114 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3115 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3116 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3117 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; 3118 3119 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3120 } 3121 3122 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3123 { 3124 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3125 3126 mlxsw_sp_ports_remove(mlxsw_sp); 3127 mlxsw_sp_port_module_info_fini(mlxsw_sp); 3128 mlxsw_sp_dpipe_fini(mlxsw_sp); 3129 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3130 &mlxsw_sp->netdevice_nb); 3131 if (mlxsw_sp->clock) { 3132 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3133 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3134 } 3135 mlxsw_sp_router_fini(mlxsw_sp); 3136 mlxsw_sp_acl_fini(mlxsw_sp); 3137 mlxsw_sp_nve_fini(mlxsw_sp); 3138 mlxsw_sp_afa_fini(mlxsw_sp); 3139 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3140 mlxsw_sp_switchdev_fini(mlxsw_sp); 3141 mlxsw_sp_span_fini(mlxsw_sp); 3142 mlxsw_sp_lag_fini(mlxsw_sp); 3143 mlxsw_sp_buffers_fini(mlxsw_sp); 3144 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3145 mlxsw_sp_traps_fini(mlxsw_sp); 3146 mlxsw_sp_policers_fini(mlxsw_sp); 3147 mlxsw_sp_fids_fini(mlxsw_sp); 3148 mlxsw_sp_kvdl_fini(mlxsw_sp); 3149 } 3150 3151 /* Per-FID flood tables are used for 
both "true" 802.1D FIDs and emulated 3152 * 802.1Q FIDs 3153 */ 3154 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 3155 VLAN_VID_MASK - 1) 3156 3157 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 3158 .used_max_mid = 1, 3159 .max_mid = MLXSW_SP_MID_MAX, 3160 .used_flood_tables = 1, 3161 .used_flood_mode = 1, 3162 .flood_mode = 3, 3163 .max_fid_flood_tables = 3, 3164 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3165 .used_max_ib_mc = 1, 3166 .max_ib_mc = 0, 3167 .used_max_pkey = 1, 3168 .max_pkey = 0, 3169 .used_kvd_sizes = 1, 3170 .kvd_hash_single_parts = 59, 3171 .kvd_hash_double_parts = 41, 3172 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3173 .swid_config = { 3174 { 3175 .used_type = 1, 3176 .type = MLXSW_PORT_SWID_TYPE_ETH, 3177 } 3178 }, 3179 }; 3180 3181 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 3182 .used_max_mid = 1, 3183 .max_mid = MLXSW_SP_MID_MAX, 3184 .used_flood_tables = 1, 3185 .used_flood_mode = 1, 3186 .flood_mode = 3, 3187 .max_fid_flood_tables = 3, 3188 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3189 .used_max_ib_mc = 1, 3190 .max_ib_mc = 0, 3191 .used_max_pkey = 1, 3192 .max_pkey = 0, 3193 .swid_config = { 3194 { 3195 .used_type = 1, 3196 .type = MLXSW_PORT_SWID_TYPE_ETH, 3197 } 3198 }, 3199 }; 3200 3201 static void 3202 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 3203 struct devlink_resource_size_params *kvd_size_params, 3204 struct devlink_resource_size_params *linear_size_params, 3205 struct devlink_resource_size_params *hash_double_size_params, 3206 struct devlink_resource_size_params *hash_single_size_params) 3207 { 3208 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3209 KVD_SINGLE_MIN_SIZE); 3210 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3211 KVD_DOUBLE_MIN_SIZE); 3212 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3213 u32 linear_size_min = 0; 3214 3215 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 3216 MLXSW_SP_KVD_GRANULARITY, 3217 DEVLINK_RESOURCE_UNIT_ENTRY); 3218 devlink_resource_size_params_init(linear_size_params, linear_size_min, 3219 kvd_size - single_size_min - 3220 double_size_min, 3221 MLXSW_SP_KVD_GRANULARITY, 3222 DEVLINK_RESOURCE_UNIT_ENTRY); 3223 devlink_resource_size_params_init(hash_double_size_params, 3224 double_size_min, 3225 kvd_size - single_size_min - 3226 linear_size_min, 3227 MLXSW_SP_KVD_GRANULARITY, 3228 DEVLINK_RESOURCE_UNIT_ENTRY); 3229 devlink_resource_size_params_init(hash_single_size_params, 3230 single_size_min, 3231 kvd_size - double_size_min - 3232 linear_size_min, 3233 MLXSW_SP_KVD_GRANULARITY, 3234 DEVLINK_RESOURCE_UNIT_ENTRY); 3235 } 3236 3237 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3238 { 3239 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3240 struct devlink_resource_size_params hash_single_size_params; 3241 struct devlink_resource_size_params hash_double_size_params; 3242 struct devlink_resource_size_params linear_size_params; 3243 struct devlink_resource_size_params kvd_size_params; 3244 u32 kvd_size, single_size, double_size, linear_size; 3245 const struct mlxsw_config_profile *profile; 3246 int err; 3247 3248 profile = &mlxsw_sp1_config_profile; 3249 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3250 return -EIO; 3251 3252 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 3253 &linear_size_params, 3254 &hash_double_size_params, 3255 &hash_single_size_params); 3256 3257 kvd_size = 
MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3258 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3259 kvd_size, MLXSW_SP_RESOURCE_KVD, 3260 DEVLINK_RESOURCE_ID_PARENT_TOP, 3261 &kvd_size_params); 3262 if (err) 3263 return err; 3264 3265 linear_size = profile->kvd_linear_size; 3266 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 3267 linear_size, 3268 MLXSW_SP_RESOURCE_KVD_LINEAR, 3269 MLXSW_SP_RESOURCE_KVD, 3270 &linear_size_params); 3271 if (err) 3272 return err; 3273 3274 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 3275 if (err) 3276 return err; 3277 3278 double_size = kvd_size - linear_size; 3279 double_size *= profile->kvd_hash_double_parts; 3280 double_size /= profile->kvd_hash_double_parts + 3281 profile->kvd_hash_single_parts; 3282 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 3283 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 3284 double_size, 3285 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3286 MLXSW_SP_RESOURCE_KVD, 3287 &hash_double_size_params); 3288 if (err) 3289 return err; 3290 3291 single_size = kvd_size - double_size - linear_size; 3292 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 3293 single_size, 3294 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3295 MLXSW_SP_RESOURCE_KVD, 3296 &hash_single_size_params); 3297 if (err) 3298 return err; 3299 3300 return 0; 3301 } 3302 3303 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3304 { 3305 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3306 struct devlink_resource_size_params kvd_size_params; 3307 u32 kvd_size; 3308 3309 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3310 return -EIO; 3311 3312 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3313 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, 3314 MLXSW_SP_KVD_GRANULARITY, 3315 DEVLINK_RESOURCE_UNIT_ENTRY); 3316 3317 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3318 kvd_size, MLXSW_SP_RESOURCE_KVD, 3319 DEVLINK_RESOURCE_ID_PARENT_TOP, 3320 &kvd_size_params); 3321 } 3322 3323 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core) 3324 { 3325 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3326 struct devlink_resource_size_params span_size_params; 3327 u32 max_span; 3328 3329 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN)) 3330 return -EIO; 3331 3332 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN); 3333 devlink_resource_size_params_init(&span_size_params, max_span, max_span, 3334 1, DEVLINK_RESOURCE_UNIT_ENTRY); 3335 3336 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN, 3337 max_span, MLXSW_SP_RESOURCE_SPAN, 3338 DEVLINK_RESOURCE_ID_PARENT_TOP, 3339 &span_size_params); 3340 } 3341 3342 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 3343 { 3344 int err; 3345 3346 err = mlxsw_sp1_resources_kvd_register(mlxsw_core); 3347 if (err) 3348 return err; 3349 3350 err = mlxsw_sp_resources_span_register(mlxsw_core); 3351 if (err) 3352 goto err_resources_span_register; 3353 3354 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3355 if (err) 3356 goto err_resources_counter_register; 3357 3358 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3359 if (err) 3360 goto err_resources_counter_register; 3361 3362 return 0; 3363 3364 err_resources_counter_register: 3365 err_resources_span_register: 3366 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); 3367 return err; 3368 } 3369 3370 
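/* Spectrum-2 and Spectrum-3 share this callback; their KVD is
 * registered as a single resource, without the linear/hash partitions
 * that Spectrum-1 exposes above.
 */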
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 3371 { 3372 int err; 3373 3374 err = mlxsw_sp2_resources_kvd_register(mlxsw_core); 3375 if (err) 3376 return err; 3377 3378 err = mlxsw_sp_resources_span_register(mlxsw_core); 3379 if (err) 3380 goto err_resources_span_register; 3381 3382 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3383 if (err) 3384 goto err_resources_counter_register; 3385 3386 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3387 if (err) 3388 goto err_resources_counter_register; 3389 3390 return 0; 3391 3392 err_resources_counter_register: 3393 err_resources_span_register: 3394 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); 3395 return err; 3396 } 3397 3398 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 3399 const struct mlxsw_config_profile *profile, 3400 u64 *p_single_size, u64 *p_double_size, 3401 u64 *p_linear_size) 3402 { 3403 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3404 u32 double_size; 3405 int err; 3406 3407 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3408 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 3409 return -EIO; 3410 3411 /* The hash part is what is left of the kvd without the 3412 * linear part. It is split into the single size and 3413 * double size by the parts ratio from the profile. 3414 * Both sizes must be multiples of the 3415 * granularity from the profile. In case the user 3416 * provided the sizes, they are obtained via devlink. 3417 */ 3418 err = devlink_resource_size_get(devlink, 3419 MLXSW_SP_RESOURCE_KVD_LINEAR, 3420 p_linear_size); 3421 if (err) 3422 *p_linear_size = profile->kvd_linear_size; 3423 3424 err = devlink_resource_size_get(devlink, 3425 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3426 p_double_size); 3427 if (err) { 3428 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3429 *p_linear_size; 3430 double_size *= profile->kvd_hash_double_parts; 3431 double_size /= profile->kvd_hash_double_parts + 3432 profile->kvd_hash_single_parts; 3433 *p_double_size = rounddown(double_size, 3434 MLXSW_SP_KVD_GRANULARITY); 3435 } 3436 3437 err = devlink_resource_size_get(devlink, 3438 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3439 p_single_size); 3440 if (err) 3441 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3442 *p_double_size - *p_linear_size; 3443 3444 /* Check that the results are legal. 
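 * The single and double parts must respect the firmware-reported
 * minimal sizes, and the linear part must fit within the total KVD.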
*/ 3445 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3446 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 3447 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 3448 return -EIO; 3449 3450 return 0; 3451 } 3452 3453 static int 3454 mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id, 3455 union devlink_param_value val, 3456 struct netlink_ext_ack *extack) 3457 { 3458 if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) && 3459 (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) { 3460 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'"); 3461 return -EINVAL; 3462 } 3463 3464 return 0; 3465 } 3466 3467 static const struct devlink_param mlxsw_sp_devlink_params[] = { 3468 DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, 3469 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), 3470 NULL, NULL, 3471 mlxsw_sp_devlink_param_fw_load_policy_validate), 3472 }; 3473 3474 static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core) 3475 { 3476 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3477 union devlink_param_value value; 3478 int err; 3479 3480 err = devlink_params_register(devlink, mlxsw_sp_devlink_params, 3481 ARRAY_SIZE(mlxsw_sp_devlink_params)); 3482 if (err) 3483 return err; 3484 3485 value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER; 3486 devlink_param_driverinit_value_set(devlink, 3487 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 3488 value); 3489 return 0; 3490 } 3491 3492 static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core) 3493 { 3494 devlink_params_unregister(priv_to_devlink(mlxsw_core), 3495 mlxsw_sp_devlink_params, 3496 ARRAY_SIZE(mlxsw_sp_devlink_params)); 3497 } 3498 3499 static int 3500 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 3501 struct devlink_param_gset_ctx *ctx) 3502 { 3503 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3504 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3505 3506 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 3507 return 0; 3508 } 3509 3510 static int 3511 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 3512 struct devlink_param_gset_ctx *ctx) 3513 { 3514 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3515 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3516 3517 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 3518 } 3519 3520 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 3521 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3522 "acl_region_rehash_interval", 3523 DEVLINK_PARAM_TYPE_U32, 3524 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 3525 mlxsw_sp_params_acl_region_rehash_intrvl_get, 3526 mlxsw_sp_params_acl_region_rehash_intrvl_set, 3527 NULL), 3528 }; 3529 3530 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 3531 { 3532 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3533 union devlink_param_value value; 3534 int err; 3535 3536 err = mlxsw_sp_params_register(mlxsw_core); 3537 if (err) 3538 return err; 3539 3540 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 3541 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3542 if (err) 3543 goto err_devlink_params_register; 3544 3545 value.vu32 = 0; 3546 devlink_param_driverinit_value_set(devlink, 3547 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3548 value); 3549 return 0; 3550 3551 err_devlink_params_register: 3552 mlxsw_sp_params_unregister(mlxsw_core); 3553 
return err; 3554 } 3555 3556 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 3557 { 3558 devlink_params_unregister(priv_to_devlink(mlxsw_core), 3559 mlxsw_sp2_devlink_params, 3560 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3561 mlxsw_sp_params_unregister(mlxsw_core); 3562 } 3563 3564 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 3565 struct sk_buff *skb, u8 local_port) 3566 { 3567 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3568 3569 skb_pull(skb, MLXSW_TXHDR_LEN); 3570 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 3571 } 3572 3573 static struct mlxsw_driver mlxsw_sp1_driver = { 3574 .kind = mlxsw_sp1_driver_name, 3575 .priv_size = sizeof(struct mlxsw_sp), 3576 .init = mlxsw_sp1_init, 3577 .fini = mlxsw_sp_fini, 3578 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3579 .port_split = mlxsw_sp_port_split, 3580 .port_unsplit = mlxsw_sp_port_unsplit, 3581 .sb_pool_get = mlxsw_sp_sb_pool_get, 3582 .sb_pool_set = mlxsw_sp_sb_pool_set, 3583 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3584 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3585 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3586 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3587 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3588 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3589 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3590 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3591 .flash_update = mlxsw_sp_flash_update, 3592 .trap_init = mlxsw_sp_trap_init, 3593 .trap_fini = mlxsw_sp_trap_fini, 3594 .trap_action_set = mlxsw_sp_trap_action_set, 3595 .trap_group_init = mlxsw_sp_trap_group_init, 3596 .trap_group_set = mlxsw_sp_trap_group_set, 3597 .trap_policer_init = mlxsw_sp_trap_policer_init, 3598 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3599 .trap_policer_set = mlxsw_sp_trap_policer_set, 3600 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3601 .txhdr_construct = mlxsw_sp_txhdr_construct, 3602 .resources_register = mlxsw_sp1_resources_register, 3603 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 3604 .params_register = mlxsw_sp_params_register, 3605 .params_unregister = mlxsw_sp_params_unregister, 3606 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3607 .txhdr_len = MLXSW_TXHDR_LEN, 3608 .profile = &mlxsw_sp1_config_profile, 3609 .res_query_enabled = true, 3610 }; 3611 3612 static struct mlxsw_driver mlxsw_sp2_driver = { 3613 .kind = mlxsw_sp2_driver_name, 3614 .priv_size = sizeof(struct mlxsw_sp), 3615 .init = mlxsw_sp2_init, 3616 .fini = mlxsw_sp_fini, 3617 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3618 .port_split = mlxsw_sp_port_split, 3619 .port_unsplit = mlxsw_sp_port_unsplit, 3620 .sb_pool_get = mlxsw_sp_sb_pool_get, 3621 .sb_pool_set = mlxsw_sp_sb_pool_set, 3622 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3623 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3624 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3625 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3626 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3627 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3628 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3629 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3630 .flash_update = mlxsw_sp_flash_update, 3631 .trap_init = mlxsw_sp_trap_init, 3632 .trap_fini = mlxsw_sp_trap_fini, 3633 .trap_action_set = mlxsw_sp_trap_action_set, 3634 .trap_group_init = mlxsw_sp_trap_group_init, 3635 .trap_group_set = mlxsw_sp_trap_group_set, 3636 
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind = mlxsw_sp1_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp1_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp1_resources_register,
	.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
	.params_register = mlxsw_sp_params_register,
	.params_unregister = mlxsw_sp_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp1_config_profile,
	.res_query_enabled = true,
};

static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind = mlxsw_sp2_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp2_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
};

static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind = mlxsw_sp3_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp3_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
};

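/* Helpers for resolving the mlxsw port structure behind an arbitrary
 * netdevice, walking the lower devices of stacked devices (LAG, bridge,
 * VLAN) when needed.
 */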
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

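/* Low-level LAG plumbing: the SLDR register creates / destroys LAGs and
 * manages distributor port membership, while the SLCOR register manages
 * collector membership of individual ports.
 */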
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

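/* Return the LAG ID already associated with lag_dev, or the first free one.
 * Fails with -EBUSY when all LAG IDs are taken by other devices.
 */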
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

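/* Join a port to a LAG: create the LAG in the device on first reference,
 * pick a free member index, program the collector and record the mapping
 * in the core.
 */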
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;

err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are members of
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}

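/* Collection (Rx) and distribution (Tx) are enabled together when the
 * bonding driver reports the port as tx_enabled and disabled together
 * otherwise; see mlxsw_sp_port_lag_changed() below.
 */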
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

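/* Set the STP state of all possible VLANs on the port to either forwarding
 * or discarding with a single SPMS register write.
 */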
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

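/* Sanity checks for bridges with VxLAN lowers: only one VxLAN device is
 * supported in a VLAN-unaware bridge, and in a VLAN-aware bridge no two
 * VxLAN devices may share the same PVID.
 */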
static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

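/* NETDEV_PRECHANGEUPPER is used to veto unsupported topologies before they
 * are committed, while NETDEV_CHANGEUPPER performs the actual join / leave
 * of the upper device.
 */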
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

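/* Events on a LAG device are replayed on each of its member ports. */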
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

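/* Top-level netdevice notifier: revalidates SPAN entries on any event and
 * then dispatches to the appropriate handler based on the device type.
 */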
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);