// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"
#include "../mlxfw/mlxfw.h"

/* Firmware revision required by this driver on Spectrum-1 devices. */
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
#define MLXSW_SP1_FWREV_SUBMINOR 2714
/* Oldest minor FW revision that supports being reset right after flashing;
 * see the -EAGAIN handling in mlxsw_sp_fw_rev_validate().
 */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware image file name, derived from the required revision numbers. */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

/* Firmware revision required by this driver on Spectrum-2 devices. */
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2000
#define MLXSW_SP2_FWREV_SUBMINOR 2714

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP2_FWREV_MINOR,
	.subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP2_FWREV_MINOR) \
	"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp_driver_version[] = "1.0";

/* Per-ASIC masks of the fixed bits of the base MAC address. The unmasked
 * low bits are presumably free for deriving per-port addresses (see
 * mlxsw_sp_port_dev_addr_init(), which adds local_port to the last byte)
 * — NOTE(review): confirm against the ASIC datasheet.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

/* Glue binding the generic mlxfw flashing machinery to a Spectrum device;
 * container_of() is used to get from the embedded mlxfw_dev back here.
 */
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

/* PTP hooks, selected per device (presumably per ASIC generation — the SP1
 * vs SP2 split elsewhere in this file suggests so; confirm).
 */
struct mlxsw_sp_ptp_ops {
	struct mlxsw_sp_ptp_clock *
	(*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev);
	void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock);

	struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp);
	void (*fini)(struct mlxsw_sp_ptp_state *ptp_state);

	/* Notify a driver that a packet that might be PTP was received. Driver
	 * is responsible for freeing the passed-in SKB.
	 */
	void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			u8 local_port);

	/* Notify a driver that a timestamped packet was transmitted. Driver
	 * is responsible for freeing the passed-in SKB.
	 */
	void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			    u8 local_port);

	int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	void (*shaper_work)(struct work_struct *work);
	int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
			   struct ethtool_ts_info *info);
	int (*get_stats_count)(void);
	void (*get_stats_strings)(u8 **p);
	void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  u64 *data, int data_index);
};

/* Per-device hook for computing the SPAN mirror buffer size. */
struct mlxsw_sp_span_ops {
	u32 (*buffsize_get)(int mtu, u32 speed);
};

/* mlxfw_dev_ops::component_query - query the MCQI register for the maximum
 * size, alignment and maximum write size of @component_index. Alignment is
 * forced to at least 2 bits and the write size is capped by the MCDA
 * payload limit before being reported back to the mlxfw core.
 */
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

/* mlxfw_dev_ops::fsm_lock - read the FW update FSM state from MCC and, if
 * it is idle, take the update handle via the LOCK_UPDATE_HANDLE
 * instruction. Returns -EBUSY when the FSM is not idle.
 */
static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw_dev_ops::fsm_component_update - announce which component is about
 * to be downloaded and its total size.
 */
static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw_dev_ops::fsm_block_download - write one block of the component
 * image at @offset through the MCDA register.
 */
static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

/* mlxfw_dev_ops::fsm_component_verify - ask FW to verify the downloaded
 * component.
 */
static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw_dev_ops::fsm_activate - activate the newly flashed image. */
static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw_dev_ops::fsm_query_state - read the FSM state and error code from
 * MCC. The raw error code is clamped to MLXFW_FSM_STATE_ERR_MAX so it is
 * always a valid enum mlxfw_fsm_state_err value.
 */
static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

/* mlxfw_dev_ops::fsm_cancel - abort an in-progress update. Best effort;
 * the write status is intentionally ignored (the op returns void).
 */
static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw_dev_ops::fsm_release - release the update handle taken by
 * mlxsw_sp_fsm_lock(). Best effort; write status ignored.
 */
static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query	= mlxsw_sp_component_query,
	.fsm_lock		= mlxsw_sp_fsm_lock,
	.fsm_component_update	= mlxsw_sp_fsm_component_update,
	.fsm_block_download	= mlxsw_sp_fsm_block_download,
	.fsm_component_verify	= mlxsw_sp_fsm_component_verify,
	.fsm_activate		= mlxsw_sp_fsm_activate,
	.fsm_query_state	= mlxsw_sp_fsm_query_state,
	.fsm_cancel		= mlxsw_sp_fsm_cancel,
	.fsm_release		= mlxsw_sp_fsm_release,
};

/* Flash @firmware onto the device through the generic mlxfw helper,
 * bracketed by mlxsw_core_fw_flash_start()/end() notifications to the core.
 */
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
			.devlink = priv_to_devlink(mlxsw_sp->core),
		},
		.mlxsw_sp = mlxsw_sp
	};
	int err;

	mlxsw_core_fw_flash_start(mlxsw_sp->core);
	err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
				   firmware, extack);
	mlxsw_core_fw_flash_end(mlxsw_sp->core);

	return err;
}

/* Validate that the running firmware is compatible with the driver and, if
 * it is older than the required revision, flash the bundled image.
 *
 * Return: 0 when nothing needs to be done (no required revision, devlink
 * 'fw_load_policy' is 'flash', or the FW is already new enough); -EINVAL on
 * a major-version mismatch; after flashing, -EAGAIN when the running FW can
 * be reset in place (caller should retry), 0 when a manual power cycle is
 * presumably needed, or the flash error itself.
 */
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
	const char *fw_filename = mlxsw_sp->fw_filename;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !fw_filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
						 &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
		return 0;

	dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
		rev->major, rev->minor, rev->subminor, req_rev->major,
		req_rev->minor, req_rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
		 fw_filename);

	err = request_firmware_direct(&firmware, fw_filename,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			fw_filename);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ?
err : -EAGAIN;
	else
		return 0;
}

/* devlink flash_update callback. Flashing individual components is not
 * supported; only whole-image updates are accepted.
 */
static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
				 const char *file_name, const char *component,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	const struct firmware *firmware;
	int err;

	if (component)
		return -EOPNOTSUPP;

	err = request_firmware_direct(&firmware, file_name,
				      mlxsw_sp->bus_info->dev);
	if (err)
		return err;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
	release_firmware(firmware);

	return err;
}

/* Read a flow counter without clearing it (MGPC NOP opcode). Either
 * @packets or @bytes may be NULL when the caller does not need that value.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

/* Zero a flow counter in HW. */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

/* Allocate a flow counter from the pool and clear it so the caller starts
 * from zero; the counter is released again if the clear fails.
 */
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

/* Return a flow counter to the pool. */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

/* Prepend the device Tx header to @skb. The packet is marked as a control
 * packet, so it is sent directly to the egress port in tx_info->local_port
 * rather than going through the forwarding pipeline (see tx_hdr_port_mid).
 * Caller must have ensured MLXSW_TXHDR_LEN bytes of headroom.
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

/* Map a bridge STP port state to the corresponding SPMS register state.
 * LISTENING, DISABLED and BLOCKING all map to DISCARDING; an unknown state
 * is a driver bug, hence BUG().
 */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

/* Program the STP state of (@mlxsw_sp_port, @vid) via the SPMS register.
 * The payload is heap-allocated, presumably because SPMS is too large for
 * the stack — confirm against MLXSW_REG_SPMS_LEN.
 */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Read the device base MAC address (SPAD register) into mlxsw_sp->base_mac. */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

/* Enable/disable packet sampling on the port with 1-in-@rate probability
 * (MPSC register).
 */
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

/* Set the administrative (up/down) state of the port (PAOS register). */
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

/* Program the port's hardware MAC address (PPAD register). */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

/* Derive the port MAC from the device base MAC by adding the local port
 * number to the last byte, then program it into both the netdev and HW.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

/* Program the port MTU (PMTU register). The requested MTU is grown by the
 * Tx header and Ethernet header sizes and checked against the HW maximum.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int
max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

/* Assign the port to switch partition @swid (PSPA register). */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

/* Enable/disable Virtual Port mode on the port (SVPE register). */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

/* Enable/disable FDB learning for @vid on the port (SPVMLR register).
 * Heap-allocated payload, as for SPMS above.
 */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

/* Program the port's PVID in HW (SPVID register). */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

/* Allow or disallow reception of untagged frames on the port (SPAFT). */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

/* Set the port PVID. @vid == 0 means "no PVID": untagged frames are
 * disallowed instead (the HW PVID itself is left unchanged). On failure to
 * allow untagged frames, the previous PVID is restored.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

/* Create the system-port-to-local-port mapping (SSPR register). */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

/* Read @local_port's module/width/lane mapping from PMLP and validate that
 * the layout is one the driver supports: power-of-2 width, a single module,
 * identical and sequential TX/RX lanes. Returns -EINVAL otherwise.
 */
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	bool separate_rxtx;
	u8 module;
	u8 width;
	int err;
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

/* Program the port's module/lane mapping (PMLP register) from the cached
 * mapping; each lane is used for both RX and TX.
 */
static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* Unmap the port from its module by programming a zero width. */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core,
MLXSW_REG(pmlp), pmlp_pl);
}

/* ndo_open - administratively enable the port and start the Tx queue. */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

/* ndo_stop - stop the Tx queue and administratively disable the port. */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

/* ndo_start_xmit - prepend the device Tx header and hand the packet to the
 * mlxsw core. Returns NETDEV_TX_BUSY (without freeing the skb) when the
 * core Tx path is busy; any other failure drops the packet and accounts it
 * in tx_dropped. Successful transmissions update the per-CPU SW counters.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	/* Make sure there is headroom for the Tx header. */
	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* eth_skb_pad() frees the skb itself on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

/* ndo_set_rx_mode - intentionally empty; no RX filtering configuration is
 * propagated to the device here.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

/* ndo_set_mac_address - validate and program a new MAC address, updating
 * the netdev copy only after HW accepted it.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

/* Headroom threshold for a priority group buffer: two MTUs, in cells. */
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

/* Convert the PFC delay (bits) plus one MTU into the required delay
 * provision, in cells.
 */
static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

/* Required delay provision in cells for a priority group, depending on
 * whether PFC and/or global PAUSE are enabled; zero if neither is.
 */
static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

/* Pack one PBMC buffer entry: lossy buffers have no Xon/Xoff threshold. */
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

/* Configure the port's headroom buffers (PBMC register) for the given MTU,
 * priority-to-TC mapping (@prio_tc) and PAUSE/PFC settings. Only buffers
 * that some priority actually maps to are (re)configured; a buffer is lossy
 * unless PFC or PAUSE applies to it. Fails with -ENOBUFS when the total
 * requested headroom exceeds what the ASIC can provide.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	u32 taken_headroom_cells = 0;
	u32 max_headroom_cells;
	int i, j, err;

	max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		u16 thres_cells;
		u16 delay_cells;
		u16 total_cells;
		bool lossy;

		/* Only configure buffers that some priority maps to. */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
							pfc, pause_en);
		total_cells = thres_cells + delay_cells;

		taken_headroom_cells += total_cells;
		if (taken_headroom_cells > max_headroom_cells)
			return -ENOBUFS;

		mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
				     thres_cells, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

/* Headroom configuration with the port's current DCB settings (or defaults
 * when DCB ETS is not configured).
 */
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

/* ndo_change_mtu - order matters: headroom and SPAN buffers are updated for
 * the new MTU before the MTU itself is raised; already-applied steps are
 * rolled back to the old MTU on failure.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* Sum the per-CPU SW counters (traffic that went through the CPU port)
 * into @stats. Always returns 0.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

/* ndo_has_offload_stats - only CPU-hit stats are offloaded. */
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

/* ndo_get_offload_stats - report the SW (CPU-hit) counters. */
static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

/* Query one PPCNT counter group/priority into @ppcnt_pl. */
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

/* Fill @stats from the IEEE 802.3 PPCNT counter group. (Definition
 * continues past the visible end of this chunk.)
 */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 1130 stats->multicast = 1131 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 1132 1133 stats->rx_crc_errors = 1134 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 1135 stats->rx_frame_errors = 1136 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 1137 1138 stats->rx_length_errors = ( 1139 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) + 1140 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) + 1141 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl)); 1142 1143 stats->rx_errors = (stats->rx_crc_errors + 1144 stats->rx_frame_errors + stats->rx_length_errors); 1145 1146 out: 1147 return err; 1148 } 1149 1150 static void 1151 mlxsw_sp_port_get_hw_xstats(struct net_device *dev, 1152 struct mlxsw_sp_port_xstats *xstats) 1153 { 1154 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1155 int err, i; 1156 1157 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 1158 ppcnt_pl); 1159 if (!err) 1160 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); 1161 1162 for (i = 0; i < TC_MAX_QUEUE; i++) { 1163 err = mlxsw_sp_port_get_stats_raw(dev, 1164 MLXSW_REG_PPCNT_TC_CONG_TC, 1165 i, ppcnt_pl); 1166 if (!err) 1167 xstats->wred_drop[i] = 1168 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); 1169 1170 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, 1171 i, ppcnt_pl); 1172 if (err) 1173 continue; 1174 1175 xstats->backlog[i] = 1176 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 1177 xstats->tail_drop[i] = 1178 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); 1179 } 1180 1181 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1182 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, 1183 i, ppcnt_pl); 1184 if (err) 1185 continue; 1186 1187 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); 1188 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); 1189 } 1190 } 1191 1192 static void update_stats_cache(struct work_struct *work) 1193 { 
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	/* Re-arm unconditionally; the cache keeps updating for the port's
	 * lifetime.
	 */
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program one contiguous VID range into the port's VLAN membership via the
 * SPVM register. The payload is heap-allocated because SPVM is large.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Set VLAN membership for [vid_begin, vid_end], chunked to at most
 * MLXSW_REG_SPVM_REC_MAX_COUNT records per register write.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Destroy all VLAN entries on the port, optionally keeping the default VID. */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

/* Detach the port-VLAN from whatever it is joined to: a bridge port or,
 * failing that, a router FID.
 */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

/* Create a {port, VID} entry: add the VID to the port's HW filter (untagged
 * only for the default VID) and track it on the port's VLAN list.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

/* Tear down a {port, VID} entry: leave bridge/router, unlink, free and
 * remove the VID from the HW filter.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

/* ndo_vlan_rx_add_vid */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

/* ndo_vlan_rx_kill_vid */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

/* Look up a matchall TC entry on the port by its classifier cookie. */
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

/* Offload a matchall mirred action as a SPAN mirror session towards the
 * action's destination device.
 */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;

	if (!act->dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
					true, &mirror->span_id);
}

/* Remove a previously offloaded matchall mirror session. */
static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
				 span_type, true);
}

/* Offload a matchall sample action. Only one active sampler per port is
 * supported; the psample group pointer doubles as the "active" flag and is
 * published with RCU so the RX path can read it locklessly.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   act->sample.psample_group);
	mlxsw_sp_port->sample->truncate = act->sample.truncate;
	mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
	mlxsw_sp_port->sample->rate = act->sample.rate;

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

/* Disable HW sampling on the port and clear the psample group pointer. */
static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

/* TC_CLSMATCHALL_REPLACE handler: accept exactly one action (mirred or
 * sample, protocol "all") and track it on the port's matchall list.
 */
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	int err;

	if (!flow_offload_has_one_action(&f->rule->action)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	act = &f->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, act,
							    ingress);
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    act, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

/* TC_CLSMATCHALL_DESTROY handler: undo the offload matching f->cookie. */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

/* Dispatch matchall classifier commands. */
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Dispatch flower classifier commands against the ACL block. */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case FLOW_CLS_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case FLOW_CLS_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Per-port flow-block callback handling only matchall; flower is handled
 * by a separate per-ACL-block callback and is accepted silently here.
 */
static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Ingress flavor of the matchall flow-block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}

/* Egress flavor of the matchall flow-block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}

/* Per-ACL-block flow-block callback handling only flower. */
static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* Release callback invoked when the flower flow_block_cb is freed. */
static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	mlxsw_sp_acl_block_destroy(acl_block);
}

static LIST_HEAD(mlxsw_sp_block_cb_list);

/* Bind the port to a (possibly shared) flower ACL block. The block callback
 * is refcounted: it is allocated on first bind and reused afterwards.
 */
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb) {
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
					       mlxsw_sp, acl_block,
					       mlxsw_sp_tc_block_flower_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_acl_block_destroy(acl_block);
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
		register_block = true;
	} else {
		acl_block = flow_block_cb_priv(block_cb);
	}
	flow_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress, f->extack);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
	}

	return 0;

err_block_bind:
	/* Drop our reference; free the callback only if nobody else holds
	 * one.
	 */
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
err_cb_register:
	return err;
}

/* Unbind the port from its flower ACL block; the block callback is removed
 * once its reference count drops to zero and the HW unbind succeeded.
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = flow_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

/* TC_SETUP_BLOCK handler: register the matchall callback for the binder
 * direction and bind/unbind the flower ACL block alongside it.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	f->driver_block_list = &mlxsw_sp_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, mlxsw_sp_port,
					  &mlxsw_sp_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
					       mlxsw_sp_port, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);
		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
							  ingress);
		if (err) {
			flow_block_cb_free(block_cb);
			return err;
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
						      f, ingress);
		block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* ndo_setup_tc: dispatch to the per-offload-type handlers. */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}


/* NETIF_F_HW_TC toggle: refuse to disable while offloaded rules exist;
 * otherwise flip the ACL blocks' disabled counters.
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
	} else {
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
	}
	return 0;
}

/* NETIF_F_LOOPBACK toggle: program PPLR with the port administratively
 * brought down around the register write when it is running.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Apply one feature bit change via its handler and mirror the result into
 * dev->features on success; a no-op when the bit did not change.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}
/* ndo_set_features: handle the supported toggles, restoring dev->features
 * if any handler failed.
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

/* ndo_get_devlink_port */
static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

/* SIOCSHWTSTAMP: apply the user's hwtstamp config via the per-ASIC PTP ops
 * and copy the possibly adjusted config back to user space.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* SIOCGHWTSTAMP: report the current hwtstamp config to user space. */
static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* Reset the port's HW timestamping to a zeroed (disabled) configuration. */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
/* NOTE(review): this brace-body belongs to a function whose signature is
 * above this chunk. It clears the port's hardware timestamping state by
 * pushing an all-zero hwtstamp_config through the per-ASIC PTP ops.
 */
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

/* ioctl handler: only hardware timestamping get/set (SIOC[GS]HWTSTAMP)
 * is supported; all other requests are rejected with -EOPNOTSUPP.
 */
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/* netdev callbacks for a Spectrum front-panel port */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};

/* ethtool -i: driver name/version, firmware revision and bus info */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

/* ethtool -a: report the cached PAUSE frame configuration */
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

/* Program RX/TX PAUSE admission into the device via the PFCC register */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

/* ethtool -A: enable/disable link PAUSE frames.
 * PAUSE is mutually exclusive with PFC, and autoneg of PAUSE is not
 * supported. Headroom is sized first; on PFCC failure the headroom is
 * rolled back to match the previously cached PAUSE state.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	/* Cache the new state only after the device accepted it */
	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* One ethtool statistic: its name and the PPCNT payload accessor.
 * cells_bytes marks counters reported by the device in cell units that
 * must be converted to bytes before being handed to user space.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

/* IEEE 802.3 counter group (PPCNT) */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter =
		mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

/* RFC 2863 (interfaces MIB) counter group */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
	{
		.str = "if_in_discards",
		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
	},
	{
		.str = "if_out_discards",
		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
	},
	{
		.str = "if_out_errors",
		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)

/* RFC 2819 (RMON etherStats) counter group, including size histograms */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
	{
		.str = "ether_stats_undersize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
	},
	{
		.str = "ether_stats_oversize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
	},
	{
		.str = "ether_stats_fragments",
		.getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
	},
	{
		.str = "ether_pkts64octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
	},
	{
		.str = "ether_pkts65to127octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
	},
	{
		.str = "ether_pkts128to255octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
	},
	{
		.str = "ether_pkts256to511octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
	},
	{
		.str = "ether_pkts512to1023octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
	},
	{
		.str = "ether_pkts1024to1518octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
	},
	{
		.str = "ether_pkts1519to2047octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
	},
	{
		.str = "ether_pkts2048to4095octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
	},
	{
		.str = "ether_pkts4096to8191octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)

/* RFC 3635 (EtherLike MIB) counter group */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
	{
		.str = "dot3stats_fcs_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
	},
	{
		.str = "dot3stats_symbol_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
	},
	{
		.str = "dot3control_in_unknown_opcodes",
		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
	},
	{
		.str = "dot3in_pause_frames",
		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)

/* Extended counter group */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_ext_stats[] = {
	{
		.str = "ecn_marked",
		.getter = mlxsw_reg_ppcnt_ecn_marked_get,
	},
};

#define MLXSW_SP_PORT_HW_EXT_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_ext_stats)

/* Per-reason packet discard counter group */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
	{
		.str = "discard_ingress_general",
		.getter = mlxsw_reg_ppcnt_ingress_general_get,
	},
	{
		.str = "discard_ingress_policy_engine",
		.getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
	},
	{
		.str = "discard_ingress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
	},
	{
		.str = "discard_ingress_tag_frame_type",
		.getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
	},
	{
		.str = "discard_egress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
	},
	{
		.str = "discard_loopback_filter",
		.getter = mlxsw_reg_ppcnt_loopback_filter_get,
	},
	{
		.str = "discard_egress_general",
		.getter = mlxsw_reg_ppcnt_egress_general_get,
	},
	{
		.str = "discard_egress_hoq",
		.getter = mlxsw_reg_ppcnt_egress_hoq_get,
	},
	{
		.str = "discard_egress_policy_engine",
		.getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
	},
	{
		.str = "discard_ingress_tx_link_down",
		.getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
	},
	{
		.str = "discard_egress_stp_filter",
		.getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
	},
	{
		.str = "discard_egress_sll",
		.getter = mlxsw_reg_ppcnt_egress_sll_get,
	},
};

#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)

/* Per-priority counter group; names get a "_<prio>" suffix appended */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

/* Per-traffic-class counter group; tc_transmit_queue is reported in
 * cells and converted to bytes (cells_bytes).
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total number of ethtool statistics, excluding the PTP counters which
 * are accounted separately via ptp_ops->get_stats_count().
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
					 MLXSW_SP_PORT_HW_EXT_STATS_LEN + \
					 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
					  IEEE_8021QAZ_MAX_TCS) + \
					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
					  TC_MAX_QUEUE))

/* Emit per-priority stat names ("<name>_<prio>") and advance *p */
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

/* Emit per-TC stat names ("<name>_<tc>") and advance *p */
static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

/* ethtool get_strings: lay out stat names in the same order that
 * mlxsw_sp_port_get_stats() fills the values.
 */
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_EXT_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_ext_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < TC_MAX_QUEUE; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p);
		break;
	}
}

/* ethtool -p: blink the port LED via the MLCR register */
static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

/* Map a PPCNT counter group to its stats descriptor table and length */
static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2863_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2819_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_3635_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_EXT_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_ext_stats;
		*p_len = MLXSW_SP_PORT_HW_EXT_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_DISCARD_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_discard_stats;
		*p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

/* Fetch one PPCNT counter group and extract it into data[data_index..],
 * converting cell-based counters to bytes.
 */
static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err =
	    mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		/* Counter is in cell units; convert to bytes */
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}

/* ethtool get_ethtool_stats: fill all counter groups in the fixed order
 * that matches mlxsw_sp_port_get_strings().
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* RFC 2863 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;

	/* RFC 2819 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;

	/* RFC 3635 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;

	/* Extended Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_EXT_STATS_LEN;

	/* Discard Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}

	/* PTP counters */
	mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port,
						    data, data_index);
	data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
}

/* ethtool get_sset_count: static stats plus the per-ASIC PTP stats */
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN +
		       mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
	default:
		return -EOPNOTSUPP;
	}
}

/* Spectrum-1: one PTYS protocol mask mapped to one ethtool link mode
 * bit and its speed.
 */
struct mlxsw_sp1_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

/* Spectrum-1 PTYS <-> ethtool link mode translation table */
static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed		= SPEED_100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool	= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed		= SPEED_1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed		= SPEED_20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed		= SPEED_100000,
	},
};

#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)

/* Report FIBRE/Backplane supported-port bits according to the PTYS
 * capability mask.
 */
static void
mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Set ethtool link-mode bits for every PTYS protocol present in the
 * mask. The width argument is unused on Spectrum-1; it exists to match
 * the shared port_type_speed_ops signature.
 */
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 u8 width, unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			__set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

/* Return the speed of the first table entry matching the PTYS mask,
 * or SPEED_UNKNOWN when nothing matches.
 */
static u32
mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			return mlxsw_sp1_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill ksettings speed/duplex; unknown unless carrier is up */
static void
mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* Build the PTYS advertisement mask from ethtool advertised link modes */
static u32
mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Build the PTYS mask of all protocols matching a requested speed */
static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width,
				   u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp1_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Thin wrapper: Spectrum-1 uses the legacy PTYS ethernet fields */
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin, bool autoneg)
{
	mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}

static void
mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
				  p_eth_proto_oper);
}

/* Spectrum-1 implementation of the per-ASIC port type/speed operations */
static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp1_port_type_speed_ops = {
	.from_ptys_supported_port	= mlxsw_sp1_from_ptys_supported_port,
	.from_ptys_link			= mlxsw_sp1_from_ptys_link,
	.from_ptys_speed		= mlxsw_sp1_from_ptys_speed,
	.from_ptys_speed_duplex		= mlxsw_sp1_from_ptys_speed_duplex,
	.to_ptys_advert_link		= mlxsw_sp1_to_ptys_advert_link,
	.to_ptys_speed			= mlxsw_sp1_to_ptys_speed,
	.reg_ptys_eth_pack		= mlxsw_sp1_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack		= mlxsw_sp1_reg_ptys_eth_unpack,
};

/* Spectrum-2+: each extended PTYS protocol maps to a SET of ethtool
 * link-mode bits. The per-protocol arrays follow.
 */
static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_400gaui_8[] = {
	ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)

/* Bitmask encoding of a port's lane width, used to filter link modes
 * that are only valid at certain widths.
 */
#define MLXSW_SP_PORT_MASK_WIDTH_1X	BIT(0)
#define MLXSW_SP_PORT_MASK_WIDTH_2X	BIT(1)
#define MLXSW_SP_PORT_MASK_WIDTH_4X	BIT(2)
#define MLXSW_SP_PORT_MASK_WIDTH_8X	BIT(3)

/* Convert a lane count (1/2/4/8) to its WIDTH_* bit; 0 on bad input */
static u8 mlxsw_sp_port_mask_width_get(u8 width)
{
	switch (width) {
	case 1:
		return MLXSW_SP_PORT_MASK_WIDTH_1X;
	case 2:
		return MLXSW_SP_PORT_MASK_WIDTH_2X;
	case 4:
		return MLXSW_SP_PORT_MASK_WIDTH_4X;
	case 8:
		return MLXSW_SP_PORT_MASK_WIDTH_8X;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

/* Spectrum-2+: one extended PTYS protocol mapped to a set of ethtool
 * link-mode bits, the lane widths it is valid for, and its speed.
 */
struct mlxsw_sp2_port_link_mode {
	const enum ethtool_link_mode_bit_indices *mask_ethtool;
	int m_ethtool_len;
	u32 mask;
	u32 speed;
	u8 mask_width;
};

/* Spectrum-2+ extended PTYS <-> ethtool link mode translation table */
static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_2500,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_5000,
	},
	{
		.mask =
MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G, 3042 .mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g, 3043 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN, 3044 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3045 MLXSW_SP_PORT_MASK_WIDTH_2X | 3046 MLXSW_SP_PORT_MASK_WIDTH_4X | 3047 MLXSW_SP_PORT_MASK_WIDTH_8X, 3048 .speed = SPEED_10000, 3049 }, 3050 { 3051 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G, 3052 .mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g, 3053 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN, 3054 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | 3055 MLXSW_SP_PORT_MASK_WIDTH_8X, 3056 .speed = SPEED_40000, 3057 }, 3058 { 3059 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR, 3060 .mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr, 3061 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN, 3062 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3063 MLXSW_SP_PORT_MASK_WIDTH_2X | 3064 MLXSW_SP_PORT_MASK_WIDTH_4X | 3065 MLXSW_SP_PORT_MASK_WIDTH_8X, 3066 .speed = SPEED_25000, 3067 }, 3068 { 3069 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2, 3070 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2, 3071 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN, 3072 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X | 3073 MLXSW_SP_PORT_MASK_WIDTH_4X | 3074 MLXSW_SP_PORT_MASK_WIDTH_8X, 3075 .speed = SPEED_50000, 3076 }, 3077 { 3078 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR, 3079 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr, 3080 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN, 3081 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X, 3082 .speed = SPEED_50000, 3083 }, 3084 { 3085 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4, 3086 .mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4, 3087 .m_ethtool_len = 
MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN, 3088 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | 3089 MLXSW_SP_PORT_MASK_WIDTH_8X, 3090 .speed = SPEED_100000, 3091 }, 3092 { 3093 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2, 3094 .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2, 3095 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN, 3096 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X, 3097 .speed = SPEED_100000, 3098 }, 3099 { 3100 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, 3101 .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, 3102 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, 3103 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | 3104 MLXSW_SP_PORT_MASK_WIDTH_8X, 3105 .speed = SPEED_200000, 3106 }, 3107 { 3108 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8, 3109 .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8, 3110 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN, 3111 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X, 3112 .speed = SPEED_400000, 3113 }, 3114 }; 3115 3116 #define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode) 3117 3118 static void 3119 mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 3120 u32 ptys_eth_proto, 3121 struct ethtool_link_ksettings *cmd) 3122 { 3123 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 3124 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 3125 } 3126 3127 static void 3128 mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3129 unsigned long *mode) 3130 { 3131 int i; 3132 3133 for (i = 0; i < link_mode->m_ethtool_len; i++) 3134 __set_bit(link_mode->mask_ethtool[i], mode); 3135 } 3136 3137 static void 3138 mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 3139 u8 width, unsigned long *mode) 3140 { 3141 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 3142 int i; 3143 3144 for (i = 0; i < 
MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) &&
		    (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
			mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
						  mode);
	}
}

/* Return the speed of the first link mode present in the PTYS bitmap, or
 * SPEED_UNKNOWN when none matches.
 */
static u32
mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
			return mlxsw_sp2_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill ethtool speed/duplex from the operational PTYS bitmap; both stay
 * unknown while the carrier is down.
 */
static void
mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* True when ALL of the ethtool bits of 'link_mode' are set in 'mode'. */
static bool
mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			   const unsigned long *mode)
{
	int cnt = 0;
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++) {
		if (test_bit(link_mode->mask_ethtool[i], mode))
			cnt++;
	}

	return cnt == link_mode->m_ethtool_len;
}

/* Translate the advertised ethtool link modes back into a PTYS bitmap,
 * restricted to modes valid for the port width.
 */
static u32
mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
			      const struct ethtool_link_ksettings *cmd)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) &&
		    mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
					       cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Translate a forced ethtool speed into the PTYS bits of every link mode
 * with that speed that is valid for the port width.
 */
static u32
mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp,
			u8 width, u32 speed)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((speed == mlxsw_sp2_port_link_mode[i].speed) &&
		    (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Spectrum-2 packs PTYS using the extended Ethernet protocol fields. */
static void
mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin,
			    bool autoneg)
{
	mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg);
}

static void
mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap,
				      p_eth_proto_admin, p_eth_proto_oper);
}

static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp2_port_type_speed_ops = {
	.from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port,
	.from_ptys_link = mlxsw_sp2_from_ptys_link,
	.from_ptys_speed = mlxsw_sp2_from_ptys_speed,
	.from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex,
	.to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link,
	.to_ptys_speed = mlxsw_sp2_to_ptys_speed,
	.reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack,
};

/* Fill the 'supported' link modes: pause/autoneg bits, port types and the
 * modes derived from the device capability bitmap.
 */
static void
mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
				 u8 width, struct ethtool_link_ksettings *cmd)
{
	const struct mlxsw_sp_port_type_speed_ops *ops;

	ops = mlxsw_sp->port_type_speed_ops;

	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 3268 3269 ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); 3270 ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width, 3271 cmd->link_modes.supported); 3272 } 3273 3274 static void 3275 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, 3276 u32 eth_proto_admin, bool autoneg, u8 width, 3277 struct ethtool_link_ksettings *cmd) 3278 { 3279 const struct mlxsw_sp_port_type_speed_ops *ops; 3280 3281 ops = mlxsw_sp->port_type_speed_ops; 3282 3283 if (!autoneg) 3284 return; 3285 3286 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 3287 ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width, 3288 cmd->link_modes.advertising); 3289 } 3290 3291 static u8 3292 mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type) 3293 { 3294 switch (connector_type) { 3295 case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: 3296 return PORT_OTHER; 3297 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: 3298 return PORT_NONE; 3299 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: 3300 return PORT_TP; 3301 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: 3302 return PORT_AUI; 3303 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: 3304 return PORT_BNC; 3305 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: 3306 return PORT_MII; 3307 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: 3308 return PORT_FIBRE; 3309 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: 3310 return PORT_DA; 3311 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: 3312 return PORT_OTHER; 3313 default: 3314 WARN_ON_ONCE(1); 3315 return PORT_OTHER; 3316 } 3317 } 3318 3319 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 3320 struct ethtool_link_ksettings *cmd) 3321 { 3322 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 3323 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3324 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3325 const struct mlxsw_sp_port_type_speed_ops *ops; 3326 char 
ptys_pl[MLXSW_REG_PTYS_LEN]; 3327 u8 connector_type; 3328 bool autoneg; 3329 int err; 3330 3331 ops = mlxsw_sp->port_type_speed_ops; 3332 3333 autoneg = mlxsw_sp_port->link.autoneg; 3334 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3335 0, false); 3336 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3337 if (err) 3338 return err; 3339 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3340 ð_proto_admin, ð_proto_oper); 3341 3342 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, 3343 mlxsw_sp_port->mapping.width, cmd); 3344 3345 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3346 mlxsw_sp_port->mapping.width, cmd); 3347 3348 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 3349 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3350 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3351 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3352 eth_proto_oper, cmd); 3353 3354 return 0; 3355 } 3356 3357 static int 3358 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3359 const struct ethtool_link_ksettings *cmd) 3360 { 3361 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3362 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3363 const struct mlxsw_sp_port_type_speed_ops *ops; 3364 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3365 u32 eth_proto_cap, eth_proto_new; 3366 bool autoneg; 3367 int err; 3368 3369 ops = mlxsw_sp->port_type_speed_ops; 3370 3371 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3372 0, false); 3373 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3374 if (err) 3375 return err; 3376 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3377 3378 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 3379 eth_proto_new = autoneg ? 
3380 ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width, 3381 cmd) : 3382 ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width, 3383 cmd->base.speed); 3384 3385 eth_proto_new = eth_proto_new & eth_proto_cap; 3386 if (!eth_proto_new) { 3387 netdev_err(dev, "No supported speed requested\n"); 3388 return -EINVAL; 3389 } 3390 3391 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3392 eth_proto_new, autoneg); 3393 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3394 if (err) 3395 return err; 3396 3397 mlxsw_sp_port->link.autoneg = autoneg; 3398 3399 if (!netif_running(dev)) 3400 return 0; 3401 3402 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3403 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3404 3405 return 0; 3406 } 3407 3408 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3409 struct ethtool_modinfo *modinfo) 3410 { 3411 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3412 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3413 int err; 3414 3415 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3416 mlxsw_sp_port->mapping.module, 3417 modinfo); 3418 3419 return err; 3420 } 3421 3422 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3423 struct ethtool_eeprom *ee, 3424 u8 *data) 3425 { 3426 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3427 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3428 int err; 3429 3430 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3431 mlxsw_sp_port->mapping.module, ee, 3432 data); 3433 3434 return err; 3435 } 3436 3437 static int 3438 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) 3439 { 3440 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3441 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3442 3443 return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); 3444 } 3445 3446 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 3447 .get_drvinfo = 
mlxsw_sp_port_get_drvinfo, 3448 .get_link = ethtool_op_get_link, 3449 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 3450 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 3451 .get_strings = mlxsw_sp_port_get_strings, 3452 .set_phys_id = mlxsw_sp_port_set_phys_id, 3453 .get_ethtool_stats = mlxsw_sp_port_get_stats, 3454 .get_sset_count = mlxsw_sp_port_get_sset_count, 3455 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 3456 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 3457 .get_module_info = mlxsw_sp_get_module_info, 3458 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 3459 .get_ts_info = mlxsw_sp_get_ts_info, 3460 }; 3461 3462 static int 3463 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port) 3464 { 3465 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3466 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 3467 const struct mlxsw_sp_port_type_speed_ops *ops; 3468 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3469 int err; 3470 3471 ops = mlxsw_sp->port_type_speed_ops; 3472 3473 /* Set advertised speeds to supported speeds. 
*/ 3474 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3475 0, false); 3476 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3477 if (err) 3478 return err; 3479 3480 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3481 ð_proto_admin, ð_proto_oper); 3482 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3483 eth_proto_cap, mlxsw_sp_port->link.autoneg); 3484 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3485 } 3486 3487 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed) 3488 { 3489 const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops; 3490 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3491 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3492 u32 eth_proto_oper; 3493 int err; 3494 3495 port_type_speed_ops = mlxsw_sp->port_type_speed_ops; 3496 port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, 3497 mlxsw_sp_port->local_port, 0, 3498 false); 3499 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3500 if (err) 3501 return err; 3502 port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL, 3503 ð_proto_oper); 3504 *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper); 3505 return 0; 3506 } 3507 3508 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 3509 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 3510 bool dwrr, u8 dwrr_weight) 3511 { 3512 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3513 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3514 3515 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3516 next_index); 3517 mlxsw_reg_qeec_de_set(qeec_pl, true); 3518 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 3519 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 3520 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3521 } 3522 3523 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 3524 enum mlxsw_reg_qeec_hr hr, u8 index, 3525 u8 next_index, 
u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the minimum shaper (guaranteed rate) of an ETS element. */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Map a switch priority to a traffic class via the QTCT register. */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

/* Bring the port's ETS hierarchy and shapers to a known initial state. */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TCs i + 8 are configured below as the multicast TCs
		 * paired with unicast TC i.
		 */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0.
	 */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

/* Enable or disable the port's multicast-aware TC mapping mode (QTCTM). */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

/* Create one front-panel port: allocate its netdev, initialize every
 * hardware object it needs (module mapping, SWID, buffers, ETS, DCB,
 * FIDs, qdiscs, NVE, VLANs) and register the netdev. A non-zero
 * 'split_base_local_port' marks the port as a split port. On any failure
 * everything done so far is unwound in reverse order.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 split_base_local_port,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool split = !!split_base_local_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_mapping->module + 1, split,
				   port_mapping->lane / port_mapping->width,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->split_base_local_port = split_base_local_port;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);
	INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw,
			  mlxsw_sp_span_speed_update_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

/* Tear down a front-panel port; mirrors mlxsw_sp_port_create in reverse. */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	/* Stop the deferred work items before dismantling the port. */
	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

/* Register the CPU port with the core. Unlike front-panel ports it has no
 * netdev; only a minimal mlxsw_sp_port structure is allocated.
 */
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

/* True when a port structure exists for the given local port. */
static bool
mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
}

/* Create the CPU port and every front-panel port that has a cached module
 * mapping. On failure, everything created so far is removed.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = mlxsw_sp->port_mapping[i];
		if (!port_mapping)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
	kfree(mlxsw_sp->ports);
	return err;
}

/* Cache the module mapping of every possible local port. Ports reported
 * with zero width are unmapped and get no entry.
 */
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping *),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
		if
(err)
			goto err_port_module_info_get;
		if (!port_mapping.width)
			continue;

		mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
						    sizeof(port_mapping),
						    GFP_KERNEL);
		if (!mlxsw_sp->port_mapping[i]) {
			err = -ENOMEM;
			goto err_port_module_info_dup;
		}
	}
	return 0;

err_port_module_info_get:
err_port_module_info_dup:
	for (i--; i >= 1; i--)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
	return err;
}

static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
}

/* Return the first local port of the cluster (group of 'max_width'
 * consecutive local ports) that 'local_port' belongs to.
 */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
{
	u8 offset = (local_port - 1) % max_width;

	return local_port - offset;
}

/* Create 'count' split ports starting at 'base_port', spaced 'offset'
 * local ports apart, each taking an equal share of the original lanes.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   base_port, &split_port_mapping);
		if (err)
			goto err_port_create;
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}

/* Recreate the original unsplit ports after an unsplit operation. */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port,
					 unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the gap and recreate them. */
	for (i = 0; i < count * offset; i++) {
		port_mapping = mlxsw_sp->port_mapping[base_port + i];
		if (!port_mapping)
			continue;
		mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
	}
}

/* Number of local ports each resulting split port occupies, derived from
 * the device's LOCAL_PORTS_IN_*X resources for the per-split width.
 */
static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
				       unsigned int count,
				       unsigned int max_width)
{
	enum mlxsw_res_id local_ports_in_x_res_id;
	int split_width = max_width / count;

	if (split_width == 1)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
	else if (split_width == 2)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
	else if (split_width == 4)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
	else
		return -EINVAL;

	if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
		return -EINVAL;
	return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
}

/* devlink port-split handler: validate the request and replace a single
 * port with 'count' split ports.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int max_width;
	u8 base_port;
	int offset;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	/* Split ports cannot be split. */
	if (mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Split port with non-max and 1 module width cannot be split. */
	if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	if (count == 1 || !is_power_of_2(count) || count > max_width) {
		netdev_err(mlxsw_sp_port->dev, "Invalid split count\n");
		NL_SET_ERR_MSG_MOD(extack, "Invalid split count");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* Only in case max split is being done, the local port and
	 * base port may differ.
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	for (i = 0; i < count * offset; i++) {
		/* Expect base port to exist and also the one in the middle in
		 * case of maximal split count.
4212 */ 4213 if (i == 0 || (count == max_width && i == count / 2)) 4214 continue; 4215 4216 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) { 4217 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 4218 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 4219 return -EINVAL; 4220 } 4221 } 4222 4223 port_mapping = mlxsw_sp_port->mapping; 4224 4225 for (i = 0; i < count; i++) 4226 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4227 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4228 4229 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping, 4230 count, offset); 4231 if (err) { 4232 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 4233 goto err_port_split_create; 4234 } 4235 4236 return 0; 4237 4238 err_port_split_create: 4239 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 4240 return err; 4241 } 4242 4243 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 4244 struct netlink_ext_ack *extack) 4245 { 4246 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4247 struct mlxsw_sp_port *mlxsw_sp_port; 4248 unsigned int count; 4249 int max_width; 4250 u8 base_port; 4251 int offset; 4252 int i; 4253 4254 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4255 if (!mlxsw_sp_port) { 4256 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 4257 local_port); 4258 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 4259 return -EINVAL; 4260 } 4261 4262 if (!mlxsw_sp_port->split) { 4263 netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); 4264 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 4265 return -EINVAL; 4266 } 4267 4268 max_width = mlxsw_core_module_max_width(mlxsw_core, 4269 mlxsw_sp_port->mapping.module); 4270 if (max_width < 0) { 4271 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); 4272 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); 4273 return max_width; 4274 } 
4275 4276 count = max_width / mlxsw_sp_port->mapping.width; 4277 4278 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 4279 if (WARN_ON(offset < 0)) { 4280 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 4281 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 4282 return -EINVAL; 4283 } 4284 4285 base_port = mlxsw_sp_port->split_base_local_port; 4286 4287 for (i = 0; i < count; i++) 4288 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4289 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4290 4291 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 4292 4293 return 0; 4294 } 4295 4296 static void 4297 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) 4298 { 4299 int i; 4300 4301 for (i = 0; i < TC_MAX_QUEUE; i++) 4302 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; 4303 } 4304 4305 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 4306 char *pude_pl, void *priv) 4307 { 4308 struct mlxsw_sp *mlxsw_sp = priv; 4309 struct mlxsw_sp_port *mlxsw_sp_port; 4310 enum mlxsw_reg_pude_oper_status status; 4311 u8 local_port; 4312 4313 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 4314 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4315 if (!mlxsw_sp_port) 4316 return; 4317 4318 status = mlxsw_reg_pude_oper_status_get(pude_pl); 4319 if (status == MLXSW_PORT_OPER_STATUS_UP) { 4320 netdev_info(mlxsw_sp_port->dev, "link up\n"); 4321 netif_carrier_on(mlxsw_sp_port->dev); 4322 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 4323 mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0); 4324 } else { 4325 netdev_info(mlxsw_sp_port->dev, "link down\n"); 4326 netif_carrier_off(mlxsw_sp_port->dev); 4327 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); 4328 } 4329 } 4330 4331 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 4332 char *mtpptr_pl, bool ingress) 4333 { 4334 u8 local_port; 4335 u8 num_rec; 4336 int i; 
4337 4338 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 4339 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 4340 for (i = 0; i < num_rec; i++) { 4341 u8 domain_number; 4342 u8 message_type; 4343 u16 sequence_id; 4344 u64 timestamp; 4345 4346 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 4347 &domain_number, &sequence_id, 4348 ×tamp); 4349 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 4350 message_type, domain_number, 4351 sequence_id, timestamp); 4352 } 4353 } 4354 4355 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 4356 char *mtpptr_pl, void *priv) 4357 { 4358 struct mlxsw_sp *mlxsw_sp = priv; 4359 4360 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 4361 } 4362 4363 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 4364 char *mtpptr_pl, void *priv) 4365 { 4366 struct mlxsw_sp *mlxsw_sp = priv; 4367 4368 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 4369 } 4370 4371 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 4372 u8 local_port, void *priv) 4373 { 4374 struct mlxsw_sp *mlxsw_sp = priv; 4375 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4376 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 4377 4378 if (unlikely(!mlxsw_sp_port)) { 4379 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 4380 local_port); 4381 return; 4382 } 4383 4384 skb->dev = mlxsw_sp_port->dev; 4385 4386 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 4387 u64_stats_update_begin(&pcpu_stats->syncp); 4388 pcpu_stats->rx_packets++; 4389 pcpu_stats->rx_bytes += skb->len; 4390 u64_stats_update_end(&pcpu_stats->syncp); 4391 4392 skb->protocol = eth_type_trans(skb, skb->dev); 4393 netif_receive_skb(skb); 4394 } 4395 4396 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 4397 void *priv) 4398 { 4399 skb->offload_fwd_mark = 1; 4400 return 
mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4401 } 4402 4403 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 4404 u8 local_port, void *priv) 4405 { 4406 skb->offload_l3_fwd_mark = 1; 4407 skb->offload_fwd_mark = 1; 4408 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4409 } 4410 4411 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 4412 void *priv) 4413 { 4414 struct mlxsw_sp *mlxsw_sp = priv; 4415 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4416 struct psample_group *psample_group; 4417 u32 size; 4418 4419 if (unlikely(!mlxsw_sp_port)) { 4420 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 4421 local_port); 4422 goto out; 4423 } 4424 if (unlikely(!mlxsw_sp_port->sample)) { 4425 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 4426 local_port); 4427 goto out; 4428 } 4429 4430 size = mlxsw_sp_port->sample->truncate ? 
4431 mlxsw_sp_port->sample->trunc_size : skb->len; 4432 4433 rcu_read_lock(); 4434 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 4435 if (!psample_group) 4436 goto out_unlock; 4437 psample_sample_packet(psample_group, skb, size, 4438 mlxsw_sp_port->dev->ifindex, 0, 4439 mlxsw_sp_port->sample->rate); 4440 out_unlock: 4441 rcu_read_unlock(); 4442 out: 4443 consume_skb(skb); 4444 } 4445 4446 static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port, 4447 void *priv) 4448 { 4449 struct mlxsw_sp *mlxsw_sp = priv; 4450 4451 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 4452 } 4453 4454 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4455 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 4456 _is_ctrl, SP_##_trap_group, DISCARD) 4457 4458 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4459 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 4460 _is_ctrl, SP_##_trap_group, DISCARD) 4461 4462 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4463 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 4464 _is_ctrl, SP_##_trap_group, DISCARD) 4465 4466 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 4467 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 4468 4469 static const struct mlxsw_listener mlxsw_sp_listener[] = { 4470 /* Events */ 4471 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 4472 /* L2 traps */ 4473 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 4474 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 4475 MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU, 4476 false, SP_LLDP, DISCARD), 4477 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 4478 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 4479 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 4480 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 4481 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 
4482 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 4483 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 4484 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 4485 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), 4486 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, 4487 false), 4488 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 4489 false), 4490 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, 4491 false), 4492 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 4493 false), 4494 /* L3 traps */ 4495 MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false), 4496 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), 4497 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 4498 false), 4499 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), 4500 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 4501 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), 4502 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, 4503 false), 4504 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), 4505 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), 4506 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), 4507 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), 4508 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), 4509 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), 4510 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 4511 false), 4512 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 4513 false), 4514 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 4515 false), 4516 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 4517 false), 4518 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), 4519 
MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 4520 false), 4521 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), 4522 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), 4523 MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false), 4524 MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false), 4525 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 4526 ROUTER_EXP, false), 4527 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 4528 ROUTER_EXP, false), 4529 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 4530 ROUTER_EXP, false), 4531 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 4532 ROUTER_EXP, false), 4533 /* PKT Sample trap */ 4534 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 4535 false, SP_IP2ME, DISCARD), 4536 /* ACL trap */ 4537 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), 4538 /* Multicast Router Traps */ 4539 MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), 4540 MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false), 4541 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 4542 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 4543 /* NVE traps */ 4544 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false), 4545 MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false), 4546 /* PTP traps */ 4547 MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU, 4548 false, SP_PTP0, DISCARD), 4549 MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false), 4550 }; 4551 4552 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 4553 /* Events */ 4554 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 4555 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 4556 }; 4557 4558 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 4559 { 4560 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4561 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 4562 enum 
mlxsw_reg_qpcr_ir_units ir_units; 4563 int max_cpu_policers; 4564 bool is_bytes; 4565 u8 burst_size; 4566 u32 rate; 4567 int i, err; 4568 4569 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 4570 return -EIO; 4571 4572 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 4573 4574 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 4575 for (i = 0; i < max_cpu_policers; i++) { 4576 is_bytes = false; 4577 switch (i) { 4578 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 4579 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 4580 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 4581 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 4582 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 4583 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: 4584 rate = 128; 4585 burst_size = 7; 4586 break; 4587 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 4588 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 4589 rate = 16 * 1024; 4590 burst_size = 10; 4591 break; 4592 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 4593 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 4594 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 4595 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 4596 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 4597 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 4598 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 4599 rate = 1024; 4600 burst_size = 7; 4601 break; 4602 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 4603 rate = 1024; 4604 burst_size = 7; 4605 break; 4606 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0: 4607 rate = 24 * 1024; 4608 burst_size = 12; 4609 break; 4610 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1: 4611 rate = 19 * 1024; 4612 burst_size = 12; 4613 break; 4614 case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP: 4615 rate = 360; 4616 burst_size = 7; 4617 break; 4618 default: 4619 continue; 4620 } 4621 4622 __set_bit(i, mlxsw_sp->trap->policers_usage); 4623 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 4624 burst_size); 4625 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 4626 if (err) 4627 return err; 4628 } 4629 4630 return 0; 4631 } 4632 4633 
/* Map each CPU trap group to a host trap-group entry (HTGT): assign its
 * scheduling priority, traffic class and policer. Groups not listed keep
 * their defaults. The policer ID equals the group ID except for the
 * EVENT group, which is not policed.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		/* Control protocols get the highest priority/TC. */
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			priority = 0;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			/* Events are not rate limited. */
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* A policed group must reference an existing policer. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Register an array of trap listeners; on failure unregister the ones
 * already registered so the caller sees all-or-nothing semantics.
 */
static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_listener listeners[],
				   size_t listeners_count)
{
	int i;
	int err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &listeners[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_listener listeners[],
				      size_t listeners_count)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
}

/* Allocate the trap context (including the policer-usage bitmap sized by
 * the MAX_CPU_POLICERS resource), program CPU policers and trap groups,
 * then register the common and ASIC-specific listeners.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		goto err_traps_register;

	/* Per-ASIC extra listeners (e.g. SP1 PTP FIFO events). */
	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}

/* Tear down in reverse registration order. */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	kfree(mlxsw_sp->trap);
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure the LAG hash (SLCR) with a seed derived from the base MAC so
 * different switches hash differently, and allocate the LAG upper table
 * sized by the MAX_LAG resource.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 4847 { 4848 char htgt_pl[MLXSW_REG_HTGT_LEN]; 4849 4850 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 4851 MLXSW_REG_HTGT_INVALID_POLICER, 4852 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 4853 MLXSW_REG_HTGT_DEFAULT_TC); 4854 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 4855 } 4856 4857 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 4858 .clock_init = mlxsw_sp1_ptp_clock_init, 4859 .clock_fini = mlxsw_sp1_ptp_clock_fini, 4860 .init = mlxsw_sp1_ptp_init, 4861 .fini = mlxsw_sp1_ptp_fini, 4862 .receive = mlxsw_sp1_ptp_receive, 4863 .transmitted = mlxsw_sp1_ptp_transmitted, 4864 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 4865 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, 4866 .shaper_work = mlxsw_sp1_ptp_shaper_work, 4867 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 4868 .get_stats_count = mlxsw_sp1_get_stats_count, 4869 .get_stats_strings = mlxsw_sp1_get_stats_strings, 4870 .get_stats = mlxsw_sp1_get_stats, 4871 }; 4872 4873 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 4874 .clock_init = mlxsw_sp2_ptp_clock_init, 4875 .clock_fini = mlxsw_sp2_ptp_clock_fini, 4876 .init = mlxsw_sp2_ptp_init, 4877 .fini = mlxsw_sp2_ptp_fini, 4878 .receive = mlxsw_sp2_ptp_receive, 4879 .transmitted = mlxsw_sp2_ptp_transmitted, 4880 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 4881 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 4882 .shaper_work = mlxsw_sp2_ptp_shaper_work, 4883 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 4884 .get_stats_count = mlxsw_sp2_get_stats_count, 4885 .get_stats_strings = mlxsw_sp2_get_stats_strings, 4886 .get_stats = mlxsw_sp2_get_stats, 4887 }; 4888 4889 static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed) 4890 { 4891 return mtu * 5 / 2; 4892 } 4893 4894 static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = { 4895 .buffsize_get = mlxsw_sp1_span_buffsize_get, 4896 }; 4897 4898 #define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38 4899 #define 
MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50 4900 4901 static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor) 4902 { 4903 return 3 * mtu + buffer_factor * speed / 1000; 4904 } 4905 4906 static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed) 4907 { 4908 int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR; 4909 4910 return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 4911 } 4912 4913 static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = { 4914 .buffsize_get = mlxsw_sp2_span_buffsize_get, 4915 }; 4916 4917 static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) 4918 { 4919 int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR; 4920 4921 return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 4922 } 4923 4924 static const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = { 4925 .buffsize_get = mlxsw_sp3_span_buffsize_get, 4926 }; 4927 4928 u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed) 4929 { 4930 u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu); 4931 4932 return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1; 4933 } 4934 4935 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 4936 unsigned long event, void *ptr); 4937 4938 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 4939 const struct mlxsw_bus_info *mlxsw_bus_info, 4940 struct netlink_ext_ack *extack) 4941 { 4942 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4943 int err; 4944 4945 mlxsw_sp->core = mlxsw_core; 4946 mlxsw_sp->bus_info = mlxsw_bus_info; 4947 4948 err = mlxsw_sp_fw_rev_validate(mlxsw_sp); 4949 if (err) 4950 return err; 4951 4952 mlxsw_core_emad_string_tlv_enable(mlxsw_core); 4953 4954 err = mlxsw_sp_base_mac_get(mlxsw_sp); 4955 if (err) { 4956 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 4957 return err; 4958 } 4959 4960 err = mlxsw_sp_kvdl_init(mlxsw_sp); 4961 if (err) { 4962 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 4963 return err; 4964 } 4965 
4966 err = mlxsw_sp_fids_init(mlxsw_sp); 4967 if (err) { 4968 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 4969 goto err_fids_init; 4970 } 4971 4972 err = mlxsw_sp_traps_init(mlxsw_sp); 4973 if (err) { 4974 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 4975 goto err_traps_init; 4976 } 4977 4978 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 4979 if (err) { 4980 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 4981 goto err_devlink_traps_init; 4982 } 4983 4984 err = mlxsw_sp_buffers_init(mlxsw_sp); 4985 if (err) { 4986 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 4987 goto err_buffers_init; 4988 } 4989 4990 err = mlxsw_sp_lag_init(mlxsw_sp); 4991 if (err) { 4992 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 4993 goto err_lag_init; 4994 } 4995 4996 /* Initialize SPAN before router and switchdev, so that those components 4997 * can call mlxsw_sp_span_respin(). 4998 */ 4999 err = mlxsw_sp_span_init(mlxsw_sp); 5000 if (err) { 5001 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 5002 goto err_span_init; 5003 } 5004 5005 err = mlxsw_sp_switchdev_init(mlxsw_sp); 5006 if (err) { 5007 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 5008 goto err_switchdev_init; 5009 } 5010 5011 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 5012 if (err) { 5013 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 5014 goto err_counter_pool_init; 5015 } 5016 5017 err = mlxsw_sp_afa_init(mlxsw_sp); 5018 if (err) { 5019 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 5020 goto err_afa_init; 5021 } 5022 5023 err = mlxsw_sp_nve_init(mlxsw_sp); 5024 if (err) { 5025 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 5026 goto err_nve_init; 5027 } 5028 5029 err = mlxsw_sp_acl_init(mlxsw_sp); 5030 if (err) { 5031 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 5032 goto err_acl_init; 5033 } 5034 5035 
	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Unwind ladder: each label tears down exactly what was set up before
	 * the corresponding init failed, in reverse order of initialization.
	 */
err_ports_create:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	/* PTP state and clock only exist when a clock was created; fini order
	 * mirrors init: PTP state first, then the clock itself.
	 */
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 init: bind the SP1-specific ops tables, required firmware
 * revision and trap listeners, then run the common mlxsw_sp_init().
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-2 init: same pattern as SP1 with the SP2 ops tables. */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-3 init: reuses the SP2 ops tables except for the SPAN ops and
 * the lowest shaper burst size. NOTE(review): req_rev/fw_filename are not
 * set here, so no firmware revision appears to be enforced for SP3.
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Common teardown for all ASIC generations; exact reverse order of the
 * corresponding initialization in mlxsw_sp_init().
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP was only initialized when a clock was created. */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

/* Spectrum-1 device configuration profile; includes explicit KVD
 * partitioning (linear/hash-single/hash-double).
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_kvd_sizes = 1,
	.kvd_hash_single_parts = 59,
	.kvd_hash_double_parts = 41,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Spectrum-2+ profile: same as SP1 but without KVD partitioning, which
 * these ASICs do not use.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Compute the devlink size-parameter envelopes for the KVD resource and
 * its three partitions from the firmware-reported minima.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct
				      devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* Total KVD size is fixed; each partition may range between its
	 * firmware minimum and whatever remains after the other partitions'
	 * minima, in granularity-sized steps.
	 */
	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the Spectrum-1 KVD resource tree with devlink:
 * kvd -> { linear, hash-single, hash-double }.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split the hash area (KVD minus linear) between double and single
	 * entries by the profile's parts ratio, rounded down to granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

/* Spectrum-2+: KVD is registered as a single monolithic resource. */
static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink,
					 MLXSW_SP_RESOURCE_NAME_KVD,
					 kvd_size, MLXSW_SP_RESOURCE_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &kvd_size_params);
}

/* Expose the SPAN agent count as a fixed-size devlink resource. */
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
					 max_span, MLXSW_SP_RESOURCE_SPAN,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &span_size_params);
}

/* Register all Spectrum-1 devlink resources (KVD tree, SPAN, counters). */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	return 0;

err_resources_counter_register:
err_resources_span_register:
	/* NULL top resource: unregister everything registered so far. */
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

/* Register all Spectrum-2+ devlink resources (flat KVD, SPAN, counters). */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	return 0;

err_resources_counter_register:
err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

/* Resolve the KVD linear/hash-single/hash-double partition sizes: prefer
 * user-configured devlink resource sizes, fall back to profile defaults.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the KVD without the
	 * linear part. It is split into the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be multiples of the granularity
	 * from the profile. In case the user provided the
	 * sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal.
	 */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* devlink 'fw_load_policy' validation: only 'driver' and 'flash' are
 * supported by this driver.
 */
static int
mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
					       union devlink_param_value val,
					       struct netlink_ext_ack *extack)
{
	if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
	    (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

/* devlink parameters common to all ASIC generations. */
static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};

/* Register the common devlink params and set the driverinit default:
 * firmware is loaded by the driver.
 */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}

static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp_devlink_params,
				  ARRAY_SIZE(mlxsw_sp_devlink_params));
}

/* Runtime getter for the ACL region rehash interval devlink param. */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

/* Runtime setter for the ACL region rehash interval devlink param. */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

/* Extra devlink parameters for Spectrum-2+ ASICs. */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};

/* Register common params plus the SP2-specific ones. */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = mlxsw_sp_params_register(mlxsw_core);
	if (err)
		return err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		goto err_devlink_params_register;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;

err_devlink_params_register:
	mlxsw_sp_params_unregister(mlxsw_core);
	return err;
}

static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
	mlxsw_sp_params_unregister(mlxsw_core);
}

/* Hand a transmitted skb back to the PTP code for Tx timestamping. */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	/* Strip the Tx header the driver prepended before passing on. */
	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}

/* mlxsw core driver ops for Spectrum-1. */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind = mlxsw_sp1_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp1_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp1_resources_register,
	/* Only SP1 partitions the KVD, hence only SP1 has kvd_sizes_get. */
	.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
	.params_register = mlxsw_sp_params_register,
	.params_unregister = mlxsw_sp_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp1_config_profile,
	.res_query_enabled = true,
};

/* mlxsw core driver ops for Spectrum-2. */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind = mlxsw_sp2_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp2_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
};

/* mlxsw core driver ops for Spectrum-3 (shares most callbacks with SP2). */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind = mlxsw_sp3_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp3_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
};

/* True iff @dev is an mlxsw_sp front-panel port netdev (identified by its
 * netdev_ops pointer).
 */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

/* netdev lower-device walk callback: stop the walk (return 1) at the first
 * lower device that is an mlxsw_sp port and report it through @data.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

/* Return @dev itself if it is an mlxsw_sp port, otherwise the first
 * mlxsw_sp port found among its lower devices, or NULL.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Resolve the mlxsw_sp instance under @dev, or NULL if none. */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

/* RCU variant of mlxsw_sp_port_dev_lower_find(); caller must hold
 * rcu_read_lock().
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Find the mlxsw_sp port under @dev and take a reference on its netdev.
 * Release with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

/* Make the LAG device and any of its bridge-port uppers leave the bridges
 * they are enslaved to, on behalf of @mlxsw_sp_port.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

/* Create a LAG in hardware via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Destroy a hardware LAG via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Map the port into LAG @lag_id's collector at @port_index (SLCOR). */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Remove the port from LAG @lag_id's collector (SLCOR). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable collection for the port within LAG @lag_id (SLCOR). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Disable collection for the port within LAG @lag_id (SLCOR). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Find the LAG ID already bound to @lag_dev, or the first free one.
 * Returns -EBUSY when all LAG IDs are taken by other devices.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

/* Can this LAG device be offloaded? Requires an available LAG ID and
 * hash-based Tx; reports the reason via extack otherwise.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

/* Find a free member slot (port index) within LAG @lag_id. */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

/* Enslave the port to @lag_dev: create the hardware LAG on first member,
 * map the port into it and update the core LAG mapping.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		/* First member of this LAG — create it in hardware */
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;

err_col_port_add:
	/* ref_count still zero means the LAG was created by this call and
	 * has no other members — tear it down again.
	 */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

/* Undo mlxsw_sp_port_lag_join(): unmap the port, flush its VLANs and
 * destroy the hardware LAG when the last member leaves. No-op when the
 * port is not lagged.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are members of
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	/* Last member — remove the LAG from hardware as well */
	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}

/* Add the port to the LAG's distributor (Tx) set via the SLDR register. */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Remove the port from the LAG's distributor (Tx) set via SLDR. */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Enable both collection and distribution on the port's LAG, rolling the
 * collector back if distributor setup fails.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Disable distribution and collection on the port's LAG, re-adding the
 * distributor if collector teardown fails.
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* React to a LAG lower-state change: mirror the member's tx_enabled state
 * into the hardware collector/distributor configuration.
 */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

/* Set the port's STP state (forwarding / discarding) for all VLANs via
 * the SPMS register. The payload is heap-allocated because SPMS carries
 * an entry per VLAN and is too large for the stack.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Prepare a port for OVS enslavement: switch to Virtual Port mode, set
 * STP to forwarding, allow all VLANs and disable learning on them.
 * (Error unwind continues below.)
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning on the VLANs already processed */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

/* Undo mlxsw_sp_port_ovs_join() in reverse order: re-enable learning,
 * drop the VLANs, set STP to discarding and leave Virtual Port mode.
 */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

/* Return true when the bridge has more than one VxLAN lower device. */
static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

/* Return false when two VxLAN devices under the bridge map to the same
 * VLAN — tracked with a VLAN_N_VID-wide bitmap of seen PVIDs.
 */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

/* Validate that a bridge with VxLAN lowers can be offloaded; on failure
 * an explanation is reported via extack.
 */
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

/* Handle PRECHANGEUPPER/CHANGEUPPER events for a front-panel port:
 * validate the requested topology change, then join/leave the bridge,
 * LAG, OVS master or macvlan accordingly. (Body continues below.)
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}

break; 6378 } 6379 6380 return err; 6381 } 6382 6383 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 6384 unsigned long event, void *ptr) 6385 { 6386 struct netdev_notifier_changelowerstate_info *info; 6387 struct mlxsw_sp_port *mlxsw_sp_port; 6388 int err; 6389 6390 mlxsw_sp_port = netdev_priv(dev); 6391 info = ptr; 6392 6393 switch (event) { 6394 case NETDEV_CHANGELOWERSTATE: 6395 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 6396 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 6397 info->lower_state_info); 6398 if (err) 6399 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 6400 } 6401 break; 6402 } 6403 6404 return 0; 6405 } 6406 6407 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 6408 struct net_device *port_dev, 6409 unsigned long event, void *ptr) 6410 { 6411 switch (event) { 6412 case NETDEV_PRECHANGEUPPER: 6413 case NETDEV_CHANGEUPPER: 6414 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 6415 event, ptr); 6416 case NETDEV_CHANGELOWERSTATE: 6417 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 6418 ptr); 6419 } 6420 6421 return 0; 6422 } 6423 6424 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 6425 unsigned long event, void *ptr) 6426 { 6427 struct net_device *dev; 6428 struct list_head *iter; 6429 int ret; 6430 6431 netdev_for_each_lower_dev(lag_dev, dev, iter) { 6432 if (mlxsw_sp_port_dev_check(dev)) { 6433 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 6434 ptr); 6435 if (ret) 6436 return ret; 6437 } 6438 } 6439 6440 return 0; 6441 } 6442 6443 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 6444 struct net_device *dev, 6445 unsigned long event, void *ptr, 6446 u16 vid) 6447 { 6448 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 6449 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6450 struct netdev_notifier_changeupper_info *info = ptr; 6451 struct netlink_ext_ack *extack; 6452 
struct net_device *upper_dev; 6453 int err = 0; 6454 6455 extack = netdev_notifier_info_to_extack(&info->info); 6456 6457 switch (event) { 6458 case NETDEV_PRECHANGEUPPER: 6459 upper_dev = info->upper_dev; 6460 if (!netif_is_bridge_master(upper_dev) && 6461 !netif_is_macvlan(upper_dev)) { 6462 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6463 return -EINVAL; 6464 } 6465 if (!info->linking) 6466 break; 6467 if (netif_is_bridge_master(upper_dev) && 6468 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 6469 mlxsw_sp_bridge_has_vxlan(upper_dev) && 6470 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 6471 return -EOPNOTSUPP; 6472 if (netdev_has_any_upper_dev(upper_dev) && 6473 (!netif_is_bridge_master(upper_dev) || 6474 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 6475 upper_dev))) { 6476 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 6477 return -EINVAL; 6478 } 6479 if (netif_is_macvlan(upper_dev) && 6480 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 6481 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6482 return -EOPNOTSUPP; 6483 } 6484 break; 6485 case NETDEV_CHANGEUPPER: 6486 upper_dev = info->upper_dev; 6487 if (netif_is_bridge_master(upper_dev)) { 6488 if (info->linking) 6489 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 6490 vlan_dev, 6491 upper_dev, 6492 extack); 6493 else 6494 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 6495 vlan_dev, 6496 upper_dev); 6497 } else if (netif_is_macvlan(upper_dev)) { 6498 if (!info->linking) 6499 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6500 } else { 6501 err = -EINVAL; 6502 WARN_ON(1); 6503 } 6504 break; 6505 } 6506 6507 return err; 6508 } 6509 6510 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 6511 struct net_device *lag_dev, 6512 unsigned long event, 6513 void *ptr, u16 vid) 6514 { 6515 struct net_device *dev; 6516 struct list_head *iter; 6517 int ret; 6518 6519 
netdev_for_each_lower_dev(lag_dev, dev, iter) { 6520 if (mlxsw_sp_port_dev_check(dev)) { 6521 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 6522 event, ptr, 6523 vid); 6524 if (ret) 6525 return ret; 6526 } 6527 } 6528 6529 return 0; 6530 } 6531 6532 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, 6533 struct net_device *br_dev, 6534 unsigned long event, void *ptr, 6535 u16 vid) 6536 { 6537 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 6538 struct netdev_notifier_changeupper_info *info = ptr; 6539 struct netlink_ext_ack *extack; 6540 struct net_device *upper_dev; 6541 6542 if (!mlxsw_sp) 6543 return 0; 6544 6545 extack = netdev_notifier_info_to_extack(&info->info); 6546 6547 switch (event) { 6548 case NETDEV_PRECHANGEUPPER: 6549 upper_dev = info->upper_dev; 6550 if (!netif_is_macvlan(upper_dev)) { 6551 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6552 return -EOPNOTSUPP; 6553 } 6554 if (!info->linking) 6555 break; 6556 if (netif_is_macvlan(upper_dev) && 6557 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 6558 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6559 return -EOPNOTSUPP; 6560 } 6561 break; 6562 case NETDEV_CHANGEUPPER: 6563 upper_dev = info->upper_dev; 6564 if (info->linking) 6565 break; 6566 if (netif_is_macvlan(upper_dev)) 6567 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6568 break; 6569 } 6570 6571 return 0; 6572 } 6573 6574 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 6575 unsigned long event, void *ptr) 6576 { 6577 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 6578 u16 vid = vlan_dev_vlan_id(vlan_dev); 6579 6580 if (mlxsw_sp_port_dev_check(real_dev)) 6581 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 6582 event, ptr, vid); 6583 else if (netif_is_lag_master(real_dev)) 6584 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 6585 real_dev, event, 6586 ptr, vid); 6587 else if 
	    (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

/* Handle PRECHANGEUPPER/CHANGEUPPER for a bridge device itself: only VLAN
 * and macvlan uppers are supported; on unlinking, tear down the matching
 * router interface state.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Ignore bridges not backed by an mlxsw device */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* A macvlan over an mlxsw device cannot have uppers of its own: reject
 * any PRECHANGEUPPER on it.
 */
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

/* Return true when the event is a (PRE)CHANGEUPPER whose upper is a VRF
 * (L3 master) device.
 */
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

/* Handle events on a VxLAN device: join/leave the hardware bridge model
 * when the device is linked to / unlinked from an offloaded bridge, or
 * when it goes up/down underneath one.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

/* Top-level netdevice notifier: invalidate SPAN entries on unregister,
 * re-resolve SPAN state, then dispatch the event to the handler matching
 * the device type. (Dispatch chain ends below.)
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* NOTE: the VxLAN check is deliberately not part of the else-if
	 * chain below — a VxLAN device may also match one of the later
	 * branches, whose result then overrides 'err'.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

/* Validator notifiers: vet IPv4/IPv6 address additions before they are
 * committed (handlers live in the router code).
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

/* PCI ID tables and per-ASIC PCI drivers for Spectrum-1/2/3. */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

/* Module entry point: register the address-validator notifiers first
 * (before any device can probe), then the three core drivers, then the
 * three PCI drivers. Unwinds in strict reverse order on failure.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

/* Module exit: mirror of mlxsw_sp_module_init() in reverse order. */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);