// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"
#include "../mlxfw/mlxfw.h"

/* Firmware version the Spectrum-1 driver requires. CAN_RESET_MINOR is the
 * oldest minor for which a post-flash FW reset is supported (see its use in
 * mlxsw_sp_fw_rev_validate() below).
 */
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
#define MLXSW_SP1_FWREV_SUBMINOR 2714
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

/* Firmware version the Spectrum-2 driver requires. No can_reset_minor:
 * all supported SP2 firmware revisions can be reset after flashing.
 */
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2000
#define MLXSW_SP2_FWREV_SUBMINOR 2714

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP2_FWREV_MINOR,
	.subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP2_FWREV_MINOR) \
	"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp_driver_version[] = "1.0";

/* Masks over the device base MAC address; the unmasked low bits are
 * presumably the per-port range derived from it (10 bits on SP1, 12 on
 * SP2) — NOTE(review): confirm against the ASIC datasheet.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

/* Glue between the generic mlxfw flashing framework and this driver:
 * lets the mlxfw_dev callbacks recover the owning mlxsw_sp instance.
 */
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

/* PTP hooks; a concrete implementation is selected per ASIC generation
 * (NOTE(review): presumably SP1 vs SP2+ — confirm where the ops are bound).
 */
struct mlxsw_sp_ptp_ops {
	struct mlxsw_sp_ptp_clock *
		(*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev);
	void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock);

	struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp);
	void (*fini)(struct mlxsw_sp_ptp_state *ptp_state);

	/* Notify a driver that a packet that might be PTP was received. Driver
	 * is responsible for freeing the passed-in SKB.
	 */
	void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			u8 local_port);

	/* Notify a driver that a timestamped packet was transmitted. Driver
	 * is responsible for freeing the passed-in SKB.
	 */
	void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			    u8 local_port);

	int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	void (*shaper_work)(struct work_struct *work);
	int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
			   struct ethtool_ts_info *info);
	int (*get_stats_count)(void);
	void (*get_stats_strings)(u8 **p);
	void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  u64 *data, int data_index);
};

/* Per-ASIC SPAN (mirroring) hook: computes the mirror buffer size for a
 * given port MTU and speed.
 */
struct mlxsw_sp_span_ops {
	u32 (*buffsize_get)(int mtu, u32 speed);
};

/* mlxfw callback: query a firmware component's attributes via the MCQI
 * register. Alignment is clamped to at least 4 bytes (2 bits) and the
 * write size to the MCDA payload limit before reporting them upward.
 */
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

/* mlxfw callback: acquire the FW update handle. Fails with -EBUSY unless
 * the device's update FSM is idle.
 */
static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: announce the size of the component about to be
 * downloaded under the given handle.
 */
static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: download one data block of the component via MCDA. */
static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

/* mlxfw callback: ask the device to verify the downloaded component. */
static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: activate the newly flashed firmware image. */
static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: read back the FSM state and error code. The raw error
 * code is clamped to MLXFW_FSM_STATE_ERR_MAX so it always maps onto the
 * mlxfw_fsm_state_err enum.
 */
static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

/* mlxfw callback: abort an in-progress update. Best-effort — the write
 * status is deliberately ignored (void callback).
 */
static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: release the update handle taken by mlxsw_sp_fsm_lock().
 * Best-effort — the write status is deliberately ignored (void callback).
 */
static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query	= mlxsw_sp_component_query,
	.fsm_lock		= mlxsw_sp_fsm_lock,
	.fsm_component_update	= mlxsw_sp_fsm_component_update,
	.fsm_block_download	= mlxsw_sp_fsm_block_download,
	.fsm_component_verify	= mlxsw_sp_fsm_component_verify,
	.fsm_activate		= mlxsw_sp_fsm_activate,
	.fsm_query_state	= mlxsw_sp_fsm_query_state,
	.fsm_cancel		= mlxsw_sp_fsm_cancel,
	.fsm_release		= mlxsw_sp_fsm_release,
};

/* Flash @firmware through the generic mlxfw framework, bracketed by the
 * core's flash start/end notifications.
 */
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
			.devlink = priv_to_devlink(mlxsw_sp->core),
		},
		.mlxsw_sp = mlxsw_sp
	};
	int err;

	mlxsw_core_fw_flash_start(mlxsw_sp->core);
	err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
				   firmware, extack);
	mlxsw_core_fw_flash_end(mlxsw_sp->core);

	return err;
}

/* Validate the running FW against the revision this driver requires and,
 * on mismatch, flash the bundled image.
 *
 * Return: 0 when the driver may proceed with the current FW, -EAGAIN when
 * a new image was flashed and the caller must reset the FW, negative errno
 * otherwise. NOTE(review): when flashing fails and the running FW predates
 * can_reset_minor, the error is only logged and 0 is returned — confirm
 * that silently continuing on old FW is intentional here.
 */
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
	const char *fw_filename = mlxsw_sp->fw_filename;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !fw_filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
						 &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
		return 0;

	dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
		rev->major, rev->minor, rev->subminor, req_rev->major,
		req_rev->minor, req_rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
		 fw_filename);

	err = request_firmware_direct(&firmware, fw_filename,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			fw_filename);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}

/* devlink flash_update entry point. Per-component flashing is not
 * supported, hence -EOPNOTSUPP for a non-NULL @component.
 */
static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
				 const char *file_name, const char *component,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	const struct firmware *firmware;
	int err;

	if (component)
		return -EOPNOTSUPP;

	err = request_firmware_direct(&firmware, file_name,
				      mlxsw_sp->bus_info->dev);
	if (err)
		return err;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
	release_firmware(firmware);

	return err;
}

/* Read a flow counter's packet/byte values via MGPC. Either output
 * pointer may be NULL when the caller only needs the other value.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

/* Zero a flow counter in hardware. */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

/* Allocate a flow counter and clear it so the caller starts from zero.
 * The counter is released again if the clear fails.
 */
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp,
			      MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

/* Return a flow counter to the flow sub-pool. */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

/* Prepend the mlxsw Tx header to @skb. The header is built as an Ethernet
 * control packet directed at tx_info->local_port, using the control TClass.
 * Caller must have ensured MLXSW_TXHDR_LEN bytes of headroom.
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

/* Map a bridge STP state to the device SPMS state. Listening, disabled
 * and blocking all collapse to discarding. An unknown state is a driver
 * bug, hence BUG().
 */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

/* Program the STP state of @vid on the port via SPMS. The payload is heap
 * allocated — NOTE(review): presumably SPMS is too large for the stack;
 * confirm MLXSW_REG_SPMS_LEN.
 */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Read the switch base MAC address from the SPAD register into
 * mlxsw_sp->base_mac.
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

/* Enable/disable packet sampling on the port at 1-in-@rate via MPSC. */
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

/* Set the port administrative state (up/down) via PAOS. */
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

/* Program the port's hardware MAC address via PPAD. */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

/* Derive the port MAC from the switch base MAC by adding local_port to
 * the last byte, and program it into both netdev and hardware.
 * NOTE(review): the addition can wrap within the last byte without
 * carrying — presumably the base-MAC masks above guarantee enough
 * zero low bits; confirm.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

/* Program the port MTU via PMTU. The wire MTU includes the mlxsw Tx
 * header and the Ethernet header on top of the L3 MTU, and is validated
 * against the device's reported maximum.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

/* Assign the port to switch partition @swid via PSPA. */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

/* Enable/disable Virtual Port (VLAN-aware) mode on the port via SVPE. */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

/* Enable/disable FDB learning for @vid on the port via SPVMLR. */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

/* Program the port PVID via SPVID (no allow-untagged handling; see
 * mlxsw_sp_port_pvid_set() for the full sequence).
 */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid),
			       spvid_pl);
}

/* Allow or disallow untagged frames on ingress via SPAFT. */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

/* Set the port PVID. vid == 0 means "no PVID": untagged frames are
 * dropped instead. On failure to re-allow untagged traffic, the previous
 * PVID is restored (best effort) before returning the error.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

/* Create the local-port to system-port mapping via SSPR. */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

/* Read the port's module/lane mapping from PMLP and validate the
 * configurations this driver supports: power-of-two width, a single
 * module across all lanes, Tx lane == Rx lane, and sequential lanes.
 */
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	bool separate_rxtx;
	u8 module;
	u8 width;
	int err;
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

/* Write the port's stored module/lane mapping back to hardware via PMLP. */
static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* Unmap the port from its module by programming a zero width. */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* ndo_open: bring the port administratively up, then start the Tx queue. */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

/* ndo_stop: stop the Tx queue, then bring the port administratively down. */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

/* ndo_start_xmit: prepend the mlxsw Tx header and hand the skb to the
 * core for transmission, maintaining per-CPU Tx statistics. Headroom for
 * the Tx header is secured (skb_cow_head) before the busy check so the
 * skb can be requeued unmodified on NETDEV_TX_BUSY.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* eth_skb_pad() frees the skb on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

/* ndo_set_rx_mode: nothing to do — the ASIC handles forwarding; kept so
 * the stack does not fall back to default behavior.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

/* ndo_set_mac_address: validate and program a new MAC, updating the
 * netdev copy only after hardware accepted it.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

/* Per-PG xoff threshold: two MTUs worth of cells. */
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

/* PFC delay allowance in cells: the peer-advertised @delay (in bits) plus
 * one MTU of slack, scaled by the cell factor above.
 */
static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

/* Extra headroom (cells) a PG needs while flow control drains the wire:
 * PFC uses the negotiated delay, global pause a fixed worst case, and
 * lossy traffic none.
 */
static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

/* Pack one priority-group buffer into PBMC as lossy (no threshold) or
 * lossless (with xoff threshold).
 */
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

/* Size the port's headroom buffers via PBMC. For every PG that some
 * priority maps to (per @prio_tc), compute threshold + delay cells and
 * mark it lossless when PFC or pause applies. Fails with -ENOBUFS if the
 * summed headroom would exceed the port's budget.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	u32 taken_headroom_cells = 0;
	u32 max_headroom_cells;
	int i, j, err;

	max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		u16 thres_cells;
		u16 delay_cells;
		u16 total_cells;
		bool lossy;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
							pfc, pause_en);
		total_cells = thres_cells + delay_cells;

		taken_headroom_cells += total_cells;
		if (taken_headroom_cells > max_headroom_cells)
			return -ENOBUFS;

		mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
				     thres_cells, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

/* Headroom update with the port's current DCB priority map and PFC
 * settings (or defaults when DCB ETS is not configured).
 */
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

/* ndo_change_mtu: update headroom, SPAN buffers and the hardware MTU in
 * order, rolling back the earlier steps (best effort) on failure.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* Aggregate the per-CPU software (CPU-path) counters into @stats, using
 * the u64_stats seqcount retry loop for a consistent snapshot per CPU.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets =
p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

/* ndo_has_offload_stats: only CPU-hit (trapped traffic) statistics are
 * offloaded by this driver.
 */
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

/* ndo_get_offload_stats: report the software (CPU-hit) counters for the
 * attribute advertised above.
 */
static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

/* Query one PPCNT counter group (@grp) at priority/TC @prio for this port
 * into @ppcnt_pl, which must be MLXSW_REG_PPCNT_LEN bytes.
 */
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

/* Fill @stats from the hardware IEEE 802.3 counter group. rx_errors and
 * rx_length_errors are synthesized from the individual error counters.
 */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

/* Gather extended hardware statistics (ECN marks, per-TC WRED/tail drops and
 * backlog, per-priority tx counters) into @xstats. Individual query failures
 * are tolerated: a failed group simply leaves its slots unchanged.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

/* Periodic worker that refreshes the cached hardware statistics, so that
 * ndo_get_stats64 can be served from memory in atomic context. Re-arms
 * itself unconditionally.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program one contiguous VLAN range into the port's VLAN membership via the
 * SPVM register. The payload is heap-allocated because SPVM is too large for
 * the kernel stack.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Set VLAN membership for [vid_begin, vid_end], chunked to the maximum
 * number of records a single SPVM write can carry. Stops at the first
 * error; already-programmed chunks are not rolled back here.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Destroy all VLANs configured on the port, optionally sparing the default
 * VLAN (@flush_default == false keeps MLXSW_SP_DEFAULT_VID).
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

/* Detach the {port, VLAN} from whichever entity currently uses it: a bridge
 * port takes precedence over a router FID; at most one applies.
 */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

/* Create a {port, VLAN} entry: program VLAN membership in hardware (the
 * default VID is egress-untagged), then track it on the port's VLAN list.
 * Returns ERR_PTR(-EEXIST) if the VID is already configured.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

/* Tear down a {port, VLAN} entry: detach it from bridge/router usage, drop
 * the tracking entry, and finally remove VLAN membership from hardware.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

/* ndo_vlan_rx_add_vid handler. */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

/* ndo_vlan_rx_kill_vid handler. Removing a VID that was never added is not
 * an error.
 */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

/* Look up a matchall TC entry on the port by its filter cookie, or NULL. */
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

/* Offload a matchall mirred action as a SPAN (port mirroring) session
 * towards act->dev, recording the resulting SPAN id in @mirror.
 */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;

	if (!act->dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
					true, &mirror->span_id);
}

/* Remove the SPAN session previously created for a matchall mirror. */
static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
				 span_type, true);
}

/* Offload a matchall sample action. Only one sampling session per port is
 * supported, and the rate is bounded by the MPSC register. The psample
 * group pointer is published under RCU before sampling is enabled in
 * hardware, and cleared again on failure.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   act->sample.psample_group);
	mlxsw_sp_port->sample->truncate = act->sample.truncate;
	mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
	mlxsw_sp_port->sample->rate = act->sample.rate;

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

/* Disable hardware sampling on the port and unpublish the psample group. */
static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

/* Install a matchall classifier offload. Exactly one action is supported
 * (mirred -> SPAN mirror, or sample), and only for protocol "all".
 */
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	int err;

	if (!flow_offload_has_one_action(&f->rule->action)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	act = &f->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, act,
							    ingress);
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    act, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

/* Remove a matchall offload previously installed for f->cookie; silently
 * ignores unknown cookies (the entry may never have been offloaded).
 */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

/* Dispatch a matchall classifier command to the add/del handlers above. */
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct
tc_cls_matchall_offload *f,
					  bool ingress)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Dispatch a flower classifier command against the port's ACL block. */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case FLOW_CLS_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case FLOW_CLS_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Block callback handling only matchall classifiers; flower is handled by
 * a separate per-block callback (see mlxsw_sp_setup_tc_block_cb_flower), so
 * CLSFLOWER is accepted here as a no-op rather than rejected.
 */
static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Ingress flavor of the matchall block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}

/* Egress flavor of the matchall block callback. */
static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}

/* Block callback handling only flower classifiers; the mirror image of
 * the matchall callback above. Rejects flower rules while the ACL block
 * is disabled (hw-tc-offload turned off).
 */
static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* flow_block_cb release hook: frees the ACL block once the last user of the
 * shared flower callback goes away.
 */
static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	mlxsw_sp_acl_block_destroy(acl_block);
}

static LIST_HEAD(mlxsw_sp_block_cb_list);

/* Bind a port to the flower offload of a TC block. ACL blocks (and their
 * flow_block_cb) are shared between all ports bound to the same TC block:
 * the first binder allocates, later binders only take a reference.
 */
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb) {
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
					       mlxsw_sp, acl_block,
					       mlxsw_sp_tc_block_flower_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_acl_block_destroy(acl_block);
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
		register_block = true;
	} else {
		acl_block = flow_block_cb_priv(block_cb);
	}
	flow_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress, f->extack);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	/* Only the first binder registers the callback with the TC core. */
	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
	}

	return 0;

err_block_bind:
	/* decref frees via the release hook when this was the last (only)
	 * reference, i.e. when we allocated the block_cb above.
	 */
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
err_cb_register:
	return err;
}

/* Undo mlxsw_sp_setup_tc_block_flower_bind(); the shared block_cb is
 * unregistered and released only when the last port reference drops and
 * the hardware unbind succeeded.
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = flow_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

/* TC_SETUP_BLOCK handler: registers a per-port matchall callback for the
 * block's direction and binds/unbinds the shared flower offload.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	f->driver_block_list = &mlxsw_sp_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, mlxsw_sp_port,
					  &mlxsw_sp_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
					       mlxsw_sp_port, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);
		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
							  ingress);
		if (err) {
			flow_block_cb_free(block_cb);
			return err;
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
						      f, ingress);
		block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* ndo_setup_tc: dispatch TC offload requests by type to the block handler
 * or to the qdisc-specific offload implementations.
 */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}


/* NETIF_F_HW_TC feature toggle. Turning it off is refused while offloaded
 * rules exist; otherwise it marks the port's ACL blocks disabled so new
 * flower rules are rejected (see mlxsw_sp_setup_tc_block_cb_flower).
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
	} else {
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
	}
	return 0;
}

/* NETIF_F_LOOPBACK feature toggle: program physical loopback via the PPLR
 * register. The port is administratively brought down around the change
 * and restored afterwards even if the register write failed.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Apply @feature_handler if @feature is actually toggled by
 * @wanted_features, and mirror the result into dev->features on success.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}
/* ndo_set_features: try each supported feature; if any handler failed,
 * restore the pre-call feature set and report -EINVAL. (err is only used
 * as a zero/non-zero flag here, hence the |= accumulation.)
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

/* ndo_get_devlink_port: map the netdev to its devlink port object. */
static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

/* SIOCSHWTSTAMP: hand the user's hwtstamp config to the per-ASIC PTP ops
 * and copy back the (possibly adjusted) accepted configuration.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* SIOCGHWTSTAMP: report the current hardware timestamping configuration. */
static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* Reset hardware timestamping to the all-zero (disabled) configuration. */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

/* ndo_do_ioctl: only the hardware timestamping ioctls are supported. */
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/* Netdev operations for Spectrum front-panel ports. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};

/* ethtool get_drvinfo: driver/firmware identification strings. */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

/* ethtool get_pauseparam: report the cached PAUSE configuration. */
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

/* Program PAUSE rx/tx enablement in hardware via the PFCC register. */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

/* ethtool set_pauseparam. PAUSE is mutually exclusive with PFC, and pause
 * autoneg is not supported. Headroom is resized for the new configuration
 * before PAUSE itself is enabled, and rolled back if that fails.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* One ethtool statistic: its name, the PPCNT payload accessor that extracts
 * it, and whether the raw value is in buffer cells (needing conversion to
 * bytes before being reported).
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

/* IEEE 802.3 counter group exposed via ethtool -S. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

/* RFC 2863 interface counter group exposed via ethtool -S. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
	{
		.str = "if_in_discards",
		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
	},
	{
		.str = "if_out_discards",
		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
	},
	{
		.str = "if_out_errors",
		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)

/* RFC 2819 (RMON) counter group exposed via ethtool -S. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
	{
		.str = "ether_stats_undersize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
	},
	{
		.str = "ether_stats_oversize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
	},
	{
		.str = "ether_stats_fragments",
		.getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
	},
	{
		.str = "ether_pkts64octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
	},
	{
		.str = "ether_pkts65to127octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
	},
	{
		.str = "ether_pkts128to255octets",
.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, 2178 }, 2179 { 2180 .str = "ether_pkts256to511octets", 2181 .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, 2182 }, 2183 { 2184 .str = "ether_pkts512to1023octets", 2185 .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, 2186 }, 2187 { 2188 .str = "ether_pkts1024to1518octets", 2189 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, 2190 }, 2191 { 2192 .str = "ether_pkts1519to2047octets", 2193 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, 2194 }, 2195 { 2196 .str = "ether_pkts2048to4095octets", 2197 .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, 2198 }, 2199 { 2200 .str = "ether_pkts4096to8191octets", 2201 .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get, 2202 }, 2203 { 2204 .str = "ether_pkts8192to10239octets", 2205 .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get, 2206 }, 2207 }; 2208 2209 #define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ 2210 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) 2211 2212 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = { 2213 { 2214 .str = "dot3stats_fcs_errors", 2215 .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get, 2216 }, 2217 { 2218 .str = "dot3stats_symbol_errors", 2219 .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get, 2220 }, 2221 { 2222 .str = "dot3control_in_unknown_opcodes", 2223 .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get, 2224 }, 2225 { 2226 .str = "dot3in_pause_frames", 2227 .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get, 2228 }, 2229 }; 2230 2231 #define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \ 2232 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats) 2233 2234 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_ext_stats[] = { 2235 { 2236 .str = "ecn_marked", 2237 .getter = mlxsw_reg_ppcnt_ecn_marked_get, 2238 }, 2239 }; 2240 2241 #define MLXSW_SP_PORT_HW_EXT_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_ext_stats) 2242 2243 static 
struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = { 2244 { 2245 .str = "discard_ingress_general", 2246 .getter = mlxsw_reg_ppcnt_ingress_general_get, 2247 }, 2248 { 2249 .str = "discard_ingress_policy_engine", 2250 .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get, 2251 }, 2252 { 2253 .str = "discard_ingress_vlan_membership", 2254 .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get, 2255 }, 2256 { 2257 .str = "discard_ingress_tag_frame_type", 2258 .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get, 2259 }, 2260 { 2261 .str = "discard_egress_vlan_membership", 2262 .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get, 2263 }, 2264 { 2265 .str = "discard_loopback_filter", 2266 .getter = mlxsw_reg_ppcnt_loopback_filter_get, 2267 }, 2268 { 2269 .str = "discard_egress_general", 2270 .getter = mlxsw_reg_ppcnt_egress_general_get, 2271 }, 2272 { 2273 .str = "discard_egress_hoq", 2274 .getter = mlxsw_reg_ppcnt_egress_hoq_get, 2275 }, 2276 { 2277 .str = "discard_egress_policy_engine", 2278 .getter = mlxsw_reg_ppcnt_egress_policy_engine_get, 2279 }, 2280 { 2281 .str = "discard_ingress_tx_link_down", 2282 .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get, 2283 }, 2284 { 2285 .str = "discard_egress_stp_filter", 2286 .getter = mlxsw_reg_ppcnt_egress_stp_filter_get, 2287 }, 2288 { 2289 .str = "discard_egress_sll", 2290 .getter = mlxsw_reg_ppcnt_egress_sll_get, 2291 }, 2292 }; 2293 2294 #define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \ 2295 ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats) 2296 2297 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 2298 { 2299 .str = "rx_octets_prio", 2300 .getter = mlxsw_reg_ppcnt_rx_octets_get, 2301 }, 2302 { 2303 .str = "rx_frames_prio", 2304 .getter = mlxsw_reg_ppcnt_rx_frames_get, 2305 }, 2306 { 2307 .str = "tx_octets_prio", 2308 .getter = mlxsw_reg_ppcnt_tx_octets_get, 2309 }, 2310 { 2311 .str = "tx_frames_prio", 2312 .getter = mlxsw_reg_ppcnt_tx_frames_get, 2313 }, 2314 { 2315 .str = "rx_pause_prio", 2316 
.getter = mlxsw_reg_ppcnt_rx_pause_get, 2317 }, 2318 { 2319 .str = "rx_pause_duration_prio", 2320 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 2321 }, 2322 { 2323 .str = "tx_pause_prio", 2324 .getter = mlxsw_reg_ppcnt_tx_pause_get, 2325 }, 2326 { 2327 .str = "tx_pause_duration_prio", 2328 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 2329 }, 2330 }; 2331 2332 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 2333 2334 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 2335 { 2336 .str = "tc_transmit_queue_tc", 2337 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 2338 .cells_bytes = true, 2339 }, 2340 { 2341 .str = "tc_no_buffer_discard_uc_tc", 2342 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 2343 }, 2344 }; 2345 2346 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 2347 2348 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 2349 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \ 2350 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ 2351 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \ 2352 MLXSW_SP_PORT_HW_EXT_STATS_LEN + \ 2353 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \ 2354 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ 2355 IEEE_8021QAZ_MAX_TCS) + \ 2356 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ 2357 TC_MAX_QUEUE)) 2358 2359 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 2360 { 2361 int i; 2362 2363 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 2364 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 2365 mlxsw_sp_port_hw_prio_stats[i].str, prio); 2366 *p += ETH_GSTRING_LEN; 2367 } 2368 } 2369 2370 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 2371 { 2372 int i; 2373 2374 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 2375 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 2376 mlxsw_sp_port_hw_tc_stats[i].str, tc); 2377 *p += ETH_GSTRING_LEN; 2378 } 2379 } 2380 2381 static void mlxsw_sp_port_get_strings(struct net_device *dev, 2382 u32 stringset, u8 *data) 2383 { 2384 
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2385 u8 *p = data; 2386 int i; 2387 2388 switch (stringset) { 2389 case ETH_SS_STATS: 2390 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2391 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2392 ETH_GSTRING_LEN); 2393 p += ETH_GSTRING_LEN; 2394 } 2395 2396 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) { 2397 memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str, 2398 ETH_GSTRING_LEN); 2399 p += ETH_GSTRING_LEN; 2400 } 2401 2402 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { 2403 memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, 2404 ETH_GSTRING_LEN); 2405 p += ETH_GSTRING_LEN; 2406 } 2407 2408 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) { 2409 memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str, 2410 ETH_GSTRING_LEN); 2411 p += ETH_GSTRING_LEN; 2412 } 2413 2414 for (i = 0; i < MLXSW_SP_PORT_HW_EXT_STATS_LEN; i++) { 2415 memcpy(p, mlxsw_sp_port_hw_ext_stats[i].str, 2416 ETH_GSTRING_LEN); 2417 p += ETH_GSTRING_LEN; 2418 } 2419 2420 for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) { 2421 memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str, 2422 ETH_GSTRING_LEN); 2423 p += ETH_GSTRING_LEN; 2424 } 2425 2426 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2427 mlxsw_sp_port_get_prio_strings(&p, i); 2428 2429 for (i = 0; i < TC_MAX_QUEUE; i++) 2430 mlxsw_sp_port_get_tc_strings(&p, i); 2431 2432 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p); 2433 break; 2434 } 2435 } 2436 2437 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2438 enum ethtool_phys_id_state state) 2439 { 2440 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2441 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2442 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2443 bool active; 2444 2445 switch (state) { 2446 case ETHTOOL_ID_ACTIVE: 2447 active = true; 2448 break; 2449 case ETHTOOL_ID_INACTIVE: 2450 active = false; 2451 break; 2452 default: 2453 return -EOPNOTSUPP; 2454 } 2455 2456 
mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2457 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2458 } 2459 2460 static int 2461 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2462 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2463 { 2464 switch (grp) { 2465 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2466 *p_hw_stats = mlxsw_sp_port_hw_stats; 2467 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2468 break; 2469 case MLXSW_REG_PPCNT_RFC_2863_CNT: 2470 *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats; 2471 *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2472 break; 2473 case MLXSW_REG_PPCNT_RFC_2819_CNT: 2474 *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; 2475 *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2476 break; 2477 case MLXSW_REG_PPCNT_RFC_3635_CNT: 2478 *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats; 2479 *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2480 break; 2481 case MLXSW_REG_PPCNT_EXT_CNT: 2482 *p_hw_stats = mlxsw_sp_port_hw_ext_stats; 2483 *p_len = MLXSW_SP_PORT_HW_EXT_STATS_LEN; 2484 break; 2485 case MLXSW_REG_PPCNT_DISCARD_CNT: 2486 *p_hw_stats = mlxsw_sp_port_hw_discard_stats; 2487 *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2488 break; 2489 case MLXSW_REG_PPCNT_PRIO_CNT: 2490 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2491 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2492 break; 2493 case MLXSW_REG_PPCNT_TC_CNT: 2494 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2495 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2496 break; 2497 default: 2498 WARN_ON(1); 2499 return -EOPNOTSUPP; 2500 } 2501 return 0; 2502 } 2503 2504 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2505 enum mlxsw_reg_ppcnt_grp grp, int prio, 2506 u64 *data, int data_index) 2507 { 2508 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2509 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2510 struct mlxsw_sp_port_hw_stats *hw_stats; 2511 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2512 int i, len; 2513 int err; 2514 2515 err = 
mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2516 if (err) 2517 return; 2518 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2519 for (i = 0; i < len; i++) { 2520 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2521 if (!hw_stats[i].cells_bytes) 2522 continue; 2523 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2524 data[data_index + i]); 2525 } 2526 } 2527 2528 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2529 struct ethtool_stats *stats, u64 *data) 2530 { 2531 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2532 int i, data_index = 0; 2533 2534 /* IEEE 802.3 Counters */ 2535 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2536 data, data_index); 2537 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2538 2539 /* RFC 2863 Counters */ 2540 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0, 2541 data, data_index); 2542 data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2543 2544 /* RFC 2819 Counters */ 2545 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, 2546 data, data_index); 2547 data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2548 2549 /* RFC 3635 Counters */ 2550 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0, 2551 data, data_index); 2552 data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2553 2554 /* Extended Counters */ 2555 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 2556 data, data_index); 2557 data_index += MLXSW_SP_PORT_HW_EXT_STATS_LEN; 2558 2559 /* Discard Counters */ 2560 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0, 2561 data, data_index); 2562 data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2563 2564 /* Per-Priority Counters */ 2565 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2566 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2567 data, data_index); 2568 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2569 } 2570 2571 /* Per-TC Counters */ 2572 for (i = 0; i < TC_MAX_QUEUE; i++) { 2573 
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2574 data, data_index); 2575 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2576 } 2577 2578 /* PTP counters */ 2579 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port, 2580 data, data_index); 2581 data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); 2582 } 2583 2584 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2585 { 2586 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2587 2588 switch (sset) { 2589 case ETH_SS_STATS: 2590 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN + 2591 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); 2592 default: 2593 return -EOPNOTSUPP; 2594 } 2595 } 2596 2597 struct mlxsw_sp1_port_link_mode { 2598 enum ethtool_link_mode_bit_indices mask_ethtool; 2599 u32 mask; 2600 u32 speed; 2601 }; 2602 2603 static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = { 2604 { 2605 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2606 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2607 .speed = SPEED_100, 2608 }, 2609 { 2610 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2611 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2612 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2613 .speed = SPEED_1000, 2614 }, 2615 { 2616 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2617 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2618 .speed = SPEED_10000, 2619 }, 2620 { 2621 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 2622 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 2623 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2624 .speed = SPEED_10000, 2625 }, 2626 { 2627 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2628 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2629 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2630 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 2631 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2632 .speed = SPEED_10000, 2633 }, 2634 { 2635 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 2636 .mask_ethtool = 
ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 2637 .speed = SPEED_20000, 2638 }, 2639 { 2640 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 2641 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2642 .speed = SPEED_40000, 2643 }, 2644 { 2645 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 2646 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2647 .speed = SPEED_40000, 2648 }, 2649 { 2650 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 2651 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2652 .speed = SPEED_40000, 2653 }, 2654 { 2655 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 2656 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2657 .speed = SPEED_40000, 2658 }, 2659 { 2660 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, 2661 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2662 .speed = SPEED_25000, 2663 }, 2664 { 2665 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, 2666 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2667 .speed = SPEED_25000, 2668 }, 2669 { 2670 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2671 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2672 .speed = SPEED_25000, 2673 }, 2674 { 2675 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, 2676 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2677 .speed = SPEED_50000, 2678 }, 2679 { 2680 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 2681 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2682 .speed = SPEED_50000, 2683 }, 2684 { 2685 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2, 2686 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2687 .speed = SPEED_50000, 2688 }, 2689 { 2690 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, 2691 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2692 .speed = SPEED_100000, 2693 }, 2694 { 2695 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, 2696 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2697 .speed = SPEED_100000, 2698 }, 2699 { 2700 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, 
2701 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2702 .speed = SPEED_100000, 2703 }, 2704 { 2705 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 2706 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2707 .speed = SPEED_100000, 2708 }, 2709 }; 2710 2711 #define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode) 2712 2713 static void 2714 mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 2715 u32 ptys_eth_proto, 2716 struct ethtool_link_ksettings *cmd) 2717 { 2718 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2719 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2720 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2721 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2722 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2723 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2724 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2725 2726 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2727 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2728 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2729 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2730 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2731 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2732 } 2733 2734 static void 2735 mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 2736 u8 width, unsigned long *mode) 2737 { 2738 int i; 2739 2740 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2741 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) 2742 __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2743 mode); 2744 } 2745 } 2746 2747 static u32 2748 mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 2749 { 2750 int i; 2751 2752 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2753 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) 2754 return mlxsw_sp1_port_link_mode[i].speed; 2755 } 2756 2757 return SPEED_UNKNOWN; 2758 } 2759 2760 static void 2761 mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 2762 u32 
ptys_eth_proto, 2763 struct ethtool_link_ksettings *cmd) 2764 { 2765 cmd->base.speed = SPEED_UNKNOWN; 2766 cmd->base.duplex = DUPLEX_UNKNOWN; 2767 2768 if (!carrier_ok) 2769 return; 2770 2771 cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 2772 if (cmd->base.speed != SPEED_UNKNOWN) 2773 cmd->base.duplex = DUPLEX_FULL; 2774 } 2775 2776 static u32 2777 mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, 2778 const struct ethtool_link_ksettings *cmd) 2779 { 2780 u32 ptys_proto = 0; 2781 int i; 2782 2783 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2784 if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2785 cmd->link_modes.advertising)) 2786 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2787 } 2788 return ptys_proto; 2789 } 2790 2791 static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width, 2792 u32 speed) 2793 { 2794 u32 ptys_proto = 0; 2795 int i; 2796 2797 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2798 if (speed == mlxsw_sp1_port_link_mode[i].speed) 2799 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2800 } 2801 return ptys_proto; 2802 } 2803 2804 static void 2805 mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 2806 u8 local_port, u32 proto_admin, bool autoneg) 2807 { 2808 mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg); 2809 } 2810 2811 static void 2812 mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 2813 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 2814 u32 *p_eth_proto_oper) 2815 { 2816 mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin, 2817 p_eth_proto_oper); 2818 } 2819 2820 static const struct mlxsw_sp_port_type_speed_ops 2821 mlxsw_sp1_port_type_speed_ops = { 2822 .from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port, 2823 .from_ptys_link = mlxsw_sp1_from_ptys_link, 2824 .from_ptys_speed = mlxsw_sp1_from_ptys_speed, 2825 .from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex, 2826 
.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link, 2827 .to_ptys_speed = mlxsw_sp1_to_ptys_speed, 2828 .reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack, 2829 .reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack, 2830 }; 2831 2832 static const enum ethtool_link_mode_bit_indices 2833 mlxsw_sp2_mask_ethtool_sgmii_100m[] = { 2834 ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2835 }; 2836 2837 #define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \ 2838 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m) 2839 2840 static const enum ethtool_link_mode_bit_indices 2841 mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = { 2842 ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 2843 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2844 }; 2845 2846 #define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \ 2847 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii) 2848 2849 static const enum ethtool_link_mode_bit_indices 2850 mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = { 2851 ETHTOOL_LINK_MODE_2500baseX_Full_BIT, 2852 }; 2853 2854 #define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \ 2855 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii) 2856 2857 static const enum ethtool_link_mode_bit_indices 2858 mlxsw_sp2_mask_ethtool_5gbase_r[] = { 2859 ETHTOOL_LINK_MODE_5000baseT_Full_BIT, 2860 }; 2861 2862 #define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \ 2863 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r) 2864 2865 static const enum ethtool_link_mode_bit_indices 2866 mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = { 2867 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2868 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2869 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 2870 ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 2871 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 2872 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 2873 ETHTOOL_LINK_MODE_10000baseER_Full_BIT, 2874 }; 2875 2876 #define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \ 2877 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g) 2878 2879 static const enum ethtool_link_mode_bit_indices 2880 
mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = { 2881 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2882 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2883 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2884 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2885 }; 2886 2887 #define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \ 2888 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g) 2889 2890 static const enum ethtool_link_mode_bit_indices 2891 mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = { 2892 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2893 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2894 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2895 }; 2896 2897 #define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \ 2898 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr) 2899 2900 static const enum ethtool_link_mode_bit_indices 2901 mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = { 2902 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2903 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2904 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2905 }; 2906 2907 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \ 2908 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2) 2909 2910 static const enum ethtool_link_mode_bit_indices 2911 mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = { 2912 ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, 2913 ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, 2914 ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, 2915 ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, 2916 ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, 2917 }; 2918 2919 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \ 2920 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr) 2921 2922 static const enum ethtool_link_mode_bit_indices 2923 mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = { 2924 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2925 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2926 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2927 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 
2928 }; 2929 2930 #define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \ 2931 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4) 2932 2933 static const enum ethtool_link_mode_bit_indices 2934 mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = { 2935 ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, 2936 ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, 2937 ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, 2938 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, 2939 ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, 2940 }; 2941 2942 #define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \ 2943 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2) 2944 2945 static const enum ethtool_link_mode_bit_indices 2946 mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { 2947 ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, 2948 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, 2949 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, 2950 ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, 2951 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, 2952 }; 2953 2954 #define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \ 2955 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4) 2956 2957 static const enum ethtool_link_mode_bit_indices 2958 mlxsw_sp2_mask_ethtool_400gaui_8[] = { 2959 ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, 2960 ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, 2961 ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT, 2962 ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT, 2963 ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, 2964 }; 2965 2966 #define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \ 2967 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8) 2968 2969 #define MLXSW_SP_PORT_MASK_WIDTH_1X BIT(0) 2970 #define MLXSW_SP_PORT_MASK_WIDTH_2X BIT(1) 2971 #define MLXSW_SP_PORT_MASK_WIDTH_4X BIT(2) 2972 #define MLXSW_SP_PORT_MASK_WIDTH_8X BIT(3) 2973 2974 static u8 mlxsw_sp_port_mask_width_get(u8 width) 2975 { 2976 switch (width) { 2977 case 1: 2978 return MLXSW_SP_PORT_MASK_WIDTH_1X; 2979 case 2: 2980 return 
MLXSW_SP_PORT_MASK_WIDTH_2X; 2981 case 4: 2982 return MLXSW_SP_PORT_MASK_WIDTH_4X; 2983 case 8: 2984 return MLXSW_SP_PORT_MASK_WIDTH_8X; 2985 default: 2986 WARN_ON_ONCE(1); 2987 return 0; 2988 } 2989 } 2990 2991 struct mlxsw_sp2_port_link_mode { 2992 const enum ethtool_link_mode_bit_indices *mask_ethtool; 2993 int m_ethtool_len; 2994 u32 mask; 2995 u32 speed; 2996 u8 mask_width; 2997 }; 2998 2999 static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { 3000 { 3001 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M, 3002 .mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m, 3003 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN, 3004 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3005 MLXSW_SP_PORT_MASK_WIDTH_2X | 3006 MLXSW_SP_PORT_MASK_WIDTH_4X | 3007 MLXSW_SP_PORT_MASK_WIDTH_8X, 3008 .speed = SPEED_100, 3009 }, 3010 { 3011 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII, 3012 .mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii, 3013 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN, 3014 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3015 MLXSW_SP_PORT_MASK_WIDTH_2X | 3016 MLXSW_SP_PORT_MASK_WIDTH_4X | 3017 MLXSW_SP_PORT_MASK_WIDTH_8X, 3018 .speed = SPEED_1000, 3019 }, 3020 { 3021 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII, 3022 .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii, 3023 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN, 3024 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3025 MLXSW_SP_PORT_MASK_WIDTH_2X | 3026 MLXSW_SP_PORT_MASK_WIDTH_4X | 3027 MLXSW_SP_PORT_MASK_WIDTH_8X, 3028 .speed = SPEED_2500, 3029 }, 3030 { 3031 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R, 3032 .mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r, 3033 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN, 3034 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3035 MLXSW_SP_PORT_MASK_WIDTH_2X | 3036 MLXSW_SP_PORT_MASK_WIDTH_4X | 3037 MLXSW_SP_PORT_MASK_WIDTH_8X, 3038 .speed = SPEED_5000, 3039 }, 3040 { 3041 .mask = 
MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G, 3042 .mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g, 3043 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN, 3044 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3045 MLXSW_SP_PORT_MASK_WIDTH_2X | 3046 MLXSW_SP_PORT_MASK_WIDTH_4X | 3047 MLXSW_SP_PORT_MASK_WIDTH_8X, 3048 .speed = SPEED_10000, 3049 }, 3050 { 3051 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G, 3052 .mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g, 3053 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN, 3054 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | 3055 MLXSW_SP_PORT_MASK_WIDTH_8X, 3056 .speed = SPEED_40000, 3057 }, 3058 { 3059 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR, 3060 .mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr, 3061 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN, 3062 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3063 MLXSW_SP_PORT_MASK_WIDTH_2X | 3064 MLXSW_SP_PORT_MASK_WIDTH_4X | 3065 MLXSW_SP_PORT_MASK_WIDTH_8X, 3066 .speed = SPEED_25000, 3067 }, 3068 { 3069 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2, 3070 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2, 3071 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN, 3072 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X | 3073 MLXSW_SP_PORT_MASK_WIDTH_4X | 3074 MLXSW_SP_PORT_MASK_WIDTH_8X, 3075 .speed = SPEED_50000, 3076 }, 3077 { 3078 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR, 3079 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr, 3080 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN, 3081 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X, 3082 .speed = SPEED_50000, 3083 }, 3084 { 3085 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4, 3086 .mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4, 3087 .m_ethtool_len = 
MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN, 3088 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | 3089 MLXSW_SP_PORT_MASK_WIDTH_8X, 3090 .speed = SPEED_100000, 3091 }, 3092 { 3093 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2, 3094 .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2, 3095 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN, 3096 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X, 3097 .speed = SPEED_100000, 3098 }, 3099 { 3100 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, 3101 .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, 3102 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, 3103 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | 3104 MLXSW_SP_PORT_MASK_WIDTH_8X, 3105 .speed = SPEED_200000, 3106 }, 3107 { 3108 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8, 3109 .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8, 3110 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN, 3111 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X, 3112 .speed = SPEED_400000, 3113 }, 3114 }; 3115 3116 #define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode) 3117 3118 static void 3119 mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 3120 u32 ptys_eth_proto, 3121 struct ethtool_link_ksettings *cmd) 3122 { 3123 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 3124 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 3125 } 3126 3127 static void 3128 mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3129 unsigned long *mode) 3130 { 3131 int i; 3132 3133 for (i = 0; i < link_mode->m_ethtool_len; i++) 3134 __set_bit(link_mode->mask_ethtool[i], mode); 3135 } 3136 3137 static void 3138 mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 3139 u8 width, unsigned long *mode) 3140 { 3141 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 3142 int i; 3143 3144 for (i = 0; i < 
MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3145 if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) && 3146 (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) 3147 mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3148 mode); 3149 } 3150 } 3151 3152 static u32 3153 mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 3154 { 3155 int i; 3156 3157 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3158 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) 3159 return mlxsw_sp2_port_link_mode[i].speed; 3160 } 3161 3162 return SPEED_UNKNOWN; 3163 } 3164 3165 static void 3166 mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 3167 u32 ptys_eth_proto, 3168 struct ethtool_link_ksettings *cmd) 3169 { 3170 cmd->base.speed = SPEED_UNKNOWN; 3171 cmd->base.duplex = DUPLEX_UNKNOWN; 3172 3173 if (!carrier_ok) 3174 return; 3175 3176 cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 3177 if (cmd->base.speed != SPEED_UNKNOWN) 3178 cmd->base.duplex = DUPLEX_FULL; 3179 } 3180 3181 static bool 3182 mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3183 const unsigned long *mode) 3184 { 3185 int cnt = 0; 3186 int i; 3187 3188 for (i = 0; i < link_mode->m_ethtool_len; i++) { 3189 if (test_bit(link_mode->mask_ethtool[i], mode)) 3190 cnt++; 3191 } 3192 3193 return cnt == link_mode->m_ethtool_len; 3194 } 3195 3196 static u32 3197 mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, 3198 const struct ethtool_link_ksettings *cmd) 3199 { 3200 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 3201 u32 ptys_proto = 0; 3202 int i; 3203 3204 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3205 if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) && 3206 mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3207 cmd->link_modes.advertising)) 3208 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3209 } 3210 return ptys_proto; 3211 } 3212 3213 static u32 
mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, 3214 u8 width, u32 speed) 3215 { 3216 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 3217 u32 ptys_proto = 0; 3218 int i; 3219 3220 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3221 if ((speed == mlxsw_sp2_port_link_mode[i].speed) && 3222 (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) 3223 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3224 } 3225 return ptys_proto; 3226 } 3227 3228 static void 3229 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 3230 u8 local_port, u32 proto_admin, 3231 bool autoneg) 3232 { 3233 mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); 3234 } 3235 3236 static void 3237 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 3238 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 3239 u32 *p_eth_proto_oper) 3240 { 3241 mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, 3242 p_eth_proto_admin, p_eth_proto_oper); 3243 } 3244 3245 static const struct mlxsw_sp_port_type_speed_ops 3246 mlxsw_sp2_port_type_speed_ops = { 3247 .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, 3248 .from_ptys_link = mlxsw_sp2_from_ptys_link, 3249 .from_ptys_speed = mlxsw_sp2_from_ptys_speed, 3250 .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, 3251 .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, 3252 .to_ptys_speed = mlxsw_sp2_to_ptys_speed, 3253 .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, 3254 .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, 3255 }; 3256 3257 static void 3258 mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, 3259 u8 width, struct ethtool_link_ksettings *cmd) 3260 { 3261 const struct mlxsw_sp_port_type_speed_ops *ops; 3262 3263 ops = mlxsw_sp->port_type_speed_ops; 3264 3265 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 3266 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 3267 
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 3268 3269 ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); 3270 ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width, 3271 cmd->link_modes.supported); 3272 } 3273 3274 static void 3275 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, 3276 u32 eth_proto_admin, bool autoneg, u8 width, 3277 struct ethtool_link_ksettings *cmd) 3278 { 3279 const struct mlxsw_sp_port_type_speed_ops *ops; 3280 3281 ops = mlxsw_sp->port_type_speed_ops; 3282 3283 if (!autoneg) 3284 return; 3285 3286 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 3287 ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width, 3288 cmd->link_modes.advertising); 3289 } 3290 3291 static u8 3292 mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type) 3293 { 3294 switch (connector_type) { 3295 case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: 3296 return PORT_OTHER; 3297 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: 3298 return PORT_NONE; 3299 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: 3300 return PORT_TP; 3301 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: 3302 return PORT_AUI; 3303 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: 3304 return PORT_BNC; 3305 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: 3306 return PORT_MII; 3307 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: 3308 return PORT_FIBRE; 3309 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: 3310 return PORT_DA; 3311 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: 3312 return PORT_OTHER; 3313 default: 3314 WARN_ON_ONCE(1); 3315 return PORT_OTHER; 3316 } 3317 } 3318 3319 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 3320 struct ethtool_link_ksettings *cmd) 3321 { 3322 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 3323 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3324 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3325 const struct mlxsw_sp_port_type_speed_ops *ops; 3326 char 
ptys_pl[MLXSW_REG_PTYS_LEN]; 3327 u8 connector_type; 3328 bool autoneg; 3329 int err; 3330 3331 ops = mlxsw_sp->port_type_speed_ops; 3332 3333 autoneg = mlxsw_sp_port->link.autoneg; 3334 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3335 0, false); 3336 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3337 if (err) 3338 return err; 3339 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3340 ð_proto_admin, ð_proto_oper); 3341 3342 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, 3343 mlxsw_sp_port->mapping.width, cmd); 3344 3345 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3346 mlxsw_sp_port->mapping.width, cmd); 3347 3348 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 3349 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3350 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3351 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3352 eth_proto_oper, cmd); 3353 3354 return 0; 3355 } 3356 3357 static int 3358 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3359 const struct ethtool_link_ksettings *cmd) 3360 { 3361 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3362 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3363 const struct mlxsw_sp_port_type_speed_ops *ops; 3364 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3365 u32 eth_proto_cap, eth_proto_new; 3366 bool autoneg; 3367 int err; 3368 3369 ops = mlxsw_sp->port_type_speed_ops; 3370 3371 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3372 0, false); 3373 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3374 if (err) 3375 return err; 3376 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3377 3378 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 3379 eth_proto_new = autoneg ? 
3380 ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width, 3381 cmd) : 3382 ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width, 3383 cmd->base.speed); 3384 3385 eth_proto_new = eth_proto_new & eth_proto_cap; 3386 if (!eth_proto_new) { 3387 netdev_err(dev, "No supported speed requested\n"); 3388 return -EINVAL; 3389 } 3390 3391 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3392 eth_proto_new, autoneg); 3393 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3394 if (err) 3395 return err; 3396 3397 mlxsw_sp_port->link.autoneg = autoneg; 3398 3399 if (!netif_running(dev)) 3400 return 0; 3401 3402 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3403 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3404 3405 return 0; 3406 } 3407 3408 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3409 struct ethtool_modinfo *modinfo) 3410 { 3411 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3412 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3413 int err; 3414 3415 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3416 mlxsw_sp_port->mapping.module, 3417 modinfo); 3418 3419 return err; 3420 } 3421 3422 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3423 struct ethtool_eeprom *ee, 3424 u8 *data) 3425 { 3426 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3427 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3428 int err; 3429 3430 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3431 mlxsw_sp_port->mapping.module, ee, 3432 data); 3433 3434 return err; 3435 } 3436 3437 static int 3438 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) 3439 { 3440 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3441 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3442 3443 return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); 3444 } 3445 3446 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 3447 .get_drvinfo = 
mlxsw_sp_port_get_drvinfo, 3448 .get_link = ethtool_op_get_link, 3449 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 3450 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 3451 .get_strings = mlxsw_sp_port_get_strings, 3452 .set_phys_id = mlxsw_sp_port_set_phys_id, 3453 .get_ethtool_stats = mlxsw_sp_port_get_stats, 3454 .get_sset_count = mlxsw_sp_port_get_sset_count, 3455 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 3456 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 3457 .get_module_info = mlxsw_sp_get_module_info, 3458 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 3459 .get_ts_info = mlxsw_sp_get_ts_info, 3460 }; 3461 3462 static int 3463 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port) 3464 { 3465 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3466 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 3467 const struct mlxsw_sp_port_type_speed_ops *ops; 3468 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3469 int err; 3470 3471 ops = mlxsw_sp->port_type_speed_ops; 3472 3473 /* Set advertised speeds to supported speeds. 
*/ 3474 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3475 0, false); 3476 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3477 if (err) 3478 return err; 3479 3480 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3481 ð_proto_admin, ð_proto_oper); 3482 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3483 eth_proto_cap, mlxsw_sp_port->link.autoneg); 3484 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3485 } 3486 3487 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed) 3488 { 3489 const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops; 3490 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3491 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3492 u32 eth_proto_oper; 3493 int err; 3494 3495 port_type_speed_ops = mlxsw_sp->port_type_speed_ops; 3496 port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, 3497 mlxsw_sp_port->local_port, 0, 3498 false); 3499 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3500 if (err) 3501 return err; 3502 port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL, 3503 ð_proto_oper); 3504 *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper); 3505 return 0; 3506 } 3507 3508 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 3509 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 3510 bool dwrr, u8 dwrr_weight) 3511 { 3512 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3513 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3514 3515 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3516 next_index); 3517 mlxsw_reg_qeec_de_set(qeec_pl, true); 3518 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 3519 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 3520 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3521 } 3522 3523 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 3524 enum mlxsw_reg_qeec_hr hr, u8 index, 3525 u8 next_index, 
u32 maxrate, u8 burst_size) 3526 { 3527 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3528 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3529 3530 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3531 next_index); 3532 mlxsw_reg_qeec_mase_set(qeec_pl, true); 3533 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 3534 mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size); 3535 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3536 } 3537 3538 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, 3539 enum mlxsw_reg_qeec_hr hr, u8 index, 3540 u8 next_index, u32 minrate) 3541 { 3542 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3543 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3544 3545 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3546 next_index); 3547 mlxsw_reg_qeec_mise_set(qeec_pl, true); 3548 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); 3549 3550 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3551 } 3552 3553 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 3554 u8 switch_prio, u8 tclass) 3555 { 3556 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3557 char qtct_pl[MLXSW_REG_QTCT_LEN]; 3558 3559 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 3560 tclass); 3561 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 3562 } 3563 3564 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 3565 { 3566 int err, i; 3567 3568 /* Setup the elements hierarcy, so that each TC is linked to 3569 * one subgroup, which are all member in the same group. 
3570 */ 3571 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3572 MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0); 3573 if (err) 3574 return err; 3575 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3576 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3577 MLXSW_REG_QEEC_HR_SUBGROUP, i, 3578 0, false, 0); 3579 if (err) 3580 return err; 3581 } 3582 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3583 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3584 MLXSW_REG_QEEC_HR_TC, i, i, 3585 false, 0); 3586 if (err) 3587 return err; 3588 3589 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3590 MLXSW_REG_QEEC_HR_TC, 3591 i + 8, i, 3592 true, 100); 3593 if (err) 3594 return err; 3595 } 3596 3597 /* Make sure the max shaper is disabled in all hierarchies that support 3598 * it. Note that this disables ptps (PTP shaper), but that is intended 3599 * for the initial configuration. 3600 */ 3601 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3602 MLXSW_REG_QEEC_HR_PORT, 0, 0, 3603 MLXSW_REG_QEEC_MAS_DIS, 0); 3604 if (err) 3605 return err; 3606 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3607 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3608 MLXSW_REG_QEEC_HR_SUBGROUP, 3609 i, 0, 3610 MLXSW_REG_QEEC_MAS_DIS, 0); 3611 if (err) 3612 return err; 3613 } 3614 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3615 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3616 MLXSW_REG_QEEC_HR_TC, 3617 i, i, 3618 MLXSW_REG_QEEC_MAS_DIS, 0); 3619 if (err) 3620 return err; 3621 3622 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3623 MLXSW_REG_QEEC_HR_TC, 3624 i + 8, i, 3625 MLXSW_REG_QEEC_MAS_DIS, 0); 3626 if (err) 3627 return err; 3628 } 3629 3630 /* Configure the min shaper for multicast TCs. */ 3631 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3632 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 3633 MLXSW_REG_QEEC_HR_TC, 3634 i + 8, i, 3635 MLXSW_REG_QEEC_MIS_MIN); 3636 if (err) 3637 return err; 3638 } 3639 3640 /* Map all priorities to traffic class 0. 
*/ 3641 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3642 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 3643 if (err) 3644 return err; 3645 } 3646 3647 return 0; 3648 } 3649 3650 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 3651 bool enable) 3652 { 3653 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3654 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 3655 3656 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 3657 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 3658 } 3659 3660 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3661 u8 split_base_local_port, 3662 struct mlxsw_sp_port_mapping *port_mapping) 3663 { 3664 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 3665 bool split = !!split_base_local_port; 3666 struct mlxsw_sp_port *mlxsw_sp_port; 3667 struct net_device *dev; 3668 int err; 3669 3670 err = mlxsw_core_port_init(mlxsw_sp->core, local_port, 3671 port_mapping->module + 1, split, 3672 port_mapping->lane / port_mapping->width, 3673 mlxsw_sp->base_mac, 3674 sizeof(mlxsw_sp->base_mac)); 3675 if (err) { 3676 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 3677 local_port); 3678 return err; 3679 } 3680 3681 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 3682 if (!dev) { 3683 err = -ENOMEM; 3684 goto err_alloc_etherdev; 3685 } 3686 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 3687 dev_net_set(dev, mlxsw_sp_net(mlxsw_sp)); 3688 mlxsw_sp_port = netdev_priv(dev); 3689 mlxsw_sp_port->dev = dev; 3690 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3691 mlxsw_sp_port->local_port = local_port; 3692 mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; 3693 mlxsw_sp_port->split = split; 3694 mlxsw_sp_port->split_base_local_port = split_base_local_port; 3695 mlxsw_sp_port->mapping = *port_mapping; 3696 mlxsw_sp_port->link.autoneg = 1; 3697 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 3698 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 3699 3700 mlxsw_sp_port->pcpu_stats 
= 3701 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 3702 if (!mlxsw_sp_port->pcpu_stats) { 3703 err = -ENOMEM; 3704 goto err_alloc_stats; 3705 } 3706 3707 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 3708 GFP_KERNEL); 3709 if (!mlxsw_sp_port->sample) { 3710 err = -ENOMEM; 3711 goto err_alloc_sample; 3712 } 3713 3714 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 3715 &update_stats_cache); 3716 3717 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 3718 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 3719 3720 err = mlxsw_sp_port_module_map(mlxsw_sp_port); 3721 if (err) { 3722 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 3723 mlxsw_sp_port->local_port); 3724 goto err_port_module_map; 3725 } 3726 3727 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 3728 if (err) { 3729 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 3730 mlxsw_sp_port->local_port); 3731 goto err_port_swid_set; 3732 } 3733 3734 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 3735 if (err) { 3736 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 3737 mlxsw_sp_port->local_port); 3738 goto err_dev_addr_init; 3739 } 3740 3741 netif_carrier_off(dev); 3742 3743 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 3744 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 3745 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; 3746 3747 dev->min_mtu = 0; 3748 dev->max_mtu = ETH_MAX_MTU; 3749 3750 /* Each packet needs to have a Tx header (metadata) on top all other 3751 * headers. 
3752 */ 3753 dev->needed_headroom = MLXSW_TXHDR_LEN; 3754 3755 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 3756 if (err) { 3757 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 3758 mlxsw_sp_port->local_port); 3759 goto err_port_system_port_mapping_set; 3760 } 3761 3762 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port); 3763 if (err) { 3764 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 3765 mlxsw_sp_port->local_port); 3766 goto err_port_speed_by_width_set; 3767 } 3768 3769 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 3770 if (err) { 3771 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 3772 mlxsw_sp_port->local_port); 3773 goto err_port_mtu_set; 3774 } 3775 3776 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3777 if (err) 3778 goto err_port_admin_status_set; 3779 3780 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 3781 if (err) { 3782 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 3783 mlxsw_sp_port->local_port); 3784 goto err_port_buffers_init; 3785 } 3786 3787 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 3788 if (err) { 3789 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 3790 mlxsw_sp_port->local_port); 3791 goto err_port_ets_init; 3792 } 3793 3794 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 3795 if (err) { 3796 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 3797 mlxsw_sp_port->local_port); 3798 goto err_port_tc_mc_mode; 3799 } 3800 3801 /* ETS and buffers must be initialized before DCB. 
*/ 3802 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 3803 if (err) { 3804 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 3805 mlxsw_sp_port->local_port); 3806 goto err_port_dcb_init; 3807 } 3808 3809 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 3810 if (err) { 3811 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 3812 mlxsw_sp_port->local_port); 3813 goto err_port_fids_init; 3814 } 3815 3816 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 3817 if (err) { 3818 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 3819 mlxsw_sp_port->local_port); 3820 goto err_port_qdiscs_init; 3821 } 3822 3823 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false, 3824 false); 3825 if (err) { 3826 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n", 3827 mlxsw_sp_port->local_port); 3828 goto err_port_vlan_clear; 3829 } 3830 3831 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 3832 if (err) { 3833 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 3834 mlxsw_sp_port->local_port); 3835 goto err_port_nve_init; 3836 } 3837 3838 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 3839 if (err) { 3840 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", 3841 mlxsw_sp_port->local_port); 3842 goto err_port_pvid_set; 3843 } 3844 3845 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 3846 MLXSW_SP_DEFAULT_VID); 3847 if (IS_ERR(mlxsw_sp_port_vlan)) { 3848 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 3849 mlxsw_sp_port->local_port); 3850 err = PTR_ERR(mlxsw_sp_port_vlan); 3851 goto err_port_vlan_create; 3852 } 3853 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; 3854 3855 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, 3856 mlxsw_sp->ptp_ops->shaper_work); 3857 INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw, 3858 mlxsw_sp_span_speed_update_work); 3859 3860 mlxsw_sp->ports[local_port] = 
mlxsw_sp_port; 3861 err = register_netdev(dev); 3862 if (err) { 3863 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 3864 mlxsw_sp_port->local_port); 3865 goto err_register_netdev; 3866 } 3867 3868 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 3869 mlxsw_sp_port, dev); 3870 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 3871 return 0; 3872 3873 err_register_netdev: 3874 mlxsw_sp->ports[local_port] = NULL; 3875 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 3876 err_port_vlan_create: 3877 err_port_pvid_set: 3878 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3879 err_port_nve_init: 3880 err_port_vlan_clear: 3881 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3882 err_port_qdiscs_init: 3883 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3884 err_port_fids_init: 3885 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3886 err_port_dcb_init: 3887 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3888 err_port_tc_mc_mode: 3889 err_port_ets_init: 3890 err_port_buffers_init: 3891 err_port_admin_status_set: 3892 err_port_mtu_set: 3893 err_port_speed_by_width_set: 3894 err_port_system_port_mapping_set: 3895 err_dev_addr_init: 3896 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3897 err_port_swid_set: 3898 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3899 err_port_module_map: 3900 kfree(mlxsw_sp_port->sample); 3901 err_alloc_sample: 3902 free_percpu(mlxsw_sp_port->pcpu_stats); 3903 err_alloc_stats: 3904 free_netdev(dev); 3905 err_alloc_etherdev: 3906 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3907 return err; 3908 } 3909 3910 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3911 { 3912 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3913 3914 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 3915 cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw); 3916 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); 3917 
mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 3918 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3919 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3920 mlxsw_sp->ports[local_port] = NULL; 3921 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 3922 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3923 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3924 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3925 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3926 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3927 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3928 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3929 kfree(mlxsw_sp_port->sample); 3930 free_percpu(mlxsw_sp_port->pcpu_stats); 3931 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3932 free_netdev(mlxsw_sp_port->dev); 3933 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3934 } 3935 3936 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) 3937 { 3938 struct mlxsw_sp_port *mlxsw_sp_port; 3939 int err; 3940 3941 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL); 3942 if (!mlxsw_sp_port) 3943 return -ENOMEM; 3944 3945 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3946 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT; 3947 3948 err = mlxsw_core_cpu_port_init(mlxsw_sp->core, 3949 mlxsw_sp_port, 3950 mlxsw_sp->base_mac, 3951 sizeof(mlxsw_sp->base_mac)); 3952 if (err) { 3953 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n"); 3954 goto err_core_cpu_port_init; 3955 } 3956 3957 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port; 3958 return 0; 3959 3960 err_core_cpu_port_init: 3961 kfree(mlxsw_sp_port); 3962 return err; 3963 } 3964 3965 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) 3966 { 3967 struct mlxsw_sp_port *mlxsw_sp_port = 3968 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; 3969 3970 mlxsw_core_cpu_port_fini(mlxsw_sp->core); 3971 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL; 3972 kfree(mlxsw_sp_port); 3973 } 3974 3975 static bool 
mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3976 { 3977 return mlxsw_sp->ports[local_port] != NULL; 3978 } 3979 3980 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 3981 { 3982 int i; 3983 3984 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 3985 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3986 mlxsw_sp_port_remove(mlxsw_sp, i); 3987 mlxsw_sp_cpu_port_remove(mlxsw_sp); 3988 kfree(mlxsw_sp->ports); 3989 mlxsw_sp->ports = NULL; 3990 } 3991 3992 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3993 { 3994 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3995 struct mlxsw_sp_port_mapping *port_mapping; 3996 size_t alloc_size; 3997 int i; 3998 int err; 3999 4000 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 4001 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 4002 if (!mlxsw_sp->ports) 4003 return -ENOMEM; 4004 4005 err = mlxsw_sp_cpu_port_create(mlxsw_sp); 4006 if (err) 4007 goto err_cpu_port_create; 4008 4009 for (i = 1; i < max_ports; i++) { 4010 port_mapping = mlxsw_sp->port_mapping[i]; 4011 if (!port_mapping) 4012 continue; 4013 err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping); 4014 if (err) 4015 goto err_port_create; 4016 } 4017 return 0; 4018 4019 err_port_create: 4020 for (i--; i >= 1; i--) 4021 if (mlxsw_sp_port_created(mlxsw_sp, i)) 4022 mlxsw_sp_port_remove(mlxsw_sp, i); 4023 mlxsw_sp_cpu_port_remove(mlxsw_sp); 4024 err_cpu_port_create: 4025 kfree(mlxsw_sp->ports); 4026 mlxsw_sp->ports = NULL; 4027 return err; 4028 } 4029 4030 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) 4031 { 4032 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 4033 struct mlxsw_sp_port_mapping port_mapping; 4034 int i; 4035 int err; 4036 4037 mlxsw_sp->port_mapping = kcalloc(max_ports, 4038 sizeof(struct mlxsw_sp_port_mapping *), 4039 GFP_KERNEL); 4040 if (!mlxsw_sp->port_mapping) 4041 return -ENOMEM; 4042 4043 for (i = 1; i < max_ports; i++) { 4044 err = 
mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping); 4045 if (err) 4046 goto err_port_module_info_get; 4047 if (!port_mapping.width) 4048 continue; 4049 4050 mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping, 4051 sizeof(port_mapping), 4052 GFP_KERNEL); 4053 if (!mlxsw_sp->port_mapping[i]) { 4054 err = -ENOMEM; 4055 goto err_port_module_info_dup; 4056 } 4057 } 4058 return 0; 4059 4060 err_port_module_info_get: 4061 err_port_module_info_dup: 4062 for (i--; i >= 1; i--) 4063 kfree(mlxsw_sp->port_mapping[i]); 4064 kfree(mlxsw_sp->port_mapping); 4065 return err; 4066 } 4067 4068 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) 4069 { 4070 int i; 4071 4072 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 4073 kfree(mlxsw_sp->port_mapping[i]); 4074 kfree(mlxsw_sp->port_mapping); 4075 } 4076 4077 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width) 4078 { 4079 u8 offset = (local_port - 1) % max_width; 4080 4081 return local_port - offset; 4082 } 4083 4084 static int 4085 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 4086 struct mlxsw_sp_port_mapping *port_mapping, 4087 unsigned int count, u8 offset) 4088 { 4089 struct mlxsw_sp_port_mapping split_port_mapping; 4090 int err, i; 4091 4092 split_port_mapping = *port_mapping; 4093 split_port_mapping.width /= count; 4094 for (i = 0; i < count; i++) { 4095 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset, 4096 base_port, &split_port_mapping); 4097 if (err) 4098 goto err_port_create; 4099 split_port_mapping.lane += split_port_mapping.width; 4100 } 4101 4102 return 0; 4103 4104 err_port_create: 4105 for (i--; i >= 0; i--) 4106 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4107 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4108 return err; 4109 } 4110 4111 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 4112 u8 base_port, 4113 unsigned int count, u8 offset) 4114 { 4115 struct 
mlxsw_sp_port_mapping *port_mapping; 4116 int i; 4117 4118 /* Go over original unsplit ports in the gap and recreate them. */ 4119 for (i = 0; i < count * offset; i++) { 4120 port_mapping = mlxsw_sp->port_mapping[base_port + i]; 4121 if (!port_mapping) 4122 continue; 4123 mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping); 4124 } 4125 } 4126 4127 static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core, 4128 unsigned int count, 4129 unsigned int max_width) 4130 { 4131 enum mlxsw_res_id local_ports_in_x_res_id; 4132 int split_width = max_width / count; 4133 4134 if (split_width == 1) 4135 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X; 4136 else if (split_width == 2) 4137 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X; 4138 else if (split_width == 4) 4139 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X; 4140 else 4141 return -EINVAL; 4142 4143 if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id)) 4144 return -EINVAL; 4145 return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id); 4146 } 4147 4148 static struct mlxsw_sp_port * 4149 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) 4150 { 4151 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) 4152 return mlxsw_sp->ports[local_port]; 4153 return NULL; 4154 } 4155 4156 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 4157 unsigned int count, 4158 struct netlink_ext_ack *extack) 4159 { 4160 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4161 struct mlxsw_sp_port_mapping port_mapping; 4162 struct mlxsw_sp_port *mlxsw_sp_port; 4163 int max_width; 4164 u8 base_port; 4165 int offset; 4166 int i; 4167 int err; 4168 4169 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 4170 if (!mlxsw_sp_port) { 4171 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 4172 local_port); 4173 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 4174 return 
-EINVAL; 4175 } 4176 4177 /* Split ports cannot be split. */ 4178 if (mlxsw_sp_port->split) { 4179 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 4180 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further"); 4181 return -EINVAL; 4182 } 4183 4184 max_width = mlxsw_core_module_max_width(mlxsw_core, 4185 mlxsw_sp_port->mapping.module); 4186 if (max_width < 0) { 4187 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); 4188 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); 4189 return max_width; 4190 } 4191 4192 /* Split port with non-max and 1 module width cannot be split. */ 4193 if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) { 4194 netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n"); 4195 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split"); 4196 return -EINVAL; 4197 } 4198 4199 if (count == 1 || !is_power_of_2(count) || count > max_width) { 4200 netdev_err(mlxsw_sp_port->dev, "Invalid split count\n"); 4201 NL_SET_ERR_MSG_MOD(extack, "Invalid split count"); 4202 return -EINVAL; 4203 } 4204 4205 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 4206 if (offset < 0) { 4207 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 4208 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 4209 return -EINVAL; 4210 } 4211 4212 /* Only in case max split is being done, the local port and 4213 * base port may differ. 4214 */ 4215 base_port = count == max_width ? 4216 mlxsw_sp_cluster_base_port_get(local_port, max_width) : 4217 local_port; 4218 4219 for (i = 0; i < count * offset; i++) { 4220 /* Expect base port to exist and also the one in the middle in 4221 * case of maximal split count. 
4222 */ 4223 if (i == 0 || (count == max_width && i == count / 2)) 4224 continue; 4225 4226 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) { 4227 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 4228 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 4229 return -EINVAL; 4230 } 4231 } 4232 4233 port_mapping = mlxsw_sp_port->mapping; 4234 4235 for (i = 0; i < count; i++) 4236 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4237 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4238 4239 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping, 4240 count, offset); 4241 if (err) { 4242 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 4243 goto err_port_split_create; 4244 } 4245 4246 return 0; 4247 4248 err_port_split_create: 4249 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 4250 return err; 4251 } 4252 4253 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 4254 struct netlink_ext_ack *extack) 4255 { 4256 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4257 struct mlxsw_sp_port *mlxsw_sp_port; 4258 unsigned int count; 4259 int max_width; 4260 u8 base_port; 4261 int offset; 4262 int i; 4263 4264 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 4265 if (!mlxsw_sp_port) { 4266 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 4267 local_port); 4268 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 4269 return -EINVAL; 4270 } 4271 4272 if (!mlxsw_sp_port->split) { 4273 netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); 4274 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 4275 return -EINVAL; 4276 } 4277 4278 max_width = mlxsw_core_module_max_width(mlxsw_core, 4279 mlxsw_sp_port->mapping.module); 4280 if (max_width < 0) { 4281 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); 4282 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); 4283 
return max_width; 4284 } 4285 4286 count = max_width / mlxsw_sp_port->mapping.width; 4287 4288 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 4289 if (WARN_ON(offset < 0)) { 4290 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 4291 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 4292 return -EINVAL; 4293 } 4294 4295 base_port = mlxsw_sp_port->split_base_local_port; 4296 4297 for (i = 0; i < count; i++) 4298 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4299 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4300 4301 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 4302 4303 return 0; 4304 } 4305 4306 static void 4307 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) 4308 { 4309 int i; 4310 4311 for (i = 0; i < TC_MAX_QUEUE; i++) 4312 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; 4313 } 4314 4315 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 4316 char *pude_pl, void *priv) 4317 { 4318 struct mlxsw_sp *mlxsw_sp = priv; 4319 struct mlxsw_sp_port *mlxsw_sp_port; 4320 enum mlxsw_reg_pude_oper_status status; 4321 u8 local_port; 4322 4323 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 4324 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4325 if (!mlxsw_sp_port) 4326 return; 4327 4328 status = mlxsw_reg_pude_oper_status_get(pude_pl); 4329 if (status == MLXSW_PORT_OPER_STATUS_UP) { 4330 netdev_info(mlxsw_sp_port->dev, "link up\n"); 4331 netif_carrier_on(mlxsw_sp_port->dev); 4332 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 4333 mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0); 4334 } else { 4335 netdev_info(mlxsw_sp_port->dev, "link down\n"); 4336 netif_carrier_off(mlxsw_sp_port->dev); 4337 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); 4338 } 4339 } 4340 4341 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 4342 char *mtpptr_pl, bool ingress) 4343 { 4344 u8 local_port; 4345 
u8 num_rec; 4346 int i; 4347 4348 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 4349 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 4350 for (i = 0; i < num_rec; i++) { 4351 u8 domain_number; 4352 u8 message_type; 4353 u16 sequence_id; 4354 u64 timestamp; 4355 4356 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 4357 &domain_number, &sequence_id, 4358 ×tamp); 4359 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 4360 message_type, domain_number, 4361 sequence_id, timestamp); 4362 } 4363 } 4364 4365 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 4366 char *mtpptr_pl, void *priv) 4367 { 4368 struct mlxsw_sp *mlxsw_sp = priv; 4369 4370 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 4371 } 4372 4373 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 4374 char *mtpptr_pl, void *priv) 4375 { 4376 struct mlxsw_sp *mlxsw_sp = priv; 4377 4378 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 4379 } 4380 4381 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 4382 u8 local_port, void *priv) 4383 { 4384 struct mlxsw_sp *mlxsw_sp = priv; 4385 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4386 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 4387 4388 if (unlikely(!mlxsw_sp_port)) { 4389 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 4390 local_port); 4391 return; 4392 } 4393 4394 skb->dev = mlxsw_sp_port->dev; 4395 4396 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 4397 u64_stats_update_begin(&pcpu_stats->syncp); 4398 pcpu_stats->rx_packets++; 4399 pcpu_stats->rx_bytes += skb->len; 4400 u64_stats_update_end(&pcpu_stats->syncp); 4401 4402 skb->protocol = eth_type_trans(skb, skb->dev); 4403 netif_receive_skb(skb); 4404 } 4405 4406 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 4407 void *priv) 4408 { 4409 skb->offload_fwd_mark = 1; 4410 return 
mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4411 } 4412 4413 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 4414 u8 local_port, void *priv) 4415 { 4416 skb->offload_l3_fwd_mark = 1; 4417 skb->offload_fwd_mark = 1; 4418 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4419 } 4420 4421 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 4422 void *priv) 4423 { 4424 struct mlxsw_sp *mlxsw_sp = priv; 4425 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4426 struct psample_group *psample_group; 4427 u32 size; 4428 4429 if (unlikely(!mlxsw_sp_port)) { 4430 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 4431 local_port); 4432 goto out; 4433 } 4434 if (unlikely(!mlxsw_sp_port->sample)) { 4435 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 4436 local_port); 4437 goto out; 4438 } 4439 4440 size = mlxsw_sp_port->sample->truncate ? 
4441 mlxsw_sp_port->sample->trunc_size : skb->len; 4442 4443 rcu_read_lock(); 4444 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 4445 if (!psample_group) 4446 goto out_unlock; 4447 psample_sample_packet(psample_group, skb, size, 4448 mlxsw_sp_port->dev->ifindex, 0, 4449 mlxsw_sp_port->sample->rate); 4450 out_unlock: 4451 rcu_read_unlock(); 4452 out: 4453 consume_skb(skb); 4454 } 4455 4456 static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port, 4457 void *priv) 4458 { 4459 struct mlxsw_sp *mlxsw_sp = priv; 4460 4461 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 4462 } 4463 4464 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4465 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 4466 _is_ctrl, SP_##_trap_group, DISCARD) 4467 4468 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4469 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 4470 _is_ctrl, SP_##_trap_group, DISCARD) 4471 4472 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4473 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 4474 _is_ctrl, SP_##_trap_group, DISCARD) 4475 4476 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 4477 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 4478 4479 static const struct mlxsw_listener mlxsw_sp_listener[] = { 4480 /* Events */ 4481 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 4482 /* L2 traps */ 4483 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 4484 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 4485 MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU, 4486 false, SP_LLDP, DISCARD), 4487 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 4488 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 4489 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 4490 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 4491 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 
4492 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 4493 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 4494 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 4495 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), 4496 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, 4497 false), 4498 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 4499 false), 4500 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, 4501 false), 4502 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 4503 false), 4504 /* L3 traps */ 4505 MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false), 4506 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), 4507 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 4508 false), 4509 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), 4510 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 4511 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), 4512 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, 4513 false), 4514 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), 4515 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), 4516 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), 4517 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), 4518 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), 4519 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), 4520 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 4521 false), 4522 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 4523 false), 4524 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 4525 false), 4526 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 4527 false), 4528 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), 4529 
MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 4530 false), 4531 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), 4532 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), 4533 MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false), 4534 MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false), 4535 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 4536 ROUTER_EXP, false), 4537 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 4538 ROUTER_EXP, false), 4539 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 4540 ROUTER_EXP, false), 4541 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 4542 ROUTER_EXP, false), 4543 /* PKT Sample trap */ 4544 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 4545 false, SP_IP2ME, DISCARD), 4546 /* ACL trap */ 4547 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), 4548 /* Multicast Router Traps */ 4549 MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), 4550 MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false), 4551 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 4552 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 4553 /* NVE traps */ 4554 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false), 4555 MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false), 4556 /* PTP traps */ 4557 MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU, 4558 false, SP_PTP0, DISCARD), 4559 MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false), 4560 }; 4561 4562 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 4563 /* Events */ 4564 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 4565 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 4566 }; 4567 4568 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 4569 { 4570 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4571 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 4572 enum 
mlxsw_reg_qpcr_ir_units ir_units; 4573 int max_cpu_policers; 4574 bool is_bytes; 4575 u8 burst_size; 4576 u32 rate; 4577 int i, err; 4578 4579 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 4580 return -EIO; 4581 4582 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 4583 4584 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 4585 for (i = 0; i < max_cpu_policers; i++) { 4586 is_bytes = false; 4587 switch (i) { 4588 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 4589 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 4590 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 4591 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 4592 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 4593 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: 4594 rate = 128; 4595 burst_size = 7; 4596 break; 4597 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 4598 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 4599 rate = 16 * 1024; 4600 burst_size = 10; 4601 break; 4602 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 4603 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 4604 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 4605 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 4606 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 4607 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 4608 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 4609 rate = 1024; 4610 burst_size = 7; 4611 break; 4612 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 4613 rate = 1024; 4614 burst_size = 7; 4615 break; 4616 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0: 4617 rate = 24 * 1024; 4618 burst_size = 12; 4619 break; 4620 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1: 4621 rate = 19 * 1024; 4622 burst_size = 12; 4623 break; 4624 case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP: 4625 rate = 360; 4626 burst_size = 7; 4627 break; 4628 default: 4629 continue; 4630 } 4631 4632 __set_bit(i, mlxsw_sp->trap->policers_usage); 4633 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 4634 burst_size); 4635 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 4636 if (err) 4637 return err; 4638 } 4639 4640 return 0; 4641 } 4642 4643 
/* mlxsw_sp_trap_groups_set() - configure host trap groups (HTGT register).
 * Each configured group is assigned a host priority, an egress traffic
 * class and a policer; by convention the policer index equals the trap
 * group index. Groups not listed in the switch statement are skipped.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	/* NOTE(review): MAX_CPU_POLICERS validity is checked earlier by
	 * mlxsw_sp_cpu_policers_set(), which runs before this function in
	 * mlxsw_sp_traps_init() -- presumably why it is not re-checked here.
	 */
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		/* Highest priority: protocol control traffic. */
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			priority = 0;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			/* Events carry no payload policing; no policer bound. */
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* A bound policer index must fit within the device resource. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Register an array of trap listeners. On failure, the listeners already
 * registered are unregistered, so the operation is all-or-nothing.
 */
static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_listener listeners[],
				   size_t listeners_count)
{
	int i;
	int err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &listeners[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	/* Roll back only the listeners registered so far. */
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
	return err;
}

/* Unregister every listener in the array (inverse of traps_register). */
static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_listener listeners[],
				      size_t listeners_count)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
}

/* Allocate trap state, program CPU policers and trap groups, then register
 * the common listener array followed by the per-ASIC extra listeners.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* policers_usage is a trailing flexible bitmap, one bit per policer. */
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		goto err_traps_register;

	/* Per-ASIC listener array; may be empty (count == 0). */
	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}

/* Tear down in the reverse order of mlxsw_sp_traps_init(). */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	kfree(mlxsw_sp->trap);
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Program the LAG hash fields (SLCR register) and allocate the per-LAG
 * upper-device tracking array. The hash seed is derived from the base MAC
 * so different systems spread flows differently.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Set up the minimal trap group needed for EMAD (device management)
 * traffic, used before the full trap configuration is programmed.
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

/* PTP callbacks for Spectrum-1 ASICs. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
};

/* PTP callbacks for Spectrum-2 (also reused by Spectrum-3). */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
};

/* Spectrum-1: egress mirror buffer is 2.5 MTUs; speed is unused here. */
static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
{
	return mtu * 5 / 2;
}

static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
	.buffsize_get = mlxsw_sp1_span_buffsize_get,
};

#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
#define
MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50 4910 4911 static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor) 4912 { 4913 return 3 * mtu + buffer_factor * speed / 1000; 4914 } 4915 4916 static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed) 4917 { 4918 int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR; 4919 4920 return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 4921 } 4922 4923 static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = { 4924 .buffsize_get = mlxsw_sp2_span_buffsize_get, 4925 }; 4926 4927 static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) 4928 { 4929 int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR; 4930 4931 return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 4932 } 4933 4934 static const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = { 4935 .buffsize_get = mlxsw_sp3_span_buffsize_get, 4936 }; 4937 4938 u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed) 4939 { 4940 u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu); 4941 4942 return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1; 4943 } 4944 4945 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 4946 unsigned long event, void *ptr); 4947 4948 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 4949 const struct mlxsw_bus_info *mlxsw_bus_info, 4950 struct netlink_ext_ack *extack) 4951 { 4952 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4953 int err; 4954 4955 mlxsw_sp->core = mlxsw_core; 4956 mlxsw_sp->bus_info = mlxsw_bus_info; 4957 4958 err = mlxsw_sp_fw_rev_validate(mlxsw_sp); 4959 if (err) 4960 return err; 4961 4962 mlxsw_core_emad_string_tlv_enable(mlxsw_core); 4963 4964 err = mlxsw_sp_base_mac_get(mlxsw_sp); 4965 if (err) { 4966 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 4967 return err; 4968 } 4969 4970 err = mlxsw_sp_kvdl_init(mlxsw_sp); 4971 if (err) { 4972 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 4973 return err; 4974 } 4975 
4976 err = mlxsw_sp_fids_init(mlxsw_sp); 4977 if (err) { 4978 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 4979 goto err_fids_init; 4980 } 4981 4982 err = mlxsw_sp_traps_init(mlxsw_sp); 4983 if (err) { 4984 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 4985 goto err_traps_init; 4986 } 4987 4988 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 4989 if (err) { 4990 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 4991 goto err_devlink_traps_init; 4992 } 4993 4994 err = mlxsw_sp_buffers_init(mlxsw_sp); 4995 if (err) { 4996 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 4997 goto err_buffers_init; 4998 } 4999 5000 err = mlxsw_sp_lag_init(mlxsw_sp); 5001 if (err) { 5002 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 5003 goto err_lag_init; 5004 } 5005 5006 /* Initialize SPAN before router and switchdev, so that those components 5007 * can call mlxsw_sp_span_respin(). 5008 */ 5009 err = mlxsw_sp_span_init(mlxsw_sp); 5010 if (err) { 5011 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 5012 goto err_span_init; 5013 } 5014 5015 err = mlxsw_sp_switchdev_init(mlxsw_sp); 5016 if (err) { 5017 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 5018 goto err_switchdev_init; 5019 } 5020 5021 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 5022 if (err) { 5023 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 5024 goto err_counter_pool_init; 5025 } 5026 5027 err = mlxsw_sp_afa_init(mlxsw_sp); 5028 if (err) { 5029 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 5030 goto err_afa_init; 5031 } 5032 5033 err = mlxsw_sp_nve_init(mlxsw_sp); 5034 if (err) { 5035 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 5036 goto err_nve_init; 5037 } 5038 5039 err = mlxsw_sp_acl_init(mlxsw_sp); 5040 if (err) { 5041 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 5042 goto err_acl_init; 5043 } 5044 5045 
	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* A PTP hardware clock is only instantiated when the bus reports
	 * free-running-counter capability.
	 */
	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Error unwind: strictly the reverse of the initialization order
	 * above. The clock-dependent PTP teardown is guarded the same way
	 * its setup was.
	 */
err_ports_create:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 entry point: plug the SP1-specific ops tables, firmware
 * requirement and trap listeners into the common private struct, then run
 * the shared mlxsw_sp_init().
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-2 entry point: same pattern as mlxsw_sp1_init(), with the
 * SP2-specific ops tables and firmware requirement.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-3 entry point: reuses the Spectrum-2 ops tables except for SPAN
 * and the lowest shaper burst size. Note that unlike SP1/SP2 no req_rev /
 * fw_filename is set here.
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Driver teardown: undo mlxsw_sp_init() in reverse order (mirrors the error
 * unwind path there). PTP state and clock are only torn down when a clock
 * was instantiated.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

/* Device configuration profile handed to the core at init time. Spectrum-1
 * additionally dictates the KVD split (used_kvd_sizes and the hash parts
 * ratio); see mlxsw_sp_kvd_sizes_get().
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_kvd_sizes = 1,
	.kvd_hash_single_parts = 59,
	.kvd_hash_double_parts = 41,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Same as the Spectrum-1 profile but without any KVD sizing fields — the
 * KVD split is not configured on Spectrum-2 and later.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Fill the devlink min/max/granularity parameters for the whole KVD and its
 * three partitions (linear, hash-single, hash-double) from the device's
 * reported resource limits.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct
				      devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* The total KVD size is fixed; each partition may grow up to the
	 * total minus the minimum sizes of the other two partitions.
	 */
	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the Spectrum-1 KVD resource tree with devlink: the KVD itself
 * plus its linear, hash-double and hash-single partitions. The default
 * hash split follows the profile's parts ratio, rounded down to the KVD
 * granularity.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Default hash split: double/(double+single) of what remains after
	 * the linear part, rounded down to the granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

/* Spectrum-2 registers only the top-level KVD resource with devlink — no
 * sub-partitions, and its size is fixed (min == max == device KVD size).
 */
static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink,
					 MLXSW_SP_RESOURCE_NAME_KVD,
					 kvd_size, MLXSW_SP_RESOURCE_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &kvd_size_params);
}

/* Register the SPAN (mirroring) agent count as a fixed-size devlink
 * resource.
 */
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
					 max_span, MLXSW_SP_RESOURCE_SPAN,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &span_size_params);
}

/* Spectrum-1 devlink resources: KVD tree, SPAN agents and flow counters.
 * On failure, unregistering with a NULL resource drops everything that was
 * registered so far.
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	return 0;

err_resources_counter_register:
err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

/* Spectrum-2/3 devlink resources: same as SP1 but with the flat SP2 KVD
 * registration.
 */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	return 0;

err_resources_counter_register:
err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

/* Resolve the effective KVD partition sizes: prefer user-configured values
 * exposed via devlink; fall back to the profile defaults when devlink has
 * no occupancy for a partition.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the KVD without the
	 * linear part. It is split into the single and double
	 * sizes by the parts ratio from the profile. Both sizes
	 * must be multiples of the granularity from the profile.
	 * In case the user provided the sizes, they are obtained
	 * via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal.
	 */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* devlink validation callback: only the "driver" and "flash" policies are
 * accepted for the generic fw_load_policy parameter.
 */
static int
mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
					       union devlink_param_value val,
					       struct netlink_ext_ack *extack)
{
	if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
	    (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

/* devlink parameters common to all Spectrum generations. */
static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};

/* Register the common devlink parameters and set the driverinit default of
 * fw_load_policy to "driver".
 */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}

static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp_devlink_params,
				  ARRAY_SIZE(mlxsw_sp_devlink_params));
}

/* Runtime getter for the ACL region rehash interval parameter. */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

/* Runtime setter for the ACL region rehash interval parameter. */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

/* Extra devlink parameters for Spectrum-2 and later. */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};

/* Register the common parameters first, then the SP2-specific ones; the
 * rehash interval driverinit value is initialized to 0.
 */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = mlxsw_sp_params_register(mlxsw_core);
	if (err)
		return err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		goto err_devlink_params_register;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;

err_devlink_params_register:
	mlxsw_sp_params_unregister(mlxsw_core);
	return err;
}

static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
	mlxsw_sp_params_unregister(mlxsw_core);
}

/* Called by the core for a transmitted PTP skb: strip the Tx header and
 * hand the skb to the per-ASIC PTP handler.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}

/* Core driver descriptor for Spectrum-1. Note it is the only generation
 * that provides a kvd_sizes_get callback.
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind = mlxsw_sp1_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp1_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp1_resources_register,
	.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
	.params_register = mlxsw_sp_params_register,
	.params_unregister = mlxsw_sp_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp1_config_profile,
	.res_query_enabled = true,
};

/* Core driver descriptor for Spectrum-2: SP2 init, resources and params;
 * no kvd_sizes_get callback.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind = mlxsw_sp2_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp2_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
};

/* Core driver descriptor for Spectrum-3: SP3 init, otherwise reuses the
 * SP2 resources, params and config profile.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind = mlxsw_sp3_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp3_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
};

/* True iff the netdevice is an mlxsw_sp port, identified by its netdev ops
 * pointer.
 */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

/* netdev_walk_all_lower_dev*() callback: record the first mlxsw_sp port
 * found in *data and return 1 to stop the walk.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

/* Return the mlxsw_sp port backing @dev: either @dev itself or the first
 * mlxsw_sp port among its lower devices; NULL if none.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Return the mlxsw_sp instance backing @dev, or NULL if the device is not
 * (on top of) an mlxsw_sp port.
 */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

/* RCU variant of mlxsw_sp_port_dev_lower_find(). */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Like mlxsw_sp_port_dev_lower_find(), but takes a reference on the found
 * port's netdevice; release it with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

/* Release the reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

/* Make the LAG device and its bridge-port uppers leave their bridges on
 * behalf of @mlxsw_sp_port.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

/* Create a LAG in the device via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Destroy a LAG in the device via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Add the port to the LAG's collector at @port_index (SLCOR register). */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Remove the port from the LAG's collector (SLCOR register). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable collection on the port for the given LAG (SLCOR register). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Disable collection on the port for the given LAG (SLCOR register). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Find the LAG index for @lag_dev: an already-referenced entry bound to it,
 * otherwise the first free index. -EBUSY when all indexes are taken.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

/* Validate that a LAG upper can be offloaded: a LAG index must be available
 * and the LAG must use hash Tx. Reports the reason via extack.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

/* Find a free member slot within the LAG; -EBUSY when the LAG is full. */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

/* Join @mlxsw_sp_port to the LAG represented by @lag_dev: create the LAG in
 * the device on first use, add the port to its collector and record the
 * core LAG mapping.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_upper *lag; 5996 u16 lag_id; 5997 u8 port_index; 5998 int err; 5999 6000 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 6001 if (err) 6002 return err; 6003 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 6004 if (!lag->ref_count) { 6005 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 6006 if (err) 6007 return err; 6008 lag->dev = lag_dev; 6009 } 6010 6011 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 6012 if (err) 6013 return err; 6014 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 6015 if (err) 6016 goto err_col_port_add; 6017 6018 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 6019 mlxsw_sp_port->local_port); 6020 mlxsw_sp_port->lag_id = lag_id; 6021 mlxsw_sp_port->lagged = 1; 6022 lag->ref_count++; 6023 6024 /* Port is no longer usable as a router interface */ 6025 if (mlxsw_sp_port->default_vlan->fid) 6026 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 6027 6028 return 0; 6029 6030 err_col_port_add: 6031 if (!lag->ref_count) 6032 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 6033 return err; 6034 } 6035 6036 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 6037 struct net_device *lag_dev) 6038 { 6039 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6040 u16 lag_id = mlxsw_sp_port->lag_id; 6041 struct mlxsw_sp_upper *lag; 6042 6043 if (!mlxsw_sp_port->lagged) 6044 return; 6045 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 6046 WARN_ON(lag->ref_count == 0); 6047 6048 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 6049 6050 /* Any VLANs configured on the port are no longer valid */ 6051 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 6052 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 6053 /* Make the LAG and its directly linked uppers leave bridges they 6054 * are memeber in 6055 */ 6056 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 6057 6058 if (lag->ref_count == 1) 6059 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 6060 6061 
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 6062 mlxsw_sp_port->local_port); 6063 mlxsw_sp_port->lagged = 0; 6064 lag->ref_count--; 6065 6066 /* Make sure untagged frames are allowed to ingress */ 6067 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 6068 } 6069 6070 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 6071 u16 lag_id) 6072 { 6073 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6074 char sldr_pl[MLXSW_REG_SLDR_LEN]; 6075 6076 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 6077 mlxsw_sp_port->local_port); 6078 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 6079 } 6080 6081 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 6082 u16 lag_id) 6083 { 6084 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6085 char sldr_pl[MLXSW_REG_SLDR_LEN]; 6086 6087 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 6088 mlxsw_sp_port->local_port); 6089 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 6090 } 6091 6092 static int 6093 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 6094 { 6095 int err; 6096 6097 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 6098 mlxsw_sp_port->lag_id); 6099 if (err) 6100 return err; 6101 6102 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 6103 if (err) 6104 goto err_dist_port_add; 6105 6106 return 0; 6107 6108 err_dist_port_add: 6109 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 6110 return err; 6111 } 6112 6113 static int 6114 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 6115 { 6116 int err; 6117 6118 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 6119 mlxsw_sp_port->lag_id); 6120 if (err) 6121 return err; 6122 6123 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 6124 mlxsw_sp_port->lag_id); 6125 if (err) 6126 goto err_col_port_disable; 6127 6128 return 0; 6129 6130 err_col_port_disable: 6131 
mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 6132 return err; 6133 } 6134 6135 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 6136 struct netdev_lag_lower_state_info *info) 6137 { 6138 if (info->tx_enabled) 6139 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 6140 else 6141 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 6142 } 6143 6144 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 6145 bool enable) 6146 { 6147 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6148 enum mlxsw_reg_spms_state spms_state; 6149 char *spms_pl; 6150 u16 vid; 6151 int err; 6152 6153 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : 6154 MLXSW_REG_SPMS_STATE_DISCARDING; 6155 6156 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 6157 if (!spms_pl) 6158 return -ENOMEM; 6159 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 6160 6161 for (vid = 0; vid < VLAN_N_VID; vid++) 6162 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 6163 6164 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 6165 kfree(spms_pl); 6166 return err; 6167 } 6168 6169 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 6170 { 6171 u16 vid = 1; 6172 int err; 6173 6174 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 6175 if (err) 6176 return err; 6177 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 6178 if (err) 6179 goto err_port_stp_set; 6180 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 6181 true, false); 6182 if (err) 6183 goto err_port_vlan_set; 6184 6185 for (; vid <= VLAN_N_VID - 1; vid++) { 6186 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 6187 vid, false); 6188 if (err) 6189 goto err_vid_learning_set; 6190 } 6191 6192 return 0; 6193 6194 err_vid_learning_set: 6195 for (vid--; vid >= 1; vid--) 6196 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 6197 err_port_vlan_set: 6198 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 6199 
err_port_stp_set: 6200 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 6201 return err; 6202 } 6203 6204 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 6205 { 6206 u16 vid; 6207 6208 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 6209 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 6210 vid, true); 6211 6212 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 6213 false, false); 6214 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 6215 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 6216 } 6217 6218 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 6219 { 6220 unsigned int num_vxlans = 0; 6221 struct net_device *dev; 6222 struct list_head *iter; 6223 6224 netdev_for_each_lower_dev(br_dev, dev, iter) { 6225 if (netif_is_vxlan(dev)) 6226 num_vxlans++; 6227 } 6228 6229 return num_vxlans > 1; 6230 } 6231 6232 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 6233 { 6234 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 6235 struct net_device *dev; 6236 struct list_head *iter; 6237 6238 netdev_for_each_lower_dev(br_dev, dev, iter) { 6239 u16 pvid; 6240 int err; 6241 6242 if (!netif_is_vxlan(dev)) 6243 continue; 6244 6245 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 6246 if (err || !pvid) 6247 continue; 6248 6249 if (test_and_set_bit(pvid, vlans)) 6250 return false; 6251 } 6252 6253 return true; 6254 } 6255 6256 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 6257 struct netlink_ext_ack *extack) 6258 { 6259 if (br_multicast_enabled(br_dev)) { 6260 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 6261 return false; 6262 } 6263 6264 if (!br_vlan_enabled(br_dev) && 6265 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 6266 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 6267 return false; 6268 } 6269 6270 if (br_vlan_enabled(br_dev) && 6271 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 6272 
NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 6273 return false; 6274 } 6275 6276 return true; 6277 } 6278 6279 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 6280 struct net_device *dev, 6281 unsigned long event, void *ptr) 6282 { 6283 struct netdev_notifier_changeupper_info *info; 6284 struct mlxsw_sp_port *mlxsw_sp_port; 6285 struct netlink_ext_ack *extack; 6286 struct net_device *upper_dev; 6287 struct mlxsw_sp *mlxsw_sp; 6288 int err = 0; 6289 6290 mlxsw_sp_port = netdev_priv(dev); 6291 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6292 info = ptr; 6293 extack = netdev_notifier_info_to_extack(&info->info); 6294 6295 switch (event) { 6296 case NETDEV_PRECHANGEUPPER: 6297 upper_dev = info->upper_dev; 6298 if (!is_vlan_dev(upper_dev) && 6299 !netif_is_lag_master(upper_dev) && 6300 !netif_is_bridge_master(upper_dev) && 6301 !netif_is_ovs_master(upper_dev) && 6302 !netif_is_macvlan(upper_dev)) { 6303 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6304 return -EINVAL; 6305 } 6306 if (!info->linking) 6307 break; 6308 if (netif_is_bridge_master(upper_dev) && 6309 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 6310 mlxsw_sp_bridge_has_vxlan(upper_dev) && 6311 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 6312 return -EOPNOTSUPP; 6313 if (netdev_has_any_upper_dev(upper_dev) && 6314 (!netif_is_bridge_master(upper_dev) || 6315 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 6316 upper_dev))) { 6317 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 6318 return -EINVAL; 6319 } 6320 if (netif_is_lag_master(upper_dev) && 6321 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 6322 info->upper_info, extack)) 6323 return -EINVAL; 6324 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 6325 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 6326 return -EINVAL; 6327 } 
6328 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 6329 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 6330 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 6331 return -EINVAL; 6332 } 6333 if (netif_is_macvlan(upper_dev) && 6334 !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) { 6335 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6336 return -EOPNOTSUPP; 6337 } 6338 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 6339 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); 6340 return -EINVAL; 6341 } 6342 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 6343 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 6344 return -EINVAL; 6345 } 6346 break; 6347 case NETDEV_CHANGEUPPER: 6348 upper_dev = info->upper_dev; 6349 if (netif_is_bridge_master(upper_dev)) { 6350 if (info->linking) 6351 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 6352 lower_dev, 6353 upper_dev, 6354 extack); 6355 else 6356 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 6357 lower_dev, 6358 upper_dev); 6359 } else if (netif_is_lag_master(upper_dev)) { 6360 if (info->linking) { 6361 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 6362 upper_dev); 6363 } else { 6364 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 6365 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 6366 upper_dev); 6367 } 6368 } else if (netif_is_ovs_master(upper_dev)) { 6369 if (info->linking) 6370 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 6371 else 6372 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 6373 } else if (netif_is_macvlan(upper_dev)) { 6374 if (!info->linking) 6375 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6376 } else if (is_vlan_dev(upper_dev)) { 6377 struct net_device *br_dev; 6378 6379 if (!netif_is_bridge_port(upper_dev)) 6380 break; 6381 if (info->linking) 6382 break; 6383 br_dev = netdev_master_upper_dev_get(upper_dev); 6384 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 6385 br_dev); 6386 } 6387 
break; 6388 } 6389 6390 return err; 6391 } 6392 6393 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 6394 unsigned long event, void *ptr) 6395 { 6396 struct netdev_notifier_changelowerstate_info *info; 6397 struct mlxsw_sp_port *mlxsw_sp_port; 6398 int err; 6399 6400 mlxsw_sp_port = netdev_priv(dev); 6401 info = ptr; 6402 6403 switch (event) { 6404 case NETDEV_CHANGELOWERSTATE: 6405 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 6406 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 6407 info->lower_state_info); 6408 if (err) 6409 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 6410 } 6411 break; 6412 } 6413 6414 return 0; 6415 } 6416 6417 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 6418 struct net_device *port_dev, 6419 unsigned long event, void *ptr) 6420 { 6421 switch (event) { 6422 case NETDEV_PRECHANGEUPPER: 6423 case NETDEV_CHANGEUPPER: 6424 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 6425 event, ptr); 6426 case NETDEV_CHANGELOWERSTATE: 6427 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 6428 ptr); 6429 } 6430 6431 return 0; 6432 } 6433 6434 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 6435 unsigned long event, void *ptr) 6436 { 6437 struct net_device *dev; 6438 struct list_head *iter; 6439 int ret; 6440 6441 netdev_for_each_lower_dev(lag_dev, dev, iter) { 6442 if (mlxsw_sp_port_dev_check(dev)) { 6443 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 6444 ptr); 6445 if (ret) 6446 return ret; 6447 } 6448 } 6449 6450 return 0; 6451 } 6452 6453 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 6454 struct net_device *dev, 6455 unsigned long event, void *ptr, 6456 u16 vid) 6457 { 6458 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 6459 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6460 struct netdev_notifier_changeupper_info *info = ptr; 6461 struct netlink_ext_ack *extack; 6462 
struct net_device *upper_dev; 6463 int err = 0; 6464 6465 extack = netdev_notifier_info_to_extack(&info->info); 6466 6467 switch (event) { 6468 case NETDEV_PRECHANGEUPPER: 6469 upper_dev = info->upper_dev; 6470 if (!netif_is_bridge_master(upper_dev) && 6471 !netif_is_macvlan(upper_dev)) { 6472 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6473 return -EINVAL; 6474 } 6475 if (!info->linking) 6476 break; 6477 if (netif_is_bridge_master(upper_dev) && 6478 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 6479 mlxsw_sp_bridge_has_vxlan(upper_dev) && 6480 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 6481 return -EOPNOTSUPP; 6482 if (netdev_has_any_upper_dev(upper_dev) && 6483 (!netif_is_bridge_master(upper_dev) || 6484 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 6485 upper_dev))) { 6486 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 6487 return -EINVAL; 6488 } 6489 if (netif_is_macvlan(upper_dev) && 6490 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 6491 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6492 return -EOPNOTSUPP; 6493 } 6494 break; 6495 case NETDEV_CHANGEUPPER: 6496 upper_dev = info->upper_dev; 6497 if (netif_is_bridge_master(upper_dev)) { 6498 if (info->linking) 6499 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 6500 vlan_dev, 6501 upper_dev, 6502 extack); 6503 else 6504 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 6505 vlan_dev, 6506 upper_dev); 6507 } else if (netif_is_macvlan(upper_dev)) { 6508 if (!info->linking) 6509 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6510 } else { 6511 err = -EINVAL; 6512 WARN_ON(1); 6513 } 6514 break; 6515 } 6516 6517 return err; 6518 } 6519 6520 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 6521 struct net_device *lag_dev, 6522 unsigned long event, 6523 void *ptr, u16 vid) 6524 { 6525 struct net_device *dev; 6526 struct list_head *iter; 6527 int ret; 6528 6529 
netdev_for_each_lower_dev(lag_dev, dev, iter) { 6530 if (mlxsw_sp_port_dev_check(dev)) { 6531 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 6532 event, ptr, 6533 vid); 6534 if (ret) 6535 return ret; 6536 } 6537 } 6538 6539 return 0; 6540 } 6541 6542 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, 6543 struct net_device *br_dev, 6544 unsigned long event, void *ptr, 6545 u16 vid) 6546 { 6547 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 6548 struct netdev_notifier_changeupper_info *info = ptr; 6549 struct netlink_ext_ack *extack; 6550 struct net_device *upper_dev; 6551 6552 if (!mlxsw_sp) 6553 return 0; 6554 6555 extack = netdev_notifier_info_to_extack(&info->info); 6556 6557 switch (event) { 6558 case NETDEV_PRECHANGEUPPER: 6559 upper_dev = info->upper_dev; 6560 if (!netif_is_macvlan(upper_dev)) { 6561 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6562 return -EOPNOTSUPP; 6563 } 6564 if (!info->linking) 6565 break; 6566 if (netif_is_macvlan(upper_dev) && 6567 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 6568 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6569 return -EOPNOTSUPP; 6570 } 6571 break; 6572 case NETDEV_CHANGEUPPER: 6573 upper_dev = info->upper_dev; 6574 if (info->linking) 6575 break; 6576 if (netif_is_macvlan(upper_dev)) 6577 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6578 break; 6579 } 6580 6581 return 0; 6582 } 6583 6584 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 6585 unsigned long event, void *ptr) 6586 { 6587 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 6588 u16 vid = vlan_dev_vlan_id(vlan_dev); 6589 6590 if (mlxsw_sp_port_dev_check(real_dev)) 6591 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 6592 event, ptr, vid); 6593 else if (netif_is_lag_master(real_dev)) 6594 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 6595 real_dev, event, 6596 ptr, vid); 6597 else if 
(netif_is_bridge_master(real_dev)) 6598 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, 6599 event, ptr, vid); 6600 6601 return 0; 6602 } 6603 6604 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 6605 unsigned long event, void *ptr) 6606 { 6607 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); 6608 struct netdev_notifier_changeupper_info *info = ptr; 6609 struct netlink_ext_ack *extack; 6610 struct net_device *upper_dev; 6611 6612 if (!mlxsw_sp) 6613 return 0; 6614 6615 extack = netdev_notifier_info_to_extack(&info->info); 6616 6617 switch (event) { 6618 case NETDEV_PRECHANGEUPPER: 6619 upper_dev = info->upper_dev; 6620 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { 6621 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6622 return -EOPNOTSUPP; 6623 } 6624 if (!info->linking) 6625 break; 6626 if (netif_is_macvlan(upper_dev) && 6627 !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) { 6628 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6629 return -EOPNOTSUPP; 6630 } 6631 break; 6632 case NETDEV_CHANGEUPPER: 6633 upper_dev = info->upper_dev; 6634 if (info->linking) 6635 break; 6636 if (is_vlan_dev(upper_dev)) 6637 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 6638 if (netif_is_macvlan(upper_dev)) 6639 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6640 break; 6641 } 6642 6643 return 0; 6644 } 6645 6646 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 6647 unsigned long event, void *ptr) 6648 { 6649 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 6650 struct netdev_notifier_changeupper_info *info = ptr; 6651 struct netlink_ext_ack *extack; 6652 6653 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 6654 return 0; 6655 6656 extack = netdev_notifier_info_to_extack(&info->info); 6657 6658 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ 6659 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6660 6661 return 
-EOPNOTSUPP; 6662 } 6663 6664 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) 6665 { 6666 struct netdev_notifier_changeupper_info *info = ptr; 6667 6668 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) 6669 return false; 6670 return netif_is_l3_master(info->upper_dev); 6671 } 6672 6673 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 6674 struct net_device *dev, 6675 unsigned long event, void *ptr) 6676 { 6677 struct netdev_notifier_changeupper_info *cu_info; 6678 struct netdev_notifier_info *info = ptr; 6679 struct netlink_ext_ack *extack; 6680 struct net_device *upper_dev; 6681 6682 extack = netdev_notifier_info_to_extack(info); 6683 6684 switch (event) { 6685 case NETDEV_CHANGEUPPER: 6686 cu_info = container_of(info, 6687 struct netdev_notifier_changeupper_info, 6688 info); 6689 upper_dev = cu_info->upper_dev; 6690 if (!netif_is_bridge_master(upper_dev)) 6691 return 0; 6692 if (!mlxsw_sp_lower_get(upper_dev)) 6693 return 0; 6694 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 6695 return -EOPNOTSUPP; 6696 if (cu_info->linking) { 6697 if (!netif_running(dev)) 6698 return 0; 6699 /* When the bridge is VLAN-aware, the VNI of the VxLAN 6700 * device needs to be mapped to a VLAN, but at this 6701 * point no VLANs are configured on the VxLAN device 6702 */ 6703 if (br_vlan_enabled(upper_dev)) 6704 return 0; 6705 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 6706 dev, 0, extack); 6707 } else { 6708 /* VLANs were already flushed, which triggered the 6709 * necessary cleanup 6710 */ 6711 if (br_vlan_enabled(upper_dev)) 6712 return 0; 6713 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6714 } 6715 break; 6716 case NETDEV_PRE_UP: 6717 upper_dev = netdev_master_upper_dev_get(dev); 6718 if (!upper_dev) 6719 return 0; 6720 if (!netif_is_bridge_master(upper_dev)) 6721 return 0; 6722 if (!mlxsw_sp_lower_get(upper_dev)) 6723 return 0; 6724 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 6725 
extack); 6726 case NETDEV_DOWN: 6727 upper_dev = netdev_master_upper_dev_get(dev); 6728 if (!upper_dev) 6729 return 0; 6730 if (!netif_is_bridge_master(upper_dev)) 6731 return 0; 6732 if (!mlxsw_sp_lower_get(upper_dev)) 6733 return 0; 6734 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6735 break; 6736 } 6737 6738 return 0; 6739 } 6740 6741 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 6742 unsigned long event, void *ptr) 6743 { 6744 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6745 struct mlxsw_sp_span_entry *span_entry; 6746 struct mlxsw_sp *mlxsw_sp; 6747 int err = 0; 6748 6749 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 6750 if (event == NETDEV_UNREGISTER) { 6751 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 6752 if (span_entry) 6753 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 6754 } 6755 mlxsw_sp_span_respin(mlxsw_sp); 6756 6757 if (netif_is_vxlan(dev)) 6758 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 6759 if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) 6760 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, 6761 event, ptr); 6762 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) 6763 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, 6764 event, ptr); 6765 else if (event == NETDEV_PRE_CHANGEADDR || 6766 event == NETDEV_CHANGEADDR || 6767 event == NETDEV_CHANGEMTU) 6768 err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); 6769 else if (mlxsw_sp_is_vrf_event(event, ptr)) 6770 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); 6771 else if (mlxsw_sp_port_dev_check(dev)) 6772 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); 6773 else if (netif_is_lag_master(dev)) 6774 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 6775 else if (is_vlan_dev(dev)) 6776 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 6777 else if (netif_is_bridge_master(dev)) 6778 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); 6779 else if (netif_is_macvlan(dev)) 
6780 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 6781 6782 return notifier_from_errno(err); 6783 } 6784 6785 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { 6786 .notifier_call = mlxsw_sp_inetaddr_valid_event, 6787 }; 6788 6789 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { 6790 .notifier_call = mlxsw_sp_inet6addr_valid_event, 6791 }; 6792 6793 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 6794 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 6795 {0, }, 6796 }; 6797 6798 static struct pci_driver mlxsw_sp1_pci_driver = { 6799 .name = mlxsw_sp1_driver_name, 6800 .id_table = mlxsw_sp1_pci_id_table, 6801 }; 6802 6803 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 6804 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 6805 {0, }, 6806 }; 6807 6808 static struct pci_driver mlxsw_sp2_pci_driver = { 6809 .name = mlxsw_sp2_driver_name, 6810 .id_table = mlxsw_sp2_pci_id_table, 6811 }; 6812 6813 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = { 6814 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0}, 6815 {0, }, 6816 }; 6817 6818 static struct pci_driver mlxsw_sp3_pci_driver = { 6819 .name = mlxsw_sp3_driver_name, 6820 .id_table = mlxsw_sp3_pci_id_table, 6821 }; 6822 6823 static int __init mlxsw_sp_module_init(void) 6824 { 6825 int err; 6826 6827 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6828 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6829 6830 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 6831 if (err) 6832 goto err_sp1_core_driver_register; 6833 6834 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 6835 if (err) 6836 goto err_sp2_core_driver_register; 6837 6838 err = mlxsw_core_driver_register(&mlxsw_sp3_driver); 6839 if (err) 6840 goto err_sp3_core_driver_register; 6841 6842 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 6843 if (err) 6844 goto 
err_sp1_pci_driver_register; 6845 6846 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 6847 if (err) 6848 goto err_sp2_pci_driver_register; 6849 6850 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver); 6851 if (err) 6852 goto err_sp3_pci_driver_register; 6853 6854 return 0; 6855 6856 err_sp3_pci_driver_register: 6857 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6858 err_sp2_pci_driver_register: 6859 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6860 err_sp1_pci_driver_register: 6861 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 6862 err_sp3_core_driver_register: 6863 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6864 err_sp2_core_driver_register: 6865 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6866 err_sp1_core_driver_register: 6867 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6868 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6869 return err; 6870 } 6871 6872 static void __exit mlxsw_sp_module_exit(void) 6873 { 6874 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 6875 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6876 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6877 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 6878 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6879 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6880 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6881 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6882 } 6883 6884 module_init(mlxsw_sp_module_init); 6885 module_exit(mlxsw_sp_module_exit); 6886 6887 MODULE_LICENSE("Dual BSD/GPL"); 6888 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 6889 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 6890 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 6891 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 6892 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); 6893 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 6894 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); 6895