1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/jhash.h> 25 #include <linux/log2.h> 26 #include <net/switchdev.h> 27 #include <net/pkt_cls.h> 28 #include <net/tc_act/tc_mirred.h> 29 #include <net/netevent.h> 30 #include <net/tc_act/tc_sample.h> 31 #include <net/addrconf.h> 32 33 #include "spectrum.h" 34 #include "pci.h" 35 #include "core.h" 36 #include "core_env.h" 37 #include "reg.h" 38 #include "port.h" 39 #include "trap.h" 40 #include "txheader.h" 41 #include "spectrum_cnt.h" 42 #include "spectrum_dpipe.h" 43 #include "spectrum_acl_flex_actions.h" 44 #include "spectrum_span.h" 45 #include "spectrum_ptp.h" 46 #include "../mlxfw/mlxfw.h" 47 48 #define MLXSW_SP1_FWREV_MAJOR 13 49 #define MLXSW_SP1_FWREV_MINOR 2000 50 #define MLXSW_SP1_FWREV_SUBMINOR 2714 51 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 52 53 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 54 .major = MLXSW_SP1_FWREV_MAJOR, 55 .minor = MLXSW_SP1_FWREV_MINOR, 56 .subminor = MLXSW_SP1_FWREV_SUBMINOR, 57 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 58 }; 59 60 #define MLXSW_SP1_FW_FILENAME \ 61 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 62 "." __stringify(MLXSW_SP1_FWREV_MINOR) \ 63 "." 
__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" 64 65 #define MLXSW_SP2_FWREV_MAJOR 29 66 #define MLXSW_SP2_FWREV_MINOR 2000 67 #define MLXSW_SP2_FWREV_SUBMINOR 2714 68 69 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { 70 .major = MLXSW_SP2_FWREV_MAJOR, 71 .minor = MLXSW_SP2_FWREV_MINOR, 72 .subminor = MLXSW_SP2_FWREV_SUBMINOR, 73 }; 74 75 #define MLXSW_SP2_FW_FILENAME \ 76 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \ 77 "." __stringify(MLXSW_SP2_FWREV_MINOR) \ 78 "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2" 79 80 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 81 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 82 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; 83 static const char mlxsw_sp_driver_version[] = "1.0"; 84 85 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 86 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 87 }; 88 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 89 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 90 }; 91 92 /* tx_hdr_version 93 * Tx header version. 94 * Must be set to 1. 95 */ 96 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 97 98 /* tx_hdr_ctl 99 * Packet control type. 100 * 0 - Ethernet control (e.g. EMADs, LACP) 101 * 1 - Ethernet data 102 */ 103 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 104 105 /* tx_hdr_proto 106 * Packet protocol type. Must be set to 1 (Ethernet). 107 */ 108 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 109 110 /* tx_hdr_rx_is_router 111 * Packet is sent from the router. Valid for data packets only. 112 */ 113 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 114 115 /* tx_hdr_fid_valid 116 * Indicates if the 'fid' field is valid and should be used for 117 * forwarding lookup. Valid for data packets only. 118 */ 119 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 120 121 /* tx_hdr_swid 122 * Switch partition ID. Must be set to 0. 
123 */ 124 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 125 126 /* tx_hdr_control_tclass 127 * Indicates if the packet should use the control TClass and not one 128 * of the data TClasses. 129 */ 130 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 131 132 /* tx_hdr_etclass 133 * Egress TClass to be used on the egress device on the egress port. 134 */ 135 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 136 137 /* tx_hdr_port_mid 138 * Destination local port for unicast packets. 139 * Destination multicast ID for multicast packets. 140 * 141 * Control packets are directed to a specific egress port, while data 142 * packets are transmitted through the CPU port (0) into the switch partition, 143 * where forwarding rules are applied. 144 */ 145 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 146 147 /* tx_hdr_fid 148 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 149 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 150 * Valid for data packets only. 151 */ 152 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 153 154 /* tx_hdr_type 155 * 0 - Data packets 156 * 6 - Control packets 157 */ 158 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 159 160 struct mlxsw_sp_mlxfw_dev { 161 struct mlxfw_dev mlxfw_dev; 162 struct mlxsw_sp *mlxsw_sp; 163 }; 164 165 struct mlxsw_sp_ptp_ops { 166 struct mlxsw_sp_ptp_clock * 167 (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev); 168 void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock); 169 170 struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp); 171 void (*fini)(struct mlxsw_sp_ptp_state *ptp_state); 172 173 /* Notify a driver that a packet that might be PTP was received. Driver 174 * is responsible for freeing the passed-in SKB. 175 */ 176 void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 177 u8 local_port); 178 179 /* Notify a driver that a timestamped packet was transmitted. Driver 180 * is responsible for freeing the passed-in SKB. 
181 */ 182 void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 183 u8 local_port); 184 185 int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, 186 struct hwtstamp_config *config); 187 int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port, 188 struct hwtstamp_config *config); 189 void (*shaper_work)(struct work_struct *work); 190 int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, 191 struct ethtool_ts_info *info); 192 int (*get_stats_count)(void); 193 void (*get_stats_strings)(u8 **p); 194 void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, 195 u64 *data, int data_index); 196 }; 197 198 struct mlxsw_sp_span_ops { 199 u32 (*buffsize_get)(int mtu, u32 speed); 200 }; 201 202 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 203 u16 component_index, u32 *p_max_size, 204 u8 *p_align_bits, u16 *p_max_write_size) 205 { 206 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 207 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 208 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 209 char mcqi_pl[MLXSW_REG_MCQI_LEN]; 210 int err; 211 212 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 213 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 214 if (err) 215 return err; 216 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 217 p_max_write_size); 218 219 *p_align_bits = max_t(u8, *p_align_bits, 2); 220 *p_max_write_size = min_t(u16, *p_max_write_size, 221 MLXSW_REG_MCDA_MAX_DATA_LEN); 222 return 0; 223 } 224 225 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 226 { 227 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 228 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 229 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 230 char mcc_pl[MLXSW_REG_MCC_LEN]; 231 u8 control_state; 232 int err; 233 234 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 235 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 236 if (err) 237 return err; 238 239 
mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 240 if (control_state != MLXFW_FSM_STATE_IDLE) 241 return -EBUSY; 242 243 mlxsw_reg_mcc_pack(mcc_pl, 244 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 245 0, *fwhandle, 0); 246 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 247 } 248 249 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 250 u32 fwhandle, u16 component_index, 251 u32 component_size) 252 { 253 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 254 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 255 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 256 char mcc_pl[MLXSW_REG_MCC_LEN]; 257 258 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 259 component_index, fwhandle, component_size); 260 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 261 } 262 263 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 264 u32 fwhandle, u8 *data, u16 size, 265 u32 offset) 266 { 267 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 268 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 269 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 270 char mcda_pl[MLXSW_REG_MCDA_LEN]; 271 272 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 273 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 274 } 275 276 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 277 u32 fwhandle, u16 component_index) 278 { 279 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 280 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 281 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 282 char mcc_pl[MLXSW_REG_MCC_LEN]; 283 284 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 285 component_index, fwhandle, 0); 286 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 287 } 288 289 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 290 { 291 
struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 292 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 293 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 294 char mcc_pl[MLXSW_REG_MCC_LEN]; 295 296 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 297 fwhandle, 0); 298 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 299 } 300 301 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 302 enum mlxfw_fsm_state *fsm_state, 303 enum mlxfw_fsm_state_err *fsm_state_err) 304 { 305 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 306 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 307 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 308 char mcc_pl[MLXSW_REG_MCC_LEN]; 309 u8 control_state; 310 u8 error_code; 311 int err; 312 313 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 314 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 315 if (err) 316 return err; 317 318 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 319 *fsm_state = control_state; 320 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 321 MLXFW_FSM_STATE_ERR_MAX); 322 return 0; 323 } 324 325 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 326 { 327 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 328 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 329 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 330 char mcc_pl[MLXSW_REG_MCC_LEN]; 331 332 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 333 fwhandle, 0); 334 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 335 } 336 337 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 338 { 339 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 340 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 341 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 342 char mcc_pl[MLXSW_REG_MCC_LEN]; 343 344 
mlxsw_reg_mcc_pack(mcc_pl, 345 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 346 fwhandle, 0); 347 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 348 } 349 350 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 351 .component_query = mlxsw_sp_component_query, 352 .fsm_lock = mlxsw_sp_fsm_lock, 353 .fsm_component_update = mlxsw_sp_fsm_component_update, 354 .fsm_block_download = mlxsw_sp_fsm_block_download, 355 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 356 .fsm_activate = mlxsw_sp_fsm_activate, 357 .fsm_query_state = mlxsw_sp_fsm_query_state, 358 .fsm_cancel = mlxsw_sp_fsm_cancel, 359 .fsm_release = mlxsw_sp_fsm_release, 360 }; 361 362 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 363 const struct firmware *firmware, 364 struct netlink_ext_ack *extack) 365 { 366 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 367 .mlxfw_dev = { 368 .ops = &mlxsw_sp_mlxfw_dev_ops, 369 .psid = mlxsw_sp->bus_info->psid, 370 .psid_size = strlen(mlxsw_sp->bus_info->psid), 371 .devlink = priv_to_devlink(mlxsw_sp->core), 372 }, 373 .mlxsw_sp = mlxsw_sp 374 }; 375 int err; 376 377 mlxsw_core_fw_flash_start(mlxsw_sp->core); 378 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, 379 firmware, extack); 380 mlxsw_core_fw_flash_end(mlxsw_sp->core); 381 382 return err; 383 } 384 385 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 386 { 387 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 388 const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 389 const char *fw_filename = mlxsw_sp->fw_filename; 390 union devlink_param_value value; 391 const struct firmware *firmware; 392 int err; 393 394 /* Don't check if driver does not require it */ 395 if (!req_rev || !fw_filename) 396 return 0; 397 398 /* Don't check if devlink 'fw_load_policy' param is 'flash' */ 399 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), 400 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 401 &value); 402 if (err) 403 
return err; 404 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) 405 return 0; 406 407 /* Validate driver & FW are compatible */ 408 if (rev->major != req_rev->major) { 409 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 410 rev->major, req_rev->major); 411 return -EINVAL; 412 } 413 if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev)) 414 return 0; 415 416 dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n", 417 rev->major, rev->minor, rev->subminor, req_rev->major, 418 req_rev->minor, req_rev->subminor); 419 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 420 fw_filename); 421 422 err = request_firmware_direct(&firmware, fw_filename, 423 mlxsw_sp->bus_info->dev); 424 if (err) { 425 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 426 fw_filename); 427 return err; 428 } 429 430 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL); 431 release_firmware(firmware); 432 if (err) 433 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 434 435 /* On FW flash success, tell the caller FW reset is needed 436 * if current FW supports it. 437 */ 438 if (rev->minor >= req_rev->can_reset_minor) 439 return err ? 
err : -EAGAIN; 440 else 441 return 0; 442 } 443 444 static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core, 445 const char *file_name, const char *component, 446 struct netlink_ext_ack *extack) 447 { 448 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 449 const struct firmware *firmware; 450 int err; 451 452 if (component) 453 return -EOPNOTSUPP; 454 455 err = request_firmware_direct(&firmware, file_name, 456 mlxsw_sp->bus_info->dev); 457 if (err) 458 return err; 459 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack); 460 release_firmware(firmware); 461 462 return err; 463 } 464 465 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 466 unsigned int counter_index, u64 *packets, 467 u64 *bytes) 468 { 469 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 470 int err; 471 472 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 473 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 474 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 475 if (err) 476 return err; 477 if (packets) 478 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 479 if (bytes) 480 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 481 return 0; 482 } 483 484 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 485 unsigned int counter_index) 486 { 487 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 488 489 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 490 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 491 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 492 } 493 494 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 495 unsigned int *p_counter_index) 496 { 497 int err; 498 499 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 500 p_counter_index); 501 if (err) 502 return err; 503 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 504 if (err) 505 goto err_counter_clear; 506 return 0; 507 508 err_counter_clear: 509 mlxsw_sp_counter_free(mlxsw_sp, 
MLXSW_SP_COUNTER_SUB_POOL_FLOW, 510 *p_counter_index); 511 return err; 512 } 513 514 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 515 unsigned int counter_index) 516 { 517 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 518 counter_index); 519 } 520 521 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 522 const struct mlxsw_tx_info *tx_info) 523 { 524 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 525 526 memset(txhdr, 0, MLXSW_TXHDR_LEN); 527 528 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 529 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 530 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 531 mlxsw_tx_hdr_swid_set(txhdr, 0); 532 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 533 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 534 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 535 } 536 537 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 538 { 539 switch (state) { 540 case BR_STATE_FORWARDING: 541 return MLXSW_REG_SPMS_STATE_FORWARDING; 542 case BR_STATE_LEARNING: 543 return MLXSW_REG_SPMS_STATE_LEARNING; 544 case BR_STATE_LISTENING: /* fall-through */ 545 case BR_STATE_DISABLED: /* fall-through */ 546 case BR_STATE_BLOCKING: 547 return MLXSW_REG_SPMS_STATE_DISCARDING; 548 default: 549 BUG(); 550 } 551 } 552 553 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 554 u8 state) 555 { 556 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 557 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 558 char *spms_pl; 559 int err; 560 561 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 562 if (!spms_pl) 563 return -ENOMEM; 564 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 565 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 566 567 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 568 kfree(spms_pl); 569 return err; 570 } 571 572 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 573 { 574 char 
spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 575 int err; 576 577 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 578 if (err) 579 return err; 580 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 581 return 0; 582 } 583 584 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, 585 bool enable, u32 rate) 586 { 587 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 588 char mpsc_pl[MLXSW_REG_MPSC_LEN]; 589 590 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); 591 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl); 592 } 593 594 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 595 bool is_up) 596 { 597 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 598 char paos_pl[MLXSW_REG_PAOS_LEN]; 599 600 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 601 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 602 MLXSW_PORT_ADMIN_STATUS_DOWN); 603 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 604 } 605 606 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 607 unsigned char *addr) 608 { 609 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 610 char ppad_pl[MLXSW_REG_PPAD_LEN]; 611 612 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 613 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 614 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 615 } 616 617 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 618 { 619 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 620 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 621 622 ether_addr_copy(addr, mlxsw_sp->base_mac); 623 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 624 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 625 } 626 627 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 628 { 629 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 630 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 631 int 
max_mtu; 632 int err; 633 634 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 635 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 636 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 637 if (err) 638 return err; 639 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 640 641 if (mtu > max_mtu) 642 return -EINVAL; 643 644 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 645 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 646 } 647 648 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 649 { 650 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 651 char pspa_pl[MLXSW_REG_PSPA_LEN]; 652 653 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 654 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 655 } 656 657 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 658 { 659 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 660 char svpe_pl[MLXSW_REG_SVPE_LEN]; 661 662 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 663 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 664 } 665 666 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 667 bool learn_enable) 668 { 669 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 670 char *spvmlr_pl; 671 int err; 672 673 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 674 if (!spvmlr_pl) 675 return -ENOMEM; 676 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 677 learn_enable); 678 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 679 kfree(spvmlr_pl); 680 return err; 681 } 682 683 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 684 u16 vid) 685 { 686 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 687 char spvid_pl[MLXSW_REG_SPVID_LEN]; 688 689 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 690 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), 
spvid_pl); 691 } 692 693 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 694 bool allow) 695 { 696 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 697 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 698 699 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 700 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 701 } 702 703 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 704 { 705 int err; 706 707 if (!vid) { 708 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 709 if (err) 710 return err; 711 } else { 712 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 713 if (err) 714 return err; 715 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 716 if (err) 717 goto err_port_allow_untagged_set; 718 } 719 720 mlxsw_sp_port->pvid = vid; 721 return 0; 722 723 err_port_allow_untagged_set: 724 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 725 return err; 726 } 727 728 static int 729 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 730 { 731 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 732 char sspr_pl[MLXSW_REG_SSPR_LEN]; 733 734 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 735 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 736 } 737 738 static int 739 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, 740 struct mlxsw_sp_port_mapping *port_mapping) 741 { 742 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 743 bool separate_rxtx; 744 u8 module; 745 u8 width; 746 int err; 747 int i; 748 749 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 750 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 751 if (err) 752 return err; 753 module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 754 width = mlxsw_reg_pmlp_width_get(pmlp_pl); 755 separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl); 756 757 if (width && !is_power_of_2(width)) { 758 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported 
module config: width value is not power of 2\n", 759 local_port); 760 return -EINVAL; 761 } 762 763 for (i = 0; i < width; i++) { 764 if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) { 765 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n", 766 local_port); 767 return -EINVAL; 768 } 769 if (separate_rxtx && 770 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != 771 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) { 772 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n", 773 local_port); 774 return -EINVAL; 775 } 776 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) { 777 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n", 778 local_port); 779 return -EINVAL; 780 } 781 } 782 783 port_mapping->module = module; 784 port_mapping->width = width; 785 port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 786 return 0; 787 } 788 789 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port) 790 { 791 struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping; 792 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 793 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 794 int i; 795 796 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 797 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); 798 for (i = 0; i < port_mapping->width; i++) { 799 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); 800 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ 801 } 802 803 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 804 } 805 806 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 807 { 808 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 809 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 810 811 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 812 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 813 return mlxsw_reg_write(mlxsw_sp->core, 
MLXSW_REG(pmlp), pmlp_pl); 814 } 815 816 static int mlxsw_sp_port_open(struct net_device *dev) 817 { 818 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 819 int err; 820 821 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 822 if (err) 823 return err; 824 netif_start_queue(dev); 825 return 0; 826 } 827 828 static int mlxsw_sp_port_stop(struct net_device *dev) 829 { 830 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 831 832 netif_stop_queue(dev); 833 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 834 } 835 836 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 837 struct net_device *dev) 838 { 839 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 840 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 841 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 842 const struct mlxsw_tx_info tx_info = { 843 .local_port = mlxsw_sp_port->local_port, 844 .is_emad = false, 845 }; 846 u64 len; 847 int err; 848 849 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { 850 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 851 dev_kfree_skb_any(skb); 852 return NETDEV_TX_OK; 853 } 854 855 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); 856 857 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 858 return NETDEV_TX_BUSY; 859 860 if (eth_skb_pad(skb)) { 861 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 862 return NETDEV_TX_OK; 863 } 864 865 mlxsw_sp_txhdr_construct(skb, &tx_info); 866 /* TX header is consumed by HW on the way so we shouldn't count its 867 * bytes as being sent. 868 */ 869 len = skb->len - MLXSW_TXHDR_LEN; 870 871 /* Due to a race we might fail here because of a full queue. In that 872 * unlikely case we simply drop the packet. 
873 */ 874 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 875 876 if (!err) { 877 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 878 u64_stats_update_begin(&pcpu_stats->syncp); 879 pcpu_stats->tx_packets++; 880 pcpu_stats->tx_bytes += len; 881 u64_stats_update_end(&pcpu_stats->syncp); 882 } else { 883 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 884 dev_kfree_skb_any(skb); 885 } 886 return NETDEV_TX_OK; 887 } 888 889 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 890 { 891 } 892 893 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 894 { 895 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 896 struct sockaddr *addr = p; 897 int err; 898 899 if (!is_valid_ether_addr(addr->sa_data)) 900 return -EADDRNOTAVAIL; 901 902 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 903 if (err) 904 return err; 905 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 906 return 0; 907 } 908 909 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 910 int mtu) 911 { 912 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 913 } 914 915 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 916 917 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 918 u16 delay) 919 { 920 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 921 BITS_PER_BYTE)); 922 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 923 mtu); 924 } 925 926 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 927 * Assumes 100m cable and maximum MTU. 
928 */ 929 #define MLXSW_SP_PAUSE_DELAY 58752 930 931 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 932 u16 delay, bool pfc, bool pause) 933 { 934 if (pfc) 935 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 936 else if (pause) 937 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 938 else 939 return 0; 940 } 941 942 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 943 bool lossy) 944 { 945 if (lossy) 946 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 947 else 948 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 949 thres); 950 } 951 952 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 953 u8 *prio_tc, bool pause_en, 954 struct ieee_pfc *my_pfc) 955 { 956 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 957 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 958 u16 delay = !!my_pfc ? my_pfc->delay : 0; 959 char pbmc_pl[MLXSW_REG_PBMC_LEN]; 960 u32 taken_headroom_cells = 0; 961 u32 max_headroom_cells; 962 int i, j, err; 963 964 max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp); 965 966 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); 967 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 968 if (err) 969 return err; 970 971 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 972 bool configure = false; 973 bool pfc = false; 974 u16 thres_cells; 975 u16 delay_cells; 976 u16 total_cells; 977 bool lossy; 978 979 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 980 if (prio_tc[j] == i) { 981 pfc = pfc_en & BIT(j); 982 configure = true; 983 break; 984 } 985 } 986 987 if (!configure) 988 continue; 989 990 lossy = !(pfc || pause_en); 991 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 992 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, 993 pfc, pause_en); 994 total_cells = thres_cells + delay_cells; 995 996 taken_headroom_cells += total_cells; 997 if (taken_headroom_cells > max_headroom_cells) 998 
return -ENOBUFS; 999 1000 mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells, 1001 thres_cells, lossy); 1002 } 1003 1004 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 1005 } 1006 1007 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, 1008 int mtu, bool pause_en) 1009 { 1010 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; 1011 bool dcb_en = !!mlxsw_sp_port->dcb.ets; 1012 struct ieee_pfc *my_pfc; 1013 u8 *prio_tc; 1014 1015 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; 1016 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL; 1017 1018 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, 1019 pause_en, my_pfc); 1020 } 1021 1022 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 1023 { 1024 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1025 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1026 int err; 1027 1028 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); 1029 if (err) 1030 return err; 1031 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); 1032 if (err) 1033 goto err_span_port_mtu_update; 1034 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 1035 if (err) 1036 goto err_port_mtu_set; 1037 dev->mtu = mtu; 1038 return 0; 1039 1040 err_port_mtu_set: 1041 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); 1042 err_span_port_mtu_update: 1043 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1044 return err; 1045 } 1046 1047 static int 1048 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 1049 struct rtnl_link_stats64 *stats) 1050 { 1051 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1052 struct mlxsw_sp_port_pcpu_stats *p; 1053 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1054 u32 tx_dropped = 0; 1055 unsigned int start; 1056 int i; 1057 1058 for_each_possible_cpu(i) { 1059 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 1060 do { 1061 start = u64_stats_fetch_begin_irq(&p->syncp); 1062 rx_packets = 
p->rx_packets; 1063 rx_bytes = p->rx_bytes; 1064 tx_packets = p->tx_packets; 1065 tx_bytes = p->tx_bytes; 1066 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 1067 1068 stats->rx_packets += rx_packets; 1069 stats->rx_bytes += rx_bytes; 1070 stats->tx_packets += tx_packets; 1071 stats->tx_bytes += tx_bytes; 1072 /* tx_dropped is u32, updated without syncp protection. */ 1073 tx_dropped += p->tx_dropped; 1074 } 1075 stats->tx_dropped = tx_dropped; 1076 return 0; 1077 } 1078 1079 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) 1080 { 1081 switch (attr_id) { 1082 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1083 return true; 1084 } 1085 1086 return false; 1087 } 1088 1089 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev, 1090 void *sp) 1091 { 1092 switch (attr_id) { 1093 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1094 return mlxsw_sp_port_get_sw_stats64(dev, sp); 1095 } 1096 1097 return -EINVAL; 1098 } 1099 1100 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, 1101 int prio, char *ppcnt_pl) 1102 { 1103 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1104 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1105 1106 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); 1107 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 1108 } 1109 1110 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev, 1111 struct rtnl_link_stats64 *stats) 1112 { 1113 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1114 int err; 1115 1116 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 1117 0, ppcnt_pl); 1118 if (err) 1119 goto out; 1120 1121 stats->tx_packets = 1122 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); 1123 stats->rx_packets = 1124 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); 1125 stats->tx_bytes = 1126 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); 1127 stats->rx_bytes = 1128 
mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 1129 stats->multicast = 1130 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 1131 1132 stats->rx_crc_errors = 1133 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 1134 stats->rx_frame_errors = 1135 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 1136 1137 stats->rx_length_errors = ( 1138 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) + 1139 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) + 1140 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl)); 1141 1142 stats->rx_errors = (stats->rx_crc_errors + 1143 stats->rx_frame_errors + stats->rx_length_errors); 1144 1145 out: 1146 return err; 1147 } 1148 1149 static void 1150 mlxsw_sp_port_get_hw_xstats(struct net_device *dev, 1151 struct mlxsw_sp_port_xstats *xstats) 1152 { 1153 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1154 int err, i; 1155 1156 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 1157 ppcnt_pl); 1158 if (!err) 1159 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); 1160 1161 for (i = 0; i < TC_MAX_QUEUE; i++) { 1162 err = mlxsw_sp_port_get_stats_raw(dev, 1163 MLXSW_REG_PPCNT_TC_CONG_TC, 1164 i, ppcnt_pl); 1165 if (!err) 1166 xstats->wred_drop[i] = 1167 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); 1168 1169 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, 1170 i, ppcnt_pl); 1171 if (err) 1172 continue; 1173 1174 xstats->backlog[i] = 1175 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 1176 xstats->tail_drop[i] = 1177 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); 1178 } 1179 1180 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1181 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, 1182 i, ppcnt_pl); 1183 if (err) 1184 continue; 1185 1186 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); 1187 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); 1188 } 1189 } 1190 1191 static void update_stats_cache(struct work_struct *work) 1192 { 
1193 struct mlxsw_sp_port *mlxsw_sp_port = 1194 container_of(work, struct mlxsw_sp_port, 1195 periodic_hw_stats.update_dw.work); 1196 1197 if (!netif_carrier_ok(mlxsw_sp_port->dev)) 1198 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as 1199 * necessary when port goes down. 1200 */ 1201 goto out; 1202 1203 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, 1204 &mlxsw_sp_port->periodic_hw_stats.stats); 1205 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev, 1206 &mlxsw_sp_port->periodic_hw_stats.xstats); 1207 1208 out: 1209 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 1210 MLXSW_HW_STATS_UPDATE_TIME); 1211 } 1212 1213 /* Return the stats from a cache that is updated periodically, 1214 * as this function might get called in an atomic context. 1215 */ 1216 static void 1217 mlxsw_sp_port_get_stats64(struct net_device *dev, 1218 struct rtnl_link_stats64 *stats) 1219 { 1220 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1221 1222 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats)); 1223 } 1224 1225 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, 1226 u16 vid_begin, u16 vid_end, 1227 bool is_member, bool untagged) 1228 { 1229 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1230 char *spvm_pl; 1231 int err; 1232 1233 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); 1234 if (!spvm_pl) 1235 return -ENOMEM; 1236 1237 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, 1238 vid_end, is_member, untagged); 1239 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); 1240 kfree(spvm_pl); 1241 return err; 1242 } 1243 1244 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, 1245 u16 vid_end, bool is_member, bool untagged) 1246 { 1247 u16 vid, vid_e; 1248 int err; 1249 1250 for (vid = vid_begin; vid <= vid_end; 1251 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 1252 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), 1253 vid_end); 1254 1255 
err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, 1256 is_member, untagged); 1257 if (err) 1258 return err; 1259 } 1260 1261 return 0; 1262 } 1263 1264 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port, 1265 bool flush_default) 1266 { 1267 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp; 1268 1269 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp, 1270 &mlxsw_sp_port->vlans_list, list) { 1271 if (!flush_default && 1272 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID) 1273 continue; 1274 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1275 } 1276 } 1277 1278 static void 1279 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1280 { 1281 if (mlxsw_sp_port_vlan->bridge_port) 1282 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 1283 else if (mlxsw_sp_port_vlan->fid) 1284 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 1285 } 1286 1287 struct mlxsw_sp_port_vlan * 1288 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 1289 { 1290 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1291 bool untagged = vid == MLXSW_SP_DEFAULT_VID; 1292 int err; 1293 1294 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1295 if (mlxsw_sp_port_vlan) 1296 return ERR_PTR(-EEXIST); 1297 1298 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged); 1299 if (err) 1300 return ERR_PTR(err); 1301 1302 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL); 1303 if (!mlxsw_sp_port_vlan) { 1304 err = -ENOMEM; 1305 goto err_port_vlan_alloc; 1306 } 1307 1308 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; 1309 mlxsw_sp_port_vlan->vid = vid; 1310 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); 1311 1312 return mlxsw_sp_port_vlan; 1313 1314 err_port_vlan_alloc: 1315 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1316 return ERR_PTR(err); 1317 } 1318 1319 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 
1320 { 1321 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 1322 u16 vid = mlxsw_sp_port_vlan->vid; 1323 1324 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan); 1325 list_del(&mlxsw_sp_port_vlan->list); 1326 kfree(mlxsw_sp_port_vlan); 1327 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1328 } 1329 1330 static int mlxsw_sp_port_add_vid(struct net_device *dev, 1331 __be16 __always_unused proto, u16 vid) 1332 { 1333 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1334 1335 /* VLAN 0 is added to HW filter when device goes up, but it is 1336 * reserved in our case, so simply return. 1337 */ 1338 if (!vid) 1339 return 0; 1340 1341 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid)); 1342 } 1343 1344 static int mlxsw_sp_port_kill_vid(struct net_device *dev, 1345 __be16 __always_unused proto, u16 vid) 1346 { 1347 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1348 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1349 1350 /* VLAN 0 is removed from HW filter when device goes down, but 1351 * it is reserved in our case, so simply return. 
1352 */ 1353 if (!vid) 1354 return 0; 1355 1356 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1357 if (!mlxsw_sp_port_vlan) 1358 return 0; 1359 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1360 1361 return 0; 1362 } 1363 1364 static struct mlxsw_sp_port_mall_tc_entry * 1365 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port, 1366 unsigned long cookie) { 1367 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1368 1369 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list) 1370 if (mall_tc_entry->cookie == cookie) 1371 return mall_tc_entry; 1372 1373 return NULL; 1374 } 1375 1376 static int 1377 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, 1378 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror, 1379 const struct flow_action_entry *act, 1380 bool ingress) 1381 { 1382 enum mlxsw_sp_span_type span_type; 1383 1384 if (!act->dev) { 1385 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n"); 1386 return -EINVAL; 1387 } 1388 1389 mirror->ingress = ingress; 1390 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; 1391 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type, 1392 true, &mirror->span_id); 1393 } 1394 1395 static void 1396 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, 1397 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror) 1398 { 1399 enum mlxsw_sp_span_type span_type; 1400 1401 span_type = mirror->ingress ? 
1402 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; 1403 mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id, 1404 span_type, true); 1405 } 1406 1407 static int 1408 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port, 1409 struct tc_cls_matchall_offload *cls, 1410 const struct flow_action_entry *act, 1411 bool ingress) 1412 { 1413 int err; 1414 1415 if (!mlxsw_sp_port->sample) 1416 return -EOPNOTSUPP; 1417 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) { 1418 netdev_err(mlxsw_sp_port->dev, "sample already active\n"); 1419 return -EEXIST; 1420 } 1421 if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) { 1422 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n"); 1423 return -EOPNOTSUPP; 1424 } 1425 1426 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group, 1427 act->sample.psample_group); 1428 mlxsw_sp_port->sample->truncate = act->sample.truncate; 1429 mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size; 1430 mlxsw_sp_port->sample->rate = act->sample.rate; 1431 1432 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate); 1433 if (err) 1434 goto err_port_sample_set; 1435 return 0; 1436 1437 err_port_sample_set: 1438 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); 1439 return err; 1440 } 1441 1442 static void 1443 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port) 1444 { 1445 if (!mlxsw_sp_port->sample) 1446 return; 1447 1448 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1); 1449 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); 1450 } 1451 1452 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1453 struct tc_cls_matchall_offload *f, 1454 bool ingress) 1455 { 1456 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1457 __be16 protocol = f->common.protocol; 1458 struct flow_action_entry *act; 1459 int err; 1460 1461 if (!flow_offload_has_one_action(&f->rule->action)) { 1462 netdev_err(mlxsw_sp_port->dev, "only singular 
actions are supported\n"); 1463 return -EOPNOTSUPP; 1464 } 1465 1466 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); 1467 if (!mall_tc_entry) 1468 return -ENOMEM; 1469 mall_tc_entry->cookie = f->cookie; 1470 1471 act = &f->rule->action.entries[0]; 1472 1473 if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) { 1474 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; 1475 1476 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR; 1477 mirror = &mall_tc_entry->mirror; 1478 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, 1479 mirror, act, 1480 ingress); 1481 } else if (act->id == FLOW_ACTION_SAMPLE && 1482 protocol == htons(ETH_P_ALL)) { 1483 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE; 1484 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f, 1485 act, ingress); 1486 } else { 1487 err = -EOPNOTSUPP; 1488 } 1489 1490 if (err) 1491 goto err_add_action; 1492 1493 list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list); 1494 return 0; 1495 1496 err_add_action: 1497 kfree(mall_tc_entry); 1498 return err; 1499 } 1500 1501 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1502 struct tc_cls_matchall_offload *f) 1503 { 1504 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1505 1506 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port, 1507 f->cookie); 1508 if (!mall_tc_entry) { 1509 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); 1510 return; 1511 } 1512 list_del(&mall_tc_entry->list); 1513 1514 switch (mall_tc_entry->type) { 1515 case MLXSW_SP_PORT_MALL_MIRROR: 1516 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port, 1517 &mall_tc_entry->mirror); 1518 break; 1519 case MLXSW_SP_PORT_MALL_SAMPLE: 1520 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port); 1521 break; 1522 default: 1523 WARN_ON(1); 1524 } 1525 1526 kfree(mall_tc_entry); 1527 } 1528 1529 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1530 struct 
tc_cls_matchall_offload *f, 1531 bool ingress) 1532 { 1533 switch (f->command) { 1534 case TC_CLSMATCHALL_REPLACE: 1535 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f, 1536 ingress); 1537 case TC_CLSMATCHALL_DESTROY: 1538 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f); 1539 return 0; 1540 default: 1541 return -EOPNOTSUPP; 1542 } 1543 } 1544 1545 static int 1546 mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block, 1547 struct flow_cls_offload *f) 1548 { 1549 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block); 1550 1551 switch (f->command) { 1552 case FLOW_CLS_REPLACE: 1553 return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f); 1554 case FLOW_CLS_DESTROY: 1555 mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f); 1556 return 0; 1557 case FLOW_CLS_STATS: 1558 return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f); 1559 case FLOW_CLS_TMPLT_CREATE: 1560 return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f); 1561 case FLOW_CLS_TMPLT_DESTROY: 1562 mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f); 1563 return 0; 1564 default: 1565 return -EOPNOTSUPP; 1566 } 1567 } 1568 1569 static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type, 1570 void *type_data, 1571 void *cb_priv, bool ingress) 1572 { 1573 struct mlxsw_sp_port *mlxsw_sp_port = cb_priv; 1574 1575 switch (type) { 1576 case TC_SETUP_CLSMATCHALL: 1577 if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev, 1578 type_data)) 1579 return -EOPNOTSUPP; 1580 1581 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data, 1582 ingress); 1583 case TC_SETUP_CLSFLOWER: 1584 return 0; 1585 default: 1586 return -EOPNOTSUPP; 1587 } 1588 } 1589 1590 static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type, 1591 void *type_data, 1592 void *cb_priv) 1593 { 1594 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1595 cb_priv, true); 1596 } 1597 1598 static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type, 1599 void *type_data, 
1600 void *cb_priv) 1601 { 1602 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1603 cb_priv, false); 1604 } 1605 1606 static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type, 1607 void *type_data, void *cb_priv) 1608 { 1609 struct mlxsw_sp_acl_block *acl_block = cb_priv; 1610 1611 switch (type) { 1612 case TC_SETUP_CLSMATCHALL: 1613 return 0; 1614 case TC_SETUP_CLSFLOWER: 1615 if (mlxsw_sp_acl_block_disabled(acl_block)) 1616 return -EOPNOTSUPP; 1617 1618 return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data); 1619 default: 1620 return -EOPNOTSUPP; 1621 } 1622 } 1623 1624 static void mlxsw_sp_tc_block_flower_release(void *cb_priv) 1625 { 1626 struct mlxsw_sp_acl_block *acl_block = cb_priv; 1627 1628 mlxsw_sp_acl_block_destroy(acl_block); 1629 } 1630 1631 static LIST_HEAD(mlxsw_sp_block_cb_list); 1632 1633 static int 1634 mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, 1635 struct flow_block_offload *f, bool ingress) 1636 { 1637 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1638 struct mlxsw_sp_acl_block *acl_block; 1639 struct flow_block_cb *block_cb; 1640 bool register_block = false; 1641 int err; 1642 1643 block_cb = flow_block_cb_lookup(f->block, 1644 mlxsw_sp_setup_tc_block_cb_flower, 1645 mlxsw_sp); 1646 if (!block_cb) { 1647 acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net); 1648 if (!acl_block) 1649 return -ENOMEM; 1650 block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower, 1651 mlxsw_sp, acl_block, 1652 mlxsw_sp_tc_block_flower_release); 1653 if (IS_ERR(block_cb)) { 1654 mlxsw_sp_acl_block_destroy(acl_block); 1655 err = PTR_ERR(block_cb); 1656 goto err_cb_register; 1657 } 1658 register_block = true; 1659 } else { 1660 acl_block = flow_block_cb_priv(block_cb); 1661 } 1662 flow_block_cb_incref(block_cb); 1663 err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block, 1664 mlxsw_sp_port, ingress, f->extack); 1665 if (err) 1666 goto err_block_bind; 1667 1668 if (ingress) 1669 
mlxsw_sp_port->ing_acl_block = acl_block; 1670 else 1671 mlxsw_sp_port->eg_acl_block = acl_block; 1672 1673 if (register_block) { 1674 flow_block_cb_add(block_cb, f); 1675 list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list); 1676 } 1677 1678 return 0; 1679 1680 err_block_bind: 1681 if (!flow_block_cb_decref(block_cb)) 1682 flow_block_cb_free(block_cb); 1683 err_cb_register: 1684 return err; 1685 } 1686 1687 static void 1688 mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port, 1689 struct flow_block_offload *f, bool ingress) 1690 { 1691 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1692 struct mlxsw_sp_acl_block *acl_block; 1693 struct flow_block_cb *block_cb; 1694 int err; 1695 1696 block_cb = flow_block_cb_lookup(f->block, 1697 mlxsw_sp_setup_tc_block_cb_flower, 1698 mlxsw_sp); 1699 if (!block_cb) 1700 return; 1701 1702 if (ingress) 1703 mlxsw_sp_port->ing_acl_block = NULL; 1704 else 1705 mlxsw_sp_port->eg_acl_block = NULL; 1706 1707 acl_block = flow_block_cb_priv(block_cb); 1708 err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block, 1709 mlxsw_sp_port, ingress); 1710 if (!err && !flow_block_cb_decref(block_cb)) { 1711 flow_block_cb_remove(block_cb, f); 1712 list_del(&block_cb->driver_list); 1713 } 1714 } 1715 1716 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, 1717 struct flow_block_offload *f) 1718 { 1719 struct flow_block_cb *block_cb; 1720 flow_setup_cb_t *cb; 1721 bool ingress; 1722 int err; 1723 1724 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { 1725 cb = mlxsw_sp_setup_tc_block_cb_matchall_ig; 1726 ingress = true; 1727 } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 1728 cb = mlxsw_sp_setup_tc_block_cb_matchall_eg; 1729 ingress = false; 1730 } else { 1731 return -EOPNOTSUPP; 1732 } 1733 1734 f->driver_block_list = &mlxsw_sp_block_cb_list; 1735 1736 switch (f->command) { 1737 case FLOW_BLOCK_BIND: 1738 if (flow_block_cb_is_busy(cb, mlxsw_sp_port, 1739 
&mlxsw_sp_block_cb_list)) 1740 return -EBUSY; 1741 1742 block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port, 1743 mlxsw_sp_port, NULL); 1744 if (IS_ERR(block_cb)) 1745 return PTR_ERR(block_cb); 1746 err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f, 1747 ingress); 1748 if (err) { 1749 flow_block_cb_free(block_cb); 1750 return err; 1751 } 1752 flow_block_cb_add(block_cb, f); 1753 list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list); 1754 return 0; 1755 case FLOW_BLOCK_UNBIND: 1756 mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port, 1757 f, ingress); 1758 block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port); 1759 if (!block_cb) 1760 return -ENOENT; 1761 1762 flow_block_cb_remove(block_cb, f); 1763 list_del(&block_cb->driver_list); 1764 return 0; 1765 default: 1766 return -EOPNOTSUPP; 1767 } 1768 } 1769 1770 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, 1771 void *type_data) 1772 { 1773 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1774 1775 switch (type) { 1776 case TC_SETUP_BLOCK: 1777 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1778 case TC_SETUP_QDISC_RED: 1779 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1780 case TC_SETUP_QDISC_PRIO: 1781 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data); 1782 case TC_SETUP_QDISC_ETS: 1783 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data); 1784 case TC_SETUP_QDISC_TBF: 1785 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data); 1786 case TC_SETUP_QDISC_FIFO: 1787 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data); 1788 default: 1789 return -EOPNOTSUPP; 1790 } 1791 } 1792 1793 1794 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) 1795 { 1796 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1797 1798 if (!enable) { 1799 if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) || 1800 mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) || 1801 
!list_empty(&mlxsw_sp_port->mall_tc_list)) { 1802 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); 1803 return -EINVAL; 1804 } 1805 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block); 1806 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block); 1807 } else { 1808 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block); 1809 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block); 1810 } 1811 return 0; 1812 } 1813 1814 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable) 1815 { 1816 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1817 char pplr_pl[MLXSW_REG_PPLR_LEN]; 1818 int err; 1819 1820 if (netif_running(dev)) 1821 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1822 1823 mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable); 1824 err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr), 1825 pplr_pl); 1826 1827 if (netif_running(dev)) 1828 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 1829 1830 return err; 1831 } 1832 1833 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable); 1834 1835 static int mlxsw_sp_handle_feature(struct net_device *dev, 1836 netdev_features_t wanted_features, 1837 netdev_features_t feature, 1838 mlxsw_sp_feature_handler feature_handler) 1839 { 1840 netdev_features_t changes = wanted_features ^ dev->features; 1841 bool enable = !!(wanted_features & feature); 1842 int err; 1843 1844 if (!(changes & feature)) 1845 return 0; 1846 1847 err = feature_handler(dev, enable); 1848 if (err) { 1849 netdev_err(dev, "%s feature %pNF failed, err %d\n", 1850 enable ? 
"Enable" : "Disable", &feature, err); 1851 return err; 1852 } 1853 1854 if (enable) 1855 dev->features |= feature; 1856 else 1857 dev->features &= ~feature; 1858 1859 return 0; 1860 } 1861 static int mlxsw_sp_set_features(struct net_device *dev, 1862 netdev_features_t features) 1863 { 1864 netdev_features_t oper_features = dev->features; 1865 int err = 0; 1866 1867 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC, 1868 mlxsw_sp_feature_hw_tc); 1869 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK, 1870 mlxsw_sp_feature_loopback); 1871 1872 if (err) { 1873 dev->features = oper_features; 1874 return -EINVAL; 1875 } 1876 1877 return 0; 1878 } 1879 1880 static struct devlink_port * 1881 mlxsw_sp_port_get_devlink_port(struct net_device *dev) 1882 { 1883 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1884 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1885 1886 return mlxsw_core_port_devlink_port_get(mlxsw_sp->core, 1887 mlxsw_sp_port->local_port); 1888 } 1889 1890 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, 1891 struct ifreq *ifr) 1892 { 1893 struct hwtstamp_config config; 1894 int err; 1895 1896 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 1897 return -EFAULT; 1898 1899 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, 1900 &config); 1901 if (err) 1902 return err; 1903 1904 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1905 return -EFAULT; 1906 1907 return 0; 1908 } 1909 1910 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, 1911 struct ifreq *ifr) 1912 { 1913 struct hwtstamp_config config; 1914 int err; 1915 1916 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port, 1917 &config); 1918 if (err) 1919 return err; 1920 1921 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1922 return -EFAULT; 1923 1924 return 0; 1925 } 1926 1927 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port) 
1928 { 1929 struct hwtstamp_config config = {0}; 1930 1931 mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config); 1932 } 1933 1934 static int 1935 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1936 { 1937 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1938 1939 switch (cmd) { 1940 case SIOCSHWTSTAMP: 1941 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr); 1942 case SIOCGHWTSTAMP: 1943 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr); 1944 default: 1945 return -EOPNOTSUPP; 1946 } 1947 } 1948 1949 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1950 .ndo_open = mlxsw_sp_port_open, 1951 .ndo_stop = mlxsw_sp_port_stop, 1952 .ndo_start_xmit = mlxsw_sp_port_xmit, 1953 .ndo_setup_tc = mlxsw_sp_setup_tc, 1954 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1955 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1956 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1957 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1958 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1959 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1960 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1961 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1962 .ndo_set_features = mlxsw_sp_set_features, 1963 .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port, 1964 .ndo_do_ioctl = mlxsw_sp_port_ioctl, 1965 }; 1966 1967 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 1968 struct ethtool_drvinfo *drvinfo) 1969 { 1970 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1971 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1972 1973 strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, 1974 sizeof(drvinfo->driver)); 1975 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 1976 sizeof(drvinfo->version)); 1977 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 1978 "%d.%d.%d", 1979 mlxsw_sp->bus_info->fw_rev.major, 1980 mlxsw_sp->bus_info->fw_rev.minor, 1981 
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

/* ethtool -a: report the currently configured global PAUSE state as
 * cached on the port structure.
 */
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

/* Program global PAUSE admission (Rx/Tx) via the PFCC register. */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

/* ethtool -A: configure global PAUSE. PAUSE is mutually exclusive with
 * PFC, and autonegotiated PAUSE is not supported. The port headroom is
 * resized first so buffers exist before PAUSE is enabled; on failure of
 * the PFCC write the headroom is rolled back to match the old state.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	/* Restore headroom to match the still-active PAUSE configuration. */
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* One ethtool statistic: its string name, the accessor that extracts it
 * from a PPCNT register payload, and whether the raw value is in buffer
 * cells (and thus needs a cells-to-bytes conversion on read-out).
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

/* IEEE 802.3 counter group (PPCNT). */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter =
		mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

/* RFC 2863 (interfaces MIB) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
	{
		.str = "if_in_discards",
		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
	},
	{
		.str = "if_out_discards",
		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
	},
	{
		.str = "if_out_errors",
		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)

/* RFC 2819 (RMON) counter group, including the packet size histogram. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
	{
		.str = "ether_stats_undersize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
	},
	{
		.str = "ether_stats_oversize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
	},
	{
		.str = "ether_stats_fragments",
		.getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
	},
	{
		.str = "ether_pkts64octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
	},
	{
		.str = "ether_pkts65to127octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
	},
	{
		.str = "ether_pkts128to255octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
	},
	{
		.str = "ether_pkts256to511octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
	},
	{
		.str = "ether_pkts512to1023octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
	},
	{
		.str = "ether_pkts1024to1518octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
	},
	{
		.str = "ether_pkts1519to2047octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
	},
	{
		.str = "ether_pkts2048to4095octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
	},
	{
		.str = "ether_pkts4096to8191octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)

/* RFC 3635 (Ethernet-like MIB) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
	{
		.str = "dot3stats_fcs_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
	},
	{
		.str = "dot3stats_symbol_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
	},
	{
		.str = "dot3control_in_unknown_opcodes",
		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
	},
	{
		.str = "dot3in_pause_frames",
		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)

/* Extended (vendor) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_ext_stats[] = {
	{
		.str = "ecn_marked",
		.getter = mlxsw_reg_ppcnt_ecn_marked_get,
	},
};

#define MLXSW_SP_PORT_HW_EXT_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_ext_stats)

/* Per-reason discard counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
	{
		.str = "discard_ingress_general",
		.getter = mlxsw_reg_ppcnt_ingress_general_get,
	},
	{
		.str = "discard_ingress_policy_engine",
		.getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
	},
	{
		.str = "discard_ingress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
	},
	{
		.str = "discard_ingress_tag_frame_type",
		.getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
	},
	{
		.str = "discard_egress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
	},
	{
		.str = "discard_loopback_filter",
		.getter = mlxsw_reg_ppcnt_loopback_filter_get,
	},
	{
		.str = "discard_egress_general",
		.getter = mlxsw_reg_ppcnt_egress_general_get,
	},
	{
		.str = "discard_egress_hoq",
		.getter = mlxsw_reg_ppcnt_egress_hoq_get,
	},
	{
		.str = "discard_egress_policy_engine",
		.getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
	},
	{
		.str = "discard_ingress_tx_link_down",
		.getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
	},
	{
		.str = "discard_egress_stp_filter",
		.getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
	},
	{
		.str = "discard_egress_sll",
		.getter = mlxsw_reg_ppcnt_egress_sll_get,
	},
};

#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)

/* Per-priority counter group; a "_prio" + priority suffix is appended to
 * each name when the strings are built.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

/* Per-traffic-class counter group. tc_transmit_queue is reported by the
 * hardware in cells and converted to bytes on read-out.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total number of fixed ethtool statistics: all scalar groups, plus the
 * per-priority group repeated for each of the 8 priorities and the
 * per-TC group repeated for each possible queue. PTP statistics are
 * added dynamically on top of this.
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
					 MLXSW_SP_PORT_HW_EXT_STATS_LEN + \
					 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
					  IEEE_8021QAZ_MAX_TCS) + \
					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
					  TC_MAX_QUEUE))

/* Emit the per-priority stat names ("<base>_<prio>") and advance *p. */
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

/* Emit the per-TC stat names ("<base>_<tc>") and advance *p. */
static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

/* ethtool: build the statistic name table. The emission order here must
 * match the read-out order in mlxsw_sp_port_get_stats().
 */
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_EXT_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_ext_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < TC_MAX_QUEUE; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p);
		break;
	}
}

/* ethtool -p: blink the port LED via the MLCR register for physical
 * identification.
 */
static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2456 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2457 } 2458 2459 static int 2460 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2461 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2462 { 2463 switch (grp) { 2464 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2465 *p_hw_stats = mlxsw_sp_port_hw_stats; 2466 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2467 break; 2468 case MLXSW_REG_PPCNT_RFC_2863_CNT: 2469 *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats; 2470 *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2471 break; 2472 case MLXSW_REG_PPCNT_RFC_2819_CNT: 2473 *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; 2474 *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2475 break; 2476 case MLXSW_REG_PPCNT_RFC_3635_CNT: 2477 *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats; 2478 *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2479 break; 2480 case MLXSW_REG_PPCNT_EXT_CNT: 2481 *p_hw_stats = mlxsw_sp_port_hw_ext_stats; 2482 *p_len = MLXSW_SP_PORT_HW_EXT_STATS_LEN; 2483 break; 2484 case MLXSW_REG_PPCNT_DISCARD_CNT: 2485 *p_hw_stats = mlxsw_sp_port_hw_discard_stats; 2486 *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2487 break; 2488 case MLXSW_REG_PPCNT_PRIO_CNT: 2489 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2490 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2491 break; 2492 case MLXSW_REG_PPCNT_TC_CNT: 2493 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2494 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2495 break; 2496 default: 2497 WARN_ON(1); 2498 return -EOPNOTSUPP; 2499 } 2500 return 0; 2501 } 2502 2503 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2504 enum mlxsw_reg_ppcnt_grp grp, int prio, 2505 u64 *data, int data_index) 2506 { 2507 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2508 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2509 struct mlxsw_sp_port_hw_stats *hw_stats; 2510 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2511 int i, len; 2512 int err; 2513 2514 err = 
mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2515 if (err) 2516 return; 2517 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2518 for (i = 0; i < len; i++) { 2519 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2520 if (!hw_stats[i].cells_bytes) 2521 continue; 2522 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2523 data[data_index + i]); 2524 } 2525 } 2526 2527 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2528 struct ethtool_stats *stats, u64 *data) 2529 { 2530 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2531 int i, data_index = 0; 2532 2533 /* IEEE 802.3 Counters */ 2534 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2535 data, data_index); 2536 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2537 2538 /* RFC 2863 Counters */ 2539 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0, 2540 data, data_index); 2541 data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2542 2543 /* RFC 2819 Counters */ 2544 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, 2545 data, data_index); 2546 data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2547 2548 /* RFC 3635 Counters */ 2549 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0, 2550 data, data_index); 2551 data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2552 2553 /* Extended Counters */ 2554 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 2555 data, data_index); 2556 data_index += MLXSW_SP_PORT_HW_EXT_STATS_LEN; 2557 2558 /* Discard Counters */ 2559 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0, 2560 data, data_index); 2561 data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2562 2563 /* Per-Priority Counters */ 2564 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2565 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2566 data, data_index); 2567 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2568 } 2569 2570 /* Per-TC Counters */ 2571 for (i = 0; i < TC_MAX_QUEUE; i++) { 2572 
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2573 data, data_index); 2574 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2575 } 2576 2577 /* PTP counters */ 2578 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port, 2579 data, data_index); 2580 data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); 2581 } 2582 2583 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2584 { 2585 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2586 2587 switch (sset) { 2588 case ETH_SS_STATS: 2589 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN + 2590 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); 2591 default: 2592 return -EOPNOTSUPP; 2593 } 2594 } 2595 2596 struct mlxsw_sp1_port_link_mode { 2597 enum ethtool_link_mode_bit_indices mask_ethtool; 2598 u32 mask; 2599 u32 speed; 2600 }; 2601 2602 static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = { 2603 { 2604 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2605 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2606 .speed = SPEED_100, 2607 }, 2608 { 2609 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2610 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2611 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2612 .speed = SPEED_1000, 2613 }, 2614 { 2615 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2616 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2617 .speed = SPEED_10000, 2618 }, 2619 { 2620 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 2621 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 2622 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2623 .speed = SPEED_10000, 2624 }, 2625 { 2626 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2627 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2628 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2629 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 2630 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2631 .speed = SPEED_10000, 2632 }, 2633 { 2634 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 2635 .mask_ethtool = 
			ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed = SPEED_20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed = SPEED_100000,
	},
};

#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)

/* Derive the supported port types (FIBRE/Backplane) from the PTYS
 * capability mask.
 */
static void
mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Translate a PTYS protocol mask into an ethtool link-mode bitmap.
 * mlxsw_sp and width are unused here but are part of the common
 * port_type_speed_ops signature shared with Spectrum-2.
 */
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 u8 width, unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			__set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

/* Return the speed of the first link mode present in the PTYS mask,
 * or SPEED_UNKNOWN if none match.
 */
static u32
mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			return mlxsw_sp1_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill ethtool speed/duplex from the operational PTYS mask; unknown when
 * the carrier is down.
 */
static void
mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* Translate an ethtool advertising bitmap into a PTYS protocol mask. */
static u32
mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Translate a forced speed into the PTYS mask of all matching modes. */
static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width,
				   u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp1_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Thin wrappers matching the common ops signature; Spectrum-1 uses the
 * legacy (non-extended) PTYS Ethernet fields.
 */
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin, bool autoneg)
{
	mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}

static void
mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
				  p_eth_proto_oper);
}

/* Spectrum-1 implementation of the common port type/speed operations. */
static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp1_port_type_speed_ops = {
	.from_ptys_supported_port	= mlxsw_sp1_from_ptys_supported_port,
	.from_ptys_link			= mlxsw_sp1_from_ptys_link,
	.from_ptys_speed		= mlxsw_sp1_from_ptys_speed,
	.from_ptys_speed_duplex		= mlxsw_sp1_from_ptys_speed_duplex,
	.to_ptys_advert_link		= mlxsw_sp1_to_ptys_advert_link,
	.to_ptys_speed			= mlxsw_sp1_to_ptys_speed,
	.reg_ptys_eth_pack		= mlxsw_sp1_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack		= mlxsw_sp1_reg_ptys_eth_unpack,
};

/* Spectrum-2 and later use extended PTYS speeds: each hardware speed bit
 * maps to a *set* of ethtool link modes, defined by the arrays below.
 */
static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_400gaui_8[] = {
	ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)

/* Bit flags encoding which port widths (number of lanes) a link mode is
 * valid for.
 */
#define MLXSW_SP_PORT_MASK_WIDTH_1X	BIT(0)
#define MLXSW_SP_PORT_MASK_WIDTH_2X	BIT(1)
#define MLXSW_SP_PORT_MASK_WIDTH_4X	BIT(2)
#define MLXSW_SP_PORT_MASK_WIDTH_8X	BIT(3)

/* Convert a lane count (1/2/4/8) into its width-mask bit. */
static u8 mlxsw_sp_port_mask_width_get(u8 width)
{
	switch (width) {
	case 1:
		return MLXSW_SP_PORT_MASK_WIDTH_1X;
	case 2:
		return
			MLXSW_SP_PORT_MASK_WIDTH_2X;
	case 4:
		return MLXSW_SP_PORT_MASK_WIDTH_4X;
	case 8:
		return MLXSW_SP_PORT_MASK_WIDTH_8X;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

/* Spectrum-2+: one extended-PTYS speed bit maps to a set of ethtool link
 * modes and is only valid for certain port widths.
 */
struct mlxsw_sp2_port_link_mode {
	const enum ethtool_link_mode_bit_indices *mask_ethtool;
	int m_ethtool_len;
	u32 mask;
	u32 speed;
	u8 mask_width;
};

static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_2500,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_5000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_200000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_400000,
	},
};

#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)

/* Spectrum-2+ cannot distinguish media from the extended PTYS mask, so
 * both FIBRE and Backplane are always reported as supported.
 */
static void
mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Set all ethtool link-mode bits belonging to one hardware link mode. */
static void
mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			  unsigned long *mode)
{
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++)
		__set_bit(link_mode->mask_ethtool[i], mode);
}

/* Translate an extended PTYS mask into an ethtool link-mode bitmap,
 * filtered by the port's width.
 */
static void
mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 u8 width, unsigned long *mode)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	int i;

	for (i = 0; i <
MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3144 if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) && 3145 (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) 3146 mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3147 mode); 3148 } 3149 } 3150 3151 static u32 3152 mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 3153 { 3154 int i; 3155 3156 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3157 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) 3158 return mlxsw_sp2_port_link_mode[i].speed; 3159 } 3160 3161 return SPEED_UNKNOWN; 3162 } 3163 3164 static void 3165 mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 3166 u32 ptys_eth_proto, 3167 struct ethtool_link_ksettings *cmd) 3168 { 3169 cmd->base.speed = SPEED_UNKNOWN; 3170 cmd->base.duplex = DUPLEX_UNKNOWN; 3171 3172 if (!carrier_ok) 3173 return; 3174 3175 cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 3176 if (cmd->base.speed != SPEED_UNKNOWN) 3177 cmd->base.duplex = DUPLEX_FULL; 3178 } 3179 3180 static bool 3181 mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3182 const unsigned long *mode) 3183 { 3184 int cnt = 0; 3185 int i; 3186 3187 for (i = 0; i < link_mode->m_ethtool_len; i++) { 3188 if (test_bit(link_mode->mask_ethtool[i], mode)) 3189 cnt++; 3190 } 3191 3192 return cnt == link_mode->m_ethtool_len; 3193 } 3194 3195 static u32 3196 mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, 3197 const struct ethtool_link_ksettings *cmd) 3198 { 3199 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 3200 u32 ptys_proto = 0; 3201 int i; 3202 3203 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3204 if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) && 3205 mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3206 cmd->link_modes.advertising)) 3207 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3208 } 3209 return ptys_proto; 3210 } 3211 3212 static u32 
mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, 3213 u8 width, u32 speed) 3214 { 3215 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 3216 u32 ptys_proto = 0; 3217 int i; 3218 3219 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3220 if ((speed == mlxsw_sp2_port_link_mode[i].speed) && 3221 (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) 3222 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3223 } 3224 return ptys_proto; 3225 } 3226 3227 static void 3228 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 3229 u8 local_port, u32 proto_admin, 3230 bool autoneg) 3231 { 3232 mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); 3233 } 3234 3235 static void 3236 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 3237 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 3238 u32 *p_eth_proto_oper) 3239 { 3240 mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, 3241 p_eth_proto_admin, p_eth_proto_oper); 3242 } 3243 3244 static const struct mlxsw_sp_port_type_speed_ops 3245 mlxsw_sp2_port_type_speed_ops = { 3246 .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, 3247 .from_ptys_link = mlxsw_sp2_from_ptys_link, 3248 .from_ptys_speed = mlxsw_sp2_from_ptys_speed, 3249 .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, 3250 .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, 3251 .to_ptys_speed = mlxsw_sp2_to_ptys_speed, 3252 .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, 3253 .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, 3254 }; 3255 3256 static void 3257 mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, 3258 u8 width, struct ethtool_link_ksettings *cmd) 3259 { 3260 const struct mlxsw_sp_port_type_speed_ops *ops; 3261 3262 ops = mlxsw_sp->port_type_speed_ops; 3263 3264 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 3265 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 3266 
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 3267 3268 ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); 3269 ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width, 3270 cmd->link_modes.supported); 3271 } 3272 3273 static void 3274 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, 3275 u32 eth_proto_admin, bool autoneg, u8 width, 3276 struct ethtool_link_ksettings *cmd) 3277 { 3278 const struct mlxsw_sp_port_type_speed_ops *ops; 3279 3280 ops = mlxsw_sp->port_type_speed_ops; 3281 3282 if (!autoneg) 3283 return; 3284 3285 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 3286 ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width, 3287 cmd->link_modes.advertising); 3288 } 3289 3290 static u8 3291 mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type) 3292 { 3293 switch (connector_type) { 3294 case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: 3295 return PORT_OTHER; 3296 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: 3297 return PORT_NONE; 3298 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: 3299 return PORT_TP; 3300 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: 3301 return PORT_AUI; 3302 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: 3303 return PORT_BNC; 3304 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: 3305 return PORT_MII; 3306 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: 3307 return PORT_FIBRE; 3308 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: 3309 return PORT_DA; 3310 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: 3311 return PORT_OTHER; 3312 default: 3313 WARN_ON_ONCE(1); 3314 return PORT_OTHER; 3315 } 3316 } 3317 3318 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 3319 struct ethtool_link_ksettings *cmd) 3320 { 3321 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 3322 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3323 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3324 const struct mlxsw_sp_port_type_speed_ops *ops; 3325 char 
ptys_pl[MLXSW_REG_PTYS_LEN]; 3326 u8 connector_type; 3327 bool autoneg; 3328 int err; 3329 3330 ops = mlxsw_sp->port_type_speed_ops; 3331 3332 autoneg = mlxsw_sp_port->link.autoneg; 3333 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3334 0, false); 3335 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3336 if (err) 3337 return err; 3338 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3339 ð_proto_admin, ð_proto_oper); 3340 3341 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, 3342 mlxsw_sp_port->mapping.width, cmd); 3343 3344 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3345 mlxsw_sp_port->mapping.width, cmd); 3346 3347 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 3348 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3349 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3350 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3351 eth_proto_oper, cmd); 3352 3353 return 0; 3354 } 3355 3356 static int 3357 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3358 const struct ethtool_link_ksettings *cmd) 3359 { 3360 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3361 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3362 const struct mlxsw_sp_port_type_speed_ops *ops; 3363 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3364 u32 eth_proto_cap, eth_proto_new; 3365 bool autoneg; 3366 int err; 3367 3368 ops = mlxsw_sp->port_type_speed_ops; 3369 3370 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3371 0, false); 3372 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3373 if (err) 3374 return err; 3375 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3376 3377 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 3378 eth_proto_new = autoneg ? 
3379 ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width, 3380 cmd) : 3381 ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width, 3382 cmd->base.speed); 3383 3384 eth_proto_new = eth_proto_new & eth_proto_cap; 3385 if (!eth_proto_new) { 3386 netdev_err(dev, "No supported speed requested\n"); 3387 return -EINVAL; 3388 } 3389 3390 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3391 eth_proto_new, autoneg); 3392 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3393 if (err) 3394 return err; 3395 3396 mlxsw_sp_port->link.autoneg = autoneg; 3397 3398 if (!netif_running(dev)) 3399 return 0; 3400 3401 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3402 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3403 3404 return 0; 3405 } 3406 3407 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3408 struct ethtool_modinfo *modinfo) 3409 { 3410 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3411 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3412 int err; 3413 3414 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3415 mlxsw_sp_port->mapping.module, 3416 modinfo); 3417 3418 return err; 3419 } 3420 3421 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3422 struct ethtool_eeprom *ee, 3423 u8 *data) 3424 { 3425 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3426 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3427 int err; 3428 3429 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3430 mlxsw_sp_port->mapping.module, ee, 3431 data); 3432 3433 return err; 3434 } 3435 3436 static int 3437 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) 3438 { 3439 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3440 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3441 3442 return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); 3443 } 3444 3445 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 3446 .get_drvinfo = 
mlxsw_sp_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = mlxsw_sp_port_get_pauseparam,
	.set_pauseparam = mlxsw_sp_port_set_pauseparam,
	.get_strings = mlxsw_sp_port_get_strings,
	.set_phys_id = mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats = mlxsw_sp_port_get_stats,
	.get_sset_count = mlxsw_sp_port_get_sset_count,
	.get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
	.get_module_info = mlxsw_sp_get_module_info,
	.get_module_eeprom = mlxsw_sp_get_module_eeprom,
	.get_ts_info = mlxsw_sp_get_ts_info,
};

/* Advertise every speed the port is capable of: read the capability mask
 * from PTYS and write it back as the admin mask.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to supported speeds. */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap, mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

/* Query the current operational speed of the port (SPEED_UNKNOWN when the
 * operational mask resolves to no known link mode).
 */
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

/* Configure one QEEC scheduling element: link it to next_index in the
 * hierarchy and set its DWRR mode/weight.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the max shaper (rate + burst size) on one QEEC element. */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index,
				  u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the min shaper (guaranteed rate) on one QEEC element. */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Map one switch priority to a traffic class via the QTCT register. */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

/* Build the default ETS configuration for a port: element hierarchy,
 * shapers and priority-to-TC mapping.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TC i + 8 is linked to subgroup i as well; it is configured
		 * with DWRR below and given a min shaper as a multicast TC.
		 */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0.
	 */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

/* Enable/disable multicast-aware TC mapping for the port via QTCTM. */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

/* Create and register one front-panel port netdev and initialize all of its
 * resources (module mapping, SWID, MAC, buffers, ETS, DCB, FIDs, qdiscs,
 * VLANs, NVE) in order. A non-zero split_base_local_port marks the port as
 * a split port. On failure, everything done so far is unwound in reverse
 * through the error labels.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 split_base_local_port,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool split = !!split_base_local_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_mapping->module + 1, split,
				   port_mapping->lane / port_mapping->width,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->split_base_local_port = split_base_local_port;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	/* Clear the whole VLAN filter before setting up the default VID. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);
	INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw,
			  mlxsw_sp_span_speed_update_work);

	/* Publish the port before registering the netdev, then kick off the
	 * periodic statistics update.
	 */
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

/* Tear down one front-panel port, undoing mlxsw_sp_port_create() in
 * reverse order. Delayed work is cancelled first so nothing runs while the
 * port is being dismantled.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 3917 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3918 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3919 mlxsw_sp->ports[local_port] = NULL; 3920 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 3921 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3922 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3923 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3924 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3925 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3926 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3927 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3928 kfree(mlxsw_sp_port->sample); 3929 free_percpu(mlxsw_sp_port->pcpu_stats); 3930 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3931 free_netdev(mlxsw_sp_port->dev); 3932 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3933 } 3934 3935 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) 3936 { 3937 struct mlxsw_sp_port *mlxsw_sp_port; 3938 int err; 3939 3940 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL); 3941 if (!mlxsw_sp_port) 3942 return -ENOMEM; 3943 3944 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3945 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT; 3946 3947 err = mlxsw_core_cpu_port_init(mlxsw_sp->core, 3948 mlxsw_sp_port, 3949 mlxsw_sp->base_mac, 3950 sizeof(mlxsw_sp->base_mac)); 3951 if (err) { 3952 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n"); 3953 goto err_core_cpu_port_init; 3954 } 3955 3956 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port; 3957 return 0; 3958 3959 err_core_cpu_port_init: 3960 kfree(mlxsw_sp_port); 3961 return err; 3962 } 3963 3964 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) 3965 { 3966 struct mlxsw_sp_port *mlxsw_sp_port = 3967 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; 3968 3969 mlxsw_core_cpu_port_fini(mlxsw_sp->core); 3970 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL; 3971 kfree(mlxsw_sp_port); 3972 } 3973 3974 static bool 
mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3975 { 3976 return mlxsw_sp->ports[local_port] != NULL; 3977 } 3978 3979 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 3980 { 3981 int i; 3982 3983 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 3984 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3985 mlxsw_sp_port_remove(mlxsw_sp, i); 3986 mlxsw_sp_cpu_port_remove(mlxsw_sp); 3987 kfree(mlxsw_sp->ports); 3988 } 3989 3990 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3991 { 3992 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3993 struct mlxsw_sp_port_mapping *port_mapping; 3994 size_t alloc_size; 3995 int i; 3996 int err; 3997 3998 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 3999 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 4000 if (!mlxsw_sp->ports) 4001 return -ENOMEM; 4002 4003 err = mlxsw_sp_cpu_port_create(mlxsw_sp); 4004 if (err) 4005 goto err_cpu_port_create; 4006 4007 for (i = 1; i < max_ports; i++) { 4008 port_mapping = mlxsw_sp->port_mapping[i]; 4009 if (!port_mapping) 4010 continue; 4011 err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping); 4012 if (err) 4013 goto err_port_create; 4014 } 4015 return 0; 4016 4017 err_port_create: 4018 for (i--; i >= 1; i--) 4019 if (mlxsw_sp_port_created(mlxsw_sp, i)) 4020 mlxsw_sp_port_remove(mlxsw_sp, i); 4021 mlxsw_sp_cpu_port_remove(mlxsw_sp); 4022 err_cpu_port_create: 4023 kfree(mlxsw_sp->ports); 4024 return err; 4025 } 4026 4027 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) 4028 { 4029 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 4030 struct mlxsw_sp_port_mapping port_mapping; 4031 int i; 4032 int err; 4033 4034 mlxsw_sp->port_mapping = kcalloc(max_ports, 4035 sizeof(struct mlxsw_sp_port_mapping *), 4036 GFP_KERNEL); 4037 if (!mlxsw_sp->port_mapping) 4038 return -ENOMEM; 4039 4040 for (i = 1; i < max_ports; i++) { 4041 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping); 4042 if 
(err) 4043 goto err_port_module_info_get; 4044 if (!port_mapping.width) 4045 continue; 4046 4047 mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping, 4048 sizeof(port_mapping), 4049 GFP_KERNEL); 4050 if (!mlxsw_sp->port_mapping[i]) { 4051 err = -ENOMEM; 4052 goto err_port_module_info_dup; 4053 } 4054 } 4055 return 0; 4056 4057 err_port_module_info_get: 4058 err_port_module_info_dup: 4059 for (i--; i >= 1; i--) 4060 kfree(mlxsw_sp->port_mapping[i]); 4061 kfree(mlxsw_sp->port_mapping); 4062 return err; 4063 } 4064 4065 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) 4066 { 4067 int i; 4068 4069 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 4070 kfree(mlxsw_sp->port_mapping[i]); 4071 kfree(mlxsw_sp->port_mapping); 4072 } 4073 4074 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width) 4075 { 4076 u8 offset = (local_port - 1) % max_width; 4077 4078 return local_port - offset; 4079 } 4080 4081 static int 4082 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 4083 struct mlxsw_sp_port_mapping *port_mapping, 4084 unsigned int count, u8 offset) 4085 { 4086 struct mlxsw_sp_port_mapping split_port_mapping; 4087 int err, i; 4088 4089 split_port_mapping = *port_mapping; 4090 split_port_mapping.width /= count; 4091 for (i = 0; i < count; i++) { 4092 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset, 4093 base_port, &split_port_mapping); 4094 if (err) 4095 goto err_port_create; 4096 split_port_mapping.lane += split_port_mapping.width; 4097 } 4098 4099 return 0; 4100 4101 err_port_create: 4102 for (i--; i >= 0; i--) 4103 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4104 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4105 return err; 4106 } 4107 4108 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 4109 u8 base_port, 4110 unsigned int count, u8 offset) 4111 { 4112 struct mlxsw_sp_port_mapping *port_mapping; 4113 int i; 4114 4115 /* Go over original 
unsplit ports in the gap and recreate them. */
	for (i = 0; i < count * offset; i++) {
		port_mapping = mlxsw_sp->port_mapping[base_port + i];
		if (!port_mapping)
			continue;
		mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
	}
}

/* Translate a split width into the distance (in local port numbers) between
 * consecutive split ports, using the LOCAL_PORTS_IN_{1,2,4}X device
 * resources. Returns a negative errno for unsupported widths or when the
 * resource is not exposed by the device.
 */
static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
				       unsigned int count,
				       unsigned int max_width)
{
	enum mlxsw_res_id local_ports_in_x_res_id;
	int split_width = max_width / count;

	if (split_width == 1)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
	else if (split_width == 2)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
	else if (split_width == 4)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
	else
		return -EINVAL;

	if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
		return -EINVAL;
	return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
}

/* devlink port split handler: validate the request, remove the ports being
 * replaced and create 'count' split ports in their place. On failure the
 * original unsplit ports are recreated from the cached mappings.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int max_width;
	u8 base_port;
	int offset;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	/* Split ports cannot be split. */
	if (mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Split port with non-max and 1 module width cannot be split. */
	if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	if (count == 1 || !is_power_of_2(count) || count > max_width) {
		netdev_err(mlxsw_sp_port->dev, "Invalid split count\n");
		NL_SET_ERR_MSG_MOD(extack, "Invalid split count");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* Only in case max split is being done, the local port and
	 * base port may differ.
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	for (i = 0; i < count * offset; i++) {
		/* Expect base port to exist and also the one in the middle in
		 * case of maximal split count.
		 */
		if (i == 0 || (count == max_width && i == count / 2))
			continue;

		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Copy the mapping before the port is destroyed below. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
					 count, offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
	return err;
}

/* devlink port unsplit handler: remove the split ports of the cluster and
 * recreate the original unsplit ports from the cached mappings.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	unsigned int count;
	int max_width;
	u8 base_port;
	int offset;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}
	count = max_width / mlxsw_sp_port->mapping.width;

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	/* WARN_ON: the offset was computable when the port was split, so a
	 * failure here indicates an internal inconsistency.
	 */
	if (WARN_ON(offset < 0)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	base_port = mlxsw_sp_port->split_base_local_port;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);

	return 0;
}

/* Zero the per-TC backlog counters cached for periodic HW stats. */
static void
mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int i;

	for (i = 0; i < TC_MAX_QUEUE; i++)
		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
}

/* PUDE (port up/down event) handler: propagate operational state to the
 * netdev carrier and kick the PTP shaper / SPAN speed-update work on up.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}

/* Unpack every timestamp record from an MTPPTR event and hand it to the
 * Spectrum-1 PTP code.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u8 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}

static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
}

static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
}

/* Base RX handler for trapped packets: account per-CPU stats and inject the
 * skb into the network stack on the receiving port's netdev.
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

/* As above, but mark the skb as already forwarded in hardware. */
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* As above, but additionally mark L3 forwarding offload. */
static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* RX handler for sampled packets: forward the packet to the port's psample
 * group (if configured), then consume the skb.
 */
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	size = mlxsw_sp_port->sample->truncate ?
	       mlxsw_sp_port->sample->trunc_size : skb->len;

	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}

/* RX handler for PTP packets: dispatch to the per-ASIC PTP receive op. */
static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port,
				     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}

#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

/* Trap/event listeners common to all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
		  false, SP_LLDP, DISCARD),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
	/* PTP traps */
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU,
		  false, SP_PTP0, DISCARD),
	MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false),
};

/* Listeners specific to Spectrum-1 (FIFO-based PTP timestamping). */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

/* Program a rate/burst policer (QPCR register) for every trap group that
 * punts packets to the CPU, to protect the CPU from packet storms.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool
	is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		/* Per-group rate (packets/sec) and burst (2^burst_size). */
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			rate = 24 * 1024;
			burst_size = 12;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			rate = 19 * 1024;
			burst_size = 12;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
			rate = 360;
			burst_size = 7;
			break;
		default:
			/* Groups without an explicit policy get no policer. */
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Configure priority, traffic class and policer binding (HTGT register)
 * for every known CPU trap group.
 */
static int
mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* Policer IDs are keyed by trap group number. */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Register an array of trap listeners, unwinding on failure. */
static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_listener listeners[],
				   size_t listeners_count)
{
	int i;
	int err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &listeners[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_listener listeners[],
				      size_t listeners_count)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
}

/* Set up CPU policers and trap groups, then register the common listeners
 * followed by any generation-specific extra listeners.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp
*mlxsw_sp)
{
	/* Unregister in reverse order of registration in traps_init. */
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure the LAG hash (SLCR register) with a per-device seed derived
 * from the base MAC, and allocate the LAG tracking array.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Configure the EMAD trap group; needed before any other register access
 * that relies on EMAD transactions being trapped to the CPU.
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

/* Spectrum-1 PTP implementation. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
};

/* Spectrum-2 (and later) PTP implementation. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
};

/* Spectrum-1 SPAN buffer size: MTU-based only; speed is unused. */
static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
{
	return mtu * 5 / 2;
}

static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
	.buffsize_get = mlxsw_sp1_span_buffsize_get,
};

#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50

/* Spectrum-2/3 SPAN buffer size: scales with both MTU and port speed. */
static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor)
{
	return 3 * mtu + buffer_factor * speed / 1000;
}

static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
{
	int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;

	return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
}

static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
	.buffsize_get = mlxsw_sp2_span_buffsize_get,
};
static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) 4899 { 4900 int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR; 4901 4902 return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 4903 } 4904 4905 static const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = { 4906 .buffsize_get = mlxsw_sp3_span_buffsize_get, 4907 }; 4908 4909 u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed) 4910 { 4911 u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu); 4912 4913 return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1; 4914 } 4915 4916 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 4917 unsigned long event, void *ptr); 4918 4919 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 4920 const struct mlxsw_bus_info *mlxsw_bus_info, 4921 struct netlink_ext_ack *extack) 4922 { 4923 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4924 int err; 4925 4926 mlxsw_sp->core = mlxsw_core; 4927 mlxsw_sp->bus_info = mlxsw_bus_info; 4928 4929 err = mlxsw_sp_fw_rev_validate(mlxsw_sp); 4930 if (err) 4931 return err; 4932 4933 mlxsw_core_emad_string_tlv_enable(mlxsw_core); 4934 4935 err = mlxsw_sp_base_mac_get(mlxsw_sp); 4936 if (err) { 4937 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 4938 return err; 4939 } 4940 4941 err = mlxsw_sp_kvdl_init(mlxsw_sp); 4942 if (err) { 4943 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 4944 return err; 4945 } 4946 4947 err = mlxsw_sp_fids_init(mlxsw_sp); 4948 if (err) { 4949 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 4950 goto err_fids_init; 4951 } 4952 4953 err = mlxsw_sp_traps_init(mlxsw_sp); 4954 if (err) { 4955 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 4956 goto err_traps_init; 4957 } 4958 4959 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 4960 if (err) { 4961 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 4962 goto err_devlink_traps_init; 4963 } 4964 4965 err = 
mlxsw_sp_buffers_init(mlxsw_sp); 4966 if (err) { 4967 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 4968 goto err_buffers_init; 4969 } 4970 4971 err = mlxsw_sp_lag_init(mlxsw_sp); 4972 if (err) { 4973 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 4974 goto err_lag_init; 4975 } 4976 4977 /* Initialize SPAN before router and switchdev, so that those components 4978 * can call mlxsw_sp_span_respin(). 4979 */ 4980 err = mlxsw_sp_span_init(mlxsw_sp); 4981 if (err) { 4982 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 4983 goto err_span_init; 4984 } 4985 4986 err = mlxsw_sp_switchdev_init(mlxsw_sp); 4987 if (err) { 4988 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 4989 goto err_switchdev_init; 4990 } 4991 4992 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 4993 if (err) { 4994 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 4995 goto err_counter_pool_init; 4996 } 4997 4998 err = mlxsw_sp_afa_init(mlxsw_sp); 4999 if (err) { 5000 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 5001 goto err_afa_init; 5002 } 5003 5004 err = mlxsw_sp_nve_init(mlxsw_sp); 5005 if (err) { 5006 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 5007 goto err_nve_init; 5008 } 5009 5010 err = mlxsw_sp_acl_init(mlxsw_sp); 5011 if (err) { 5012 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 5013 goto err_acl_init; 5014 } 5015 5016 err = mlxsw_sp_router_init(mlxsw_sp, extack); 5017 if (err) { 5018 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 5019 goto err_router_init; 5020 } 5021 5022 if (mlxsw_sp->bus_info->read_frc_capable) { 5023 /* NULL is a valid return value from clock_init */ 5024 mlxsw_sp->clock = 5025 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp, 5026 mlxsw_sp->bus_info->dev); 5027 if (IS_ERR(mlxsw_sp->clock)) { 5028 err = PTR_ERR(mlxsw_sp->clock); 5029 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n"); 5030 goto 
err_ptp_clock_init; 5031 } 5032 } 5033 5034 if (mlxsw_sp->clock) { 5035 /* NULL is a valid return value from ptp_ops->init */ 5036 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp); 5037 if (IS_ERR(mlxsw_sp->ptp_state)) { 5038 err = PTR_ERR(mlxsw_sp->ptp_state); 5039 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n"); 5040 goto err_ptp_init; 5041 } 5042 } 5043 5044 /* Initialize netdevice notifier after router and SPAN is initialized, 5045 * so that the event handler can use router structures and call SPAN 5046 * respin. 5047 */ 5048 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 5049 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 5050 &mlxsw_sp->netdevice_nb); 5051 if (err) { 5052 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 5053 goto err_netdev_notifier; 5054 } 5055 5056 err = mlxsw_sp_dpipe_init(mlxsw_sp); 5057 if (err) { 5058 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 5059 goto err_dpipe_init; 5060 } 5061 5062 err = mlxsw_sp_port_module_info_init(mlxsw_sp); 5063 if (err) { 5064 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n"); 5065 goto err_port_module_info_init; 5066 } 5067 5068 err = mlxsw_sp_ports_create(mlxsw_sp); 5069 if (err) { 5070 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 5071 goto err_ports_create; 5072 } 5073 5074 return 0; 5075 5076 err_ports_create: 5077 mlxsw_sp_port_module_info_fini(mlxsw_sp); 5078 err_port_module_info_init: 5079 mlxsw_sp_dpipe_fini(mlxsw_sp); 5080 err_dpipe_init: 5081 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 5082 &mlxsw_sp->netdevice_nb); 5083 err_netdev_notifier: 5084 if (mlxsw_sp->clock) 5085 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 5086 err_ptp_init: 5087 if (mlxsw_sp->clock) 5088 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 5089 err_ptp_clock_init: 5090 mlxsw_sp_router_fini(mlxsw_sp); 5091 err_router_init: 5092 mlxsw_sp_acl_fini(mlxsw_sp); 5093 
err_acl_init: 5094 mlxsw_sp_nve_fini(mlxsw_sp); 5095 err_nve_init: 5096 mlxsw_sp_afa_fini(mlxsw_sp); 5097 err_afa_init: 5098 mlxsw_sp_counter_pool_fini(mlxsw_sp); 5099 err_counter_pool_init: 5100 mlxsw_sp_switchdev_fini(mlxsw_sp); 5101 err_switchdev_init: 5102 mlxsw_sp_span_fini(mlxsw_sp); 5103 err_span_init: 5104 mlxsw_sp_lag_fini(mlxsw_sp); 5105 err_lag_init: 5106 mlxsw_sp_buffers_fini(mlxsw_sp); 5107 err_buffers_init: 5108 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 5109 err_devlink_traps_init: 5110 mlxsw_sp_traps_fini(mlxsw_sp); 5111 err_traps_init: 5112 mlxsw_sp_fids_fini(mlxsw_sp); 5113 err_fids_init: 5114 mlxsw_sp_kvdl_fini(mlxsw_sp); 5115 return err; 5116 } 5117 5118 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 5119 const struct mlxsw_bus_info *mlxsw_bus_info, 5120 struct netlink_ext_ack *extack) 5121 { 5122 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5123 5124 mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev; 5125 mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME; 5126 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 5127 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 5128 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 5129 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 5130 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 5131 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 5132 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 5133 mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr; 5134 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 5135 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 5136 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; 5137 mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; 5138 mlxsw_sp->listeners = mlxsw_sp1_listener; 5139 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 5140 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; 5141 5142 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 5143 } 5144 5145 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 5146 const struct mlxsw_bus_info *mlxsw_bus_info, 5147 struct 
netlink_ext_ack *extack) 5148 { 5149 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5150 5151 mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev; 5152 mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME; 5153 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 5154 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 5155 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 5156 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 5157 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 5158 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 5159 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 5160 mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; 5161 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 5162 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 5163 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 5164 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; 5165 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; 5166 5167 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 5168 } 5169 5170 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, 5171 const struct mlxsw_bus_info *mlxsw_bus_info, 5172 struct netlink_ext_ack *extack) 5173 { 5174 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5175 5176 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 5177 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 5178 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 5179 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 5180 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 5181 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 5182 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 5183 mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; 5184 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 5185 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 5186 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 5187 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 5188 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; 5189 5190 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 5191 } 5192 5193 static void mlxsw_sp_fini(struct mlxsw_core 
*mlxsw_core) 5194 { 5195 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5196 5197 mlxsw_sp_ports_remove(mlxsw_sp); 5198 mlxsw_sp_port_module_info_fini(mlxsw_sp); 5199 mlxsw_sp_dpipe_fini(mlxsw_sp); 5200 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 5201 &mlxsw_sp->netdevice_nb); 5202 if (mlxsw_sp->clock) { 5203 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 5204 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 5205 } 5206 mlxsw_sp_router_fini(mlxsw_sp); 5207 mlxsw_sp_acl_fini(mlxsw_sp); 5208 mlxsw_sp_nve_fini(mlxsw_sp); 5209 mlxsw_sp_afa_fini(mlxsw_sp); 5210 mlxsw_sp_counter_pool_fini(mlxsw_sp); 5211 mlxsw_sp_switchdev_fini(mlxsw_sp); 5212 mlxsw_sp_span_fini(mlxsw_sp); 5213 mlxsw_sp_lag_fini(mlxsw_sp); 5214 mlxsw_sp_buffers_fini(mlxsw_sp); 5215 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 5216 mlxsw_sp_traps_fini(mlxsw_sp); 5217 mlxsw_sp_fids_fini(mlxsw_sp); 5218 mlxsw_sp_kvdl_fini(mlxsw_sp); 5219 } 5220 5221 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated 5222 * 802.1Q FIDs 5223 */ 5224 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 5225 VLAN_VID_MASK - 1) 5226 5227 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 5228 .used_max_mid = 1, 5229 .max_mid = MLXSW_SP_MID_MAX, 5230 .used_flood_tables = 1, 5231 .used_flood_mode = 1, 5232 .flood_mode = 3, 5233 .max_fid_flood_tables = 3, 5234 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 5235 .used_max_ib_mc = 1, 5236 .max_ib_mc = 0, 5237 .used_max_pkey = 1, 5238 .max_pkey = 0, 5239 .used_kvd_sizes = 1, 5240 .kvd_hash_single_parts = 59, 5241 .kvd_hash_double_parts = 41, 5242 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 5243 .swid_config = { 5244 { 5245 .used_type = 1, 5246 .type = MLXSW_PORT_SWID_TYPE_ETH, 5247 } 5248 }, 5249 }; 5250 5251 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 5252 .used_max_mid = 1, 5253 .max_mid = MLXSW_SP_MID_MAX, 5254 .used_flood_tables = 1, 5255 .used_flood_mode = 1, 5256 
.flood_mode = 3, 5257 .max_fid_flood_tables = 3, 5258 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 5259 .used_max_ib_mc = 1, 5260 .max_ib_mc = 0, 5261 .used_max_pkey = 1, 5262 .max_pkey = 0, 5263 .swid_config = { 5264 { 5265 .used_type = 1, 5266 .type = MLXSW_PORT_SWID_TYPE_ETH, 5267 } 5268 }, 5269 }; 5270 5271 static void 5272 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 5273 struct devlink_resource_size_params *kvd_size_params, 5274 struct devlink_resource_size_params *linear_size_params, 5275 struct devlink_resource_size_params *hash_double_size_params, 5276 struct devlink_resource_size_params *hash_single_size_params) 5277 { 5278 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 5279 KVD_SINGLE_MIN_SIZE); 5280 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 5281 KVD_DOUBLE_MIN_SIZE); 5282 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 5283 u32 linear_size_min = 0; 5284 5285 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 5286 MLXSW_SP_KVD_GRANULARITY, 5287 DEVLINK_RESOURCE_UNIT_ENTRY); 5288 devlink_resource_size_params_init(linear_size_params, linear_size_min, 5289 kvd_size - single_size_min - 5290 double_size_min, 5291 MLXSW_SP_KVD_GRANULARITY, 5292 DEVLINK_RESOURCE_UNIT_ENTRY); 5293 devlink_resource_size_params_init(hash_double_size_params, 5294 double_size_min, 5295 kvd_size - single_size_min - 5296 linear_size_min, 5297 MLXSW_SP_KVD_GRANULARITY, 5298 DEVLINK_RESOURCE_UNIT_ENTRY); 5299 devlink_resource_size_params_init(hash_single_size_params, 5300 single_size_min, 5301 kvd_size - double_size_min - 5302 linear_size_min, 5303 MLXSW_SP_KVD_GRANULARITY, 5304 DEVLINK_RESOURCE_UNIT_ENTRY); 5305 } 5306 5307 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 5308 { 5309 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5310 struct devlink_resource_size_params hash_single_size_params; 5311 struct devlink_resource_size_params hash_double_size_params; 5312 struct 
devlink_resource_size_params linear_size_params; 5313 struct devlink_resource_size_params kvd_size_params; 5314 u32 kvd_size, single_size, double_size, linear_size; 5315 const struct mlxsw_config_profile *profile; 5316 int err; 5317 5318 profile = &mlxsw_sp1_config_profile; 5319 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 5320 return -EIO; 5321 5322 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 5323 &linear_size_params, 5324 &hash_double_size_params, 5325 &hash_single_size_params); 5326 5327 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 5328 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 5329 kvd_size, MLXSW_SP_RESOURCE_KVD, 5330 DEVLINK_RESOURCE_ID_PARENT_TOP, 5331 &kvd_size_params); 5332 if (err) 5333 return err; 5334 5335 linear_size = profile->kvd_linear_size; 5336 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 5337 linear_size, 5338 MLXSW_SP_RESOURCE_KVD_LINEAR, 5339 MLXSW_SP_RESOURCE_KVD, 5340 &linear_size_params); 5341 if (err) 5342 return err; 5343 5344 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 5345 if (err) 5346 return err; 5347 5348 double_size = kvd_size - linear_size; 5349 double_size *= profile->kvd_hash_double_parts; 5350 double_size /= profile->kvd_hash_double_parts + 5351 profile->kvd_hash_single_parts; 5352 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 5353 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 5354 double_size, 5355 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 5356 MLXSW_SP_RESOURCE_KVD, 5357 &hash_double_size_params); 5358 if (err) 5359 return err; 5360 5361 single_size = kvd_size - double_size - linear_size; 5362 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 5363 single_size, 5364 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 5365 MLXSW_SP_RESOURCE_KVD, 5366 &hash_single_size_params); 5367 if (err) 5368 return err; 5369 5370 return 0; 5371 } 5372 5373 static int 
mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	/* Spectrum-2 exposes the KVD as a single opaque devlink resource;
	 * unlike SP1 there are no user-partitionable linear/hash children.
	 * (The "static int" of this definition is on the preceding line.)
	 */
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	/* Fixed size: min == max == kvd_size, so the user cannot resize it. */
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					 kvd_size, MLXSW_SP_RESOURCE_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &kvd_size_params);
}

/* Expose the SPAN agent count as a fixed-size devlink resource (common
 * to all Spectrum generations).
 */
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
					 max_span, MLXSW_SP_RESOURCE_SPAN,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &span_size_params);
}

/* Spectrum-1 .resources_register callback: KVD (with sub-resources),
 * then SPAN. On partial failure, all previously registered resources
 * are dropped via devlink_resources_unregister(.., NULL).
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	return 0;

err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

/* Spectrum-2/3 .resources_register callback; same shape as SP1 but with
 * the single-resource KVD registration (continued on the next line).
 */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
return err; 5438 5439 err = mlxsw_sp_resources_span_register(mlxsw_core); 5440 if (err) 5441 goto err_resources_span_register; 5442 5443 return 0; 5444 5445 err_resources_span_register: 5446 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); 5447 return err; 5448 } 5449 5450 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 5451 const struct mlxsw_config_profile *profile, 5452 u64 *p_single_size, u64 *p_double_size, 5453 u64 *p_linear_size) 5454 { 5455 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5456 u32 double_size; 5457 int err; 5458 5459 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 5460 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 5461 return -EIO; 5462 5463 /* The hash part is what left of the kvd without the 5464 * linear part. It is split to the single size and 5465 * double size by the parts ratio from the profile. 5466 * Both sizes must be a multiplications of the 5467 * granularity from the profile. In case the user 5468 * provided the sizes they are obtained via devlink. 5469 */ 5470 err = devlink_resource_size_get(devlink, 5471 MLXSW_SP_RESOURCE_KVD_LINEAR, 5472 p_linear_size); 5473 if (err) 5474 *p_linear_size = profile->kvd_linear_size; 5475 5476 err = devlink_resource_size_get(devlink, 5477 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 5478 p_double_size); 5479 if (err) { 5480 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 5481 *p_linear_size; 5482 double_size *= profile->kvd_hash_double_parts; 5483 double_size /= profile->kvd_hash_double_parts + 5484 profile->kvd_hash_single_parts; 5485 *p_double_size = rounddown(double_size, 5486 MLXSW_SP_KVD_GRANULARITY); 5487 } 5488 5489 err = devlink_resource_size_get(devlink, 5490 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 5491 p_single_size); 5492 if (err) 5493 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 5494 *p_double_size - *p_linear_size; 5495 5496 /* Check results are legal. 
*/ 5497 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 5498 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 5499 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 5500 return -EIO; 5501 5502 return 0; 5503 } 5504 5505 static int 5506 mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id, 5507 union devlink_param_value val, 5508 struct netlink_ext_ack *extack) 5509 { 5510 if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) && 5511 (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) { 5512 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'"); 5513 return -EINVAL; 5514 } 5515 5516 return 0; 5517 } 5518 5519 static const struct devlink_param mlxsw_sp_devlink_params[] = { 5520 DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, 5521 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), 5522 NULL, NULL, 5523 mlxsw_sp_devlink_param_fw_load_policy_validate), 5524 }; 5525 5526 static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core) 5527 { 5528 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5529 union devlink_param_value value; 5530 int err; 5531 5532 err = devlink_params_register(devlink, mlxsw_sp_devlink_params, 5533 ARRAY_SIZE(mlxsw_sp_devlink_params)); 5534 if (err) 5535 return err; 5536 5537 value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER; 5538 devlink_param_driverinit_value_set(devlink, 5539 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 5540 value); 5541 return 0; 5542 } 5543 5544 static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core) 5545 { 5546 devlink_params_unregister(priv_to_devlink(mlxsw_core), 5547 mlxsw_sp_devlink_params, 5548 ARRAY_SIZE(mlxsw_sp_devlink_params)); 5549 } 5550 5551 static int 5552 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 5553 struct devlink_param_gset_ctx *ctx) 5554 { 5555 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 5556 struct mlxsw_sp *mlxsw_sp = 
mlxsw_core_driver_priv(mlxsw_core); 5557 5558 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 5559 return 0; 5560 } 5561 5562 static int 5563 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 5564 struct devlink_param_gset_ctx *ctx) 5565 { 5566 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 5567 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5568 5569 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 5570 } 5571 5572 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 5573 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 5574 "acl_region_rehash_interval", 5575 DEVLINK_PARAM_TYPE_U32, 5576 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 5577 mlxsw_sp_params_acl_region_rehash_intrvl_get, 5578 mlxsw_sp_params_acl_region_rehash_intrvl_set, 5579 NULL), 5580 }; 5581 5582 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 5583 { 5584 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5585 union devlink_param_value value; 5586 int err; 5587 5588 err = mlxsw_sp_params_register(mlxsw_core); 5589 if (err) 5590 return err; 5591 5592 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 5593 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 5594 if (err) 5595 goto err_devlink_params_register; 5596 5597 value.vu32 = 0; 5598 devlink_param_driverinit_value_set(devlink, 5599 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 5600 value); 5601 return 0; 5602 5603 err_devlink_params_register: 5604 mlxsw_sp_params_unregister(mlxsw_core); 5605 return err; 5606 } 5607 5608 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 5609 { 5610 devlink_params_unregister(priv_to_devlink(mlxsw_core), 5611 mlxsw_sp2_devlink_params, 5612 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 5613 mlxsw_sp_params_unregister(mlxsw_core); 5614 } 5615 5616 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 5617 struct sk_buff *skb, u8 local_port) 5618 
{ 5619 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5620 5621 skb_pull(skb, MLXSW_TXHDR_LEN); 5622 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 5623 } 5624 5625 static struct mlxsw_driver mlxsw_sp1_driver = { 5626 .kind = mlxsw_sp1_driver_name, 5627 .priv_size = sizeof(struct mlxsw_sp), 5628 .init = mlxsw_sp1_init, 5629 .fini = mlxsw_sp_fini, 5630 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5631 .port_split = mlxsw_sp_port_split, 5632 .port_unsplit = mlxsw_sp_port_unsplit, 5633 .sb_pool_get = mlxsw_sp_sb_pool_get, 5634 .sb_pool_set = mlxsw_sp_sb_pool_set, 5635 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5636 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5637 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5638 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5639 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5640 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5641 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5642 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5643 .flash_update = mlxsw_sp_flash_update, 5644 .trap_init = mlxsw_sp_trap_init, 5645 .trap_fini = mlxsw_sp_trap_fini, 5646 .trap_action_set = mlxsw_sp_trap_action_set, 5647 .trap_group_init = mlxsw_sp_trap_group_init, 5648 .txhdr_construct = mlxsw_sp_txhdr_construct, 5649 .resources_register = mlxsw_sp1_resources_register, 5650 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 5651 .params_register = mlxsw_sp_params_register, 5652 .params_unregister = mlxsw_sp_params_unregister, 5653 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5654 .txhdr_len = MLXSW_TXHDR_LEN, 5655 .profile = &mlxsw_sp1_config_profile, 5656 .res_query_enabled = true, 5657 }; 5658 5659 static struct mlxsw_driver mlxsw_sp2_driver = { 5660 .kind = mlxsw_sp2_driver_name, 5661 .priv_size = sizeof(struct mlxsw_sp), 5662 .init = mlxsw_sp2_init, 5663 .fini = mlxsw_sp_fini, 5664 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5665 .port_split = mlxsw_sp_port_split, 
5666 .port_unsplit = mlxsw_sp_port_unsplit, 5667 .sb_pool_get = mlxsw_sp_sb_pool_get, 5668 .sb_pool_set = mlxsw_sp_sb_pool_set, 5669 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5670 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5671 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5672 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5673 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5674 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5675 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5676 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5677 .flash_update = mlxsw_sp_flash_update, 5678 .trap_init = mlxsw_sp_trap_init, 5679 .trap_fini = mlxsw_sp_trap_fini, 5680 .trap_action_set = mlxsw_sp_trap_action_set, 5681 .trap_group_init = mlxsw_sp_trap_group_init, 5682 .txhdr_construct = mlxsw_sp_txhdr_construct, 5683 .resources_register = mlxsw_sp2_resources_register, 5684 .params_register = mlxsw_sp2_params_register, 5685 .params_unregister = mlxsw_sp2_params_unregister, 5686 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5687 .txhdr_len = MLXSW_TXHDR_LEN, 5688 .profile = &mlxsw_sp2_config_profile, 5689 .res_query_enabled = true, 5690 }; 5691 5692 static struct mlxsw_driver mlxsw_sp3_driver = { 5693 .kind = mlxsw_sp3_driver_name, 5694 .priv_size = sizeof(struct mlxsw_sp), 5695 .init = mlxsw_sp3_init, 5696 .fini = mlxsw_sp_fini, 5697 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5698 .port_split = mlxsw_sp_port_split, 5699 .port_unsplit = mlxsw_sp_port_unsplit, 5700 .sb_pool_get = mlxsw_sp_sb_pool_get, 5701 .sb_pool_set = mlxsw_sp_sb_pool_set, 5702 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5703 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5704 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5705 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5706 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5707 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5708 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5709 
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5710 .flash_update = mlxsw_sp_flash_update, 5711 .trap_init = mlxsw_sp_trap_init, 5712 .trap_fini = mlxsw_sp_trap_fini, 5713 .trap_action_set = mlxsw_sp_trap_action_set, 5714 .trap_group_init = mlxsw_sp_trap_group_init, 5715 .txhdr_construct = mlxsw_sp_txhdr_construct, 5716 .resources_register = mlxsw_sp2_resources_register, 5717 .params_register = mlxsw_sp2_params_register, 5718 .params_unregister = mlxsw_sp2_params_unregister, 5719 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5720 .txhdr_len = MLXSW_TXHDR_LEN, 5721 .profile = &mlxsw_sp2_config_profile, 5722 .res_query_enabled = true, 5723 }; 5724 5725 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 5726 { 5727 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 5728 } 5729 5730 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) 5731 { 5732 struct mlxsw_sp_port **p_mlxsw_sp_port = data; 5733 int ret = 0; 5734 5735 if (mlxsw_sp_port_dev_check(lower_dev)) { 5736 *p_mlxsw_sp_port = netdev_priv(lower_dev); 5737 ret = 1; 5738 } 5739 5740 return ret; 5741 } 5742 5743 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 5744 { 5745 struct mlxsw_sp_port *mlxsw_sp_port; 5746 5747 if (mlxsw_sp_port_dev_check(dev)) 5748 return netdev_priv(dev); 5749 5750 mlxsw_sp_port = NULL; 5751 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); 5752 5753 return mlxsw_sp_port; 5754 } 5755 5756 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 5757 { 5758 struct mlxsw_sp_port *mlxsw_sp_port; 5759 5760 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 5761 return mlxsw_sp_port ? 
mlxsw_sp_port->mlxsw_sp : NULL; 5762 } 5763 5764 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 5765 { 5766 struct mlxsw_sp_port *mlxsw_sp_port; 5767 5768 if (mlxsw_sp_port_dev_check(dev)) 5769 return netdev_priv(dev); 5770 5771 mlxsw_sp_port = NULL; 5772 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 5773 &mlxsw_sp_port); 5774 5775 return mlxsw_sp_port; 5776 } 5777 5778 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 5779 { 5780 struct mlxsw_sp_port *mlxsw_sp_port; 5781 5782 rcu_read_lock(); 5783 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 5784 if (mlxsw_sp_port) 5785 dev_hold(mlxsw_sp_port->dev); 5786 rcu_read_unlock(); 5787 return mlxsw_sp_port; 5788 } 5789 5790 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 5791 { 5792 dev_put(mlxsw_sp_port->dev); 5793 } 5794 5795 static void 5796 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 5797 struct net_device *lag_dev) 5798 { 5799 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 5800 struct net_device *upper_dev; 5801 struct list_head *iter; 5802 5803 if (netif_is_bridge_port(lag_dev)) 5804 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 5805 5806 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 5807 if (!netif_is_bridge_port(upper_dev)) 5808 continue; 5809 br_dev = netdev_master_upper_dev_get(upper_dev); 5810 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 5811 } 5812 } 5813 5814 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5815 { 5816 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5817 5818 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 5819 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5820 } 5821 5822 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5823 { 5824 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5825 5826 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 5827 return 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Map a local port into a LAG at the given member index (SLCOR register).
 * The port is added in collector (disabled) state; distribution is
 * enabled separately.
 */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Remove a local port from a LAG (SLCOR register). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable traffic collection on a LAG member port. */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Disable traffic collection on a LAG member port. */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Find the HW LAG index for lag_dev: reuse the entry already bound to
 * this netdev if one exists, otherwise return the first free index.
 * Returns -EBUSY when all MAX_LAG entries are in use by other devices.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			/* Remember first free slot but keep scanning for
			 * an existing binding to lag_dev.
			 */
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

/* Validate that lag_dev can be offloaded: a HW LAG entry must be
 * available and the bond must use hash-based Tx. Reports the reason
 * through extack on rejection.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

/* Find a free member slot inside a LAG; -EBUSY when all
 * MAX_LAG_MEMBERS slots are occupied.
 */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

/* Join a port to a LAG: create the HW LAG on first reference, pick a
 * member slot and program the collector mapping (function continues on
 * the next line).
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id,
port_index); 5961 if (err) 5962 goto err_col_port_add; 5963 5964 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 5965 mlxsw_sp_port->local_port); 5966 mlxsw_sp_port->lag_id = lag_id; 5967 mlxsw_sp_port->lagged = 1; 5968 lag->ref_count++; 5969 5970 /* Port is no longer usable as a router interface */ 5971 if (mlxsw_sp_port->default_vlan->fid) 5972 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 5973 5974 return 0; 5975 5976 err_col_port_add: 5977 if (!lag->ref_count) 5978 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 5979 return err; 5980 } 5981 5982 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 5983 struct net_device *lag_dev) 5984 { 5985 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5986 u16 lag_id = mlxsw_sp_port->lag_id; 5987 struct mlxsw_sp_upper *lag; 5988 5989 if (!mlxsw_sp_port->lagged) 5990 return; 5991 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5992 WARN_ON(lag->ref_count == 0); 5993 5994 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 5995 5996 /* Any VLANs configured on the port are no longer valid */ 5997 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 5998 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 5999 /* Make the LAG and its directly linked uppers leave bridges they 6000 * are memeber in 6001 */ 6002 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 6003 6004 if (lag->ref_count == 1) 6005 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 6006 6007 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 6008 mlxsw_sp_port->local_port); 6009 mlxsw_sp_port->lagged = 0; 6010 lag->ref_count--; 6011 6012 /* Make sure untagged frames are allowed to ingress */ 6013 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 6014 } 6015 6016 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 6017 u16 lag_id) 6018 { 6019 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6020 char sldr_pl[MLXSW_REG_SLDR_LEN]; 6021 6022 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, 
lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Remove the port from the LAG's distributor (Tx) set via SLDR. */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Enable both collection and distribution for the port's LAG membership,
 * rolling back on failure.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Disable both distribution and collection for the port's LAG membership,
 * rolling back on failure.
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Reflect the bonding driver's tx_enabled state into the HW LAG. */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

/* Set the STP state of every VLAN on the port to forwarding (enable) or
 * discarding (disable) via the SPMS register.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	/* SPMS payload is too large for the stack; allocate it. */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Prepare the port for OVS: virtual-port mode, STP forwarding, all VLANs
 * allowed with learning disabled. Unwinds on any failure.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning only for VIDs that were disabled above. */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

/* Undo mlxsw_sp_port_ovs_join() in reverse order. */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

static bool
mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	/* True when more than one VxLAN device is enslaved to the bridge. */
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

/* Check that no two VxLAN devices under the bridge map to the same PVID. */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

/* Validate a bridge with VxLAN lowers for offload; reasons via extack. */
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

/* Handle PRECHANGEUPPER (validate and veto unsupported topologies) and
 * CHANGEUPPER (apply join/leave of bridge, LAG, OVS, macvlan, VLAN uppers)
 * for a physical port netdev.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

/* Propagate CHANGELOWERSTATE (bonding tx_enabled) to the HW LAG. */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

/* Dispatch a netdev notifier event for a physical port to the upper- or
 * lower-state handler.
 */
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

/* Replay an event on a LAG device to each mlxsw port member. */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle PRECHANGEUPPER/CHANGEUPPER for a VLAN device whose real device is
 * an mlxsw port.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

/* Replay a VLAN-upper event on a LAG real device to each mlxsw member. */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle upper events for a VLAN device on top of an offloaded bridge; only
 * macvlan uppers over a router interface are supported.
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Route a VLAN-device event to the handler matching its real device. */
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

/* Handle upper events for an offloaded bridge device itself; only VLAN and
 * macvlan uppers are accepted.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack
= netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Veto any non-VRF upper on a macvlan device lowered by this driver. */
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

/* True when the event is a (PRE)CHANGEUPPER whose upper is an L3 master. */
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

/* Handle events on a VxLAN device: join/leave its bridge's offload state on
 * CHANGEUPPER, PRE_UP and DOWN.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

/* Top-level netdev notifier: invalidate SPAN entries on unregister, respin
 * SPAN, then dispatch by device type.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* NOTE: the VxLAN check is deliberately a separate 'if', not part of
	 * the else-if chain below - a VxLAN device is also handled by the
	 * chain (e.g. as an IP-in-IP underlay candidate).
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX,
PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

/* Register address-validator notifiers, the three core drivers and the
 * three PCI drivers, unwinding in reverse order on any failure.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

/* Tear everything down in the reverse of mlxsw_sp_module_init(). */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);