1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/jhash.h> 25 #include <linux/log2.h> 26 #include <net/switchdev.h> 27 #include <net/pkt_cls.h> 28 #include <net/tc_act/tc_mirred.h> 29 #include <net/netevent.h> 30 #include <net/tc_act/tc_sample.h> 31 #include <net/addrconf.h> 32 33 #include "spectrum.h" 34 #include "pci.h" 35 #include "core.h" 36 #include "core_env.h" 37 #include "reg.h" 38 #include "port.h" 39 #include "trap.h" 40 #include "txheader.h" 41 #include "spectrum_cnt.h" 42 #include "spectrum_dpipe.h" 43 #include "spectrum_acl_flex_actions.h" 44 #include "spectrum_span.h" 45 #include "spectrum_ptp.h" 46 #include "../mlxfw/mlxfw.h" 47 48 #define MLXSW_SP1_FWREV_MAJOR 13 49 #define MLXSW_SP1_FWREV_MINOR 2000 50 #define MLXSW_SP1_FWREV_SUBMINOR 2714 51 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 52 53 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 54 .major = MLXSW_SP1_FWREV_MAJOR, 55 .minor = MLXSW_SP1_FWREV_MINOR, 56 .subminor = MLXSW_SP1_FWREV_SUBMINOR, 57 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 58 }; 59 60 #define MLXSW_SP1_FW_FILENAME \ 61 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 62 "." __stringify(MLXSW_SP1_FWREV_MINOR) \ 63 "." 
__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" 64 65 #define MLXSW_SP2_FWREV_MAJOR 29 66 #define MLXSW_SP2_FWREV_MINOR 2000 67 #define MLXSW_SP2_FWREV_SUBMINOR 2714 68 69 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { 70 .major = MLXSW_SP2_FWREV_MAJOR, 71 .minor = MLXSW_SP2_FWREV_MINOR, 72 .subminor = MLXSW_SP2_FWREV_SUBMINOR, 73 }; 74 75 #define MLXSW_SP2_FW_FILENAME \ 76 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \ 77 "." __stringify(MLXSW_SP2_FWREV_MINOR) \ 78 "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2" 79 80 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 81 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 82 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; 83 static const char mlxsw_sp_driver_version[] = "1.0"; 84 85 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 86 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 87 }; 88 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 89 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 90 }; 91 92 /* tx_hdr_version 93 * Tx header version. 94 * Must be set to 1. 95 */ 96 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 97 98 /* tx_hdr_ctl 99 * Packet control type. 100 * 0 - Ethernet control (e.g. EMADs, LACP) 101 * 1 - Ethernet data 102 */ 103 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 104 105 /* tx_hdr_proto 106 * Packet protocol type. Must be set to 1 (Ethernet). 107 */ 108 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 109 110 /* tx_hdr_rx_is_router 111 * Packet is sent from the router. Valid for data packets only. 112 */ 113 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 114 115 /* tx_hdr_fid_valid 116 * Indicates if the 'fid' field is valid and should be used for 117 * forwarding lookup. Valid for data packets only. 118 */ 119 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 120 121 /* tx_hdr_swid 122 * Switch partition ID. Must be set to 0. 
123 */ 124 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 125 126 /* tx_hdr_control_tclass 127 * Indicates if the packet should use the control TClass and not one 128 * of the data TClasses. 129 */ 130 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 131 132 /* tx_hdr_etclass 133 * Egress TClass to be used on the egress device on the egress port. 134 */ 135 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 136 137 /* tx_hdr_port_mid 138 * Destination local port for unicast packets. 139 * Destination multicast ID for multicast packets. 140 * 141 * Control packets are directed to a specific egress port, while data 142 * packets are transmitted through the CPU port (0) into the switch partition, 143 * where forwarding rules are applied. 144 */ 145 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 146 147 /* tx_hdr_fid 148 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 149 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 150 * Valid for data packets only. 151 */ 152 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 153 154 /* tx_hdr_type 155 * 0 - Data packets 156 * 6 - Control packets 157 */ 158 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 159 160 struct mlxsw_sp_mlxfw_dev { 161 struct mlxfw_dev mlxfw_dev; 162 struct mlxsw_sp *mlxsw_sp; 163 }; 164 165 struct mlxsw_sp_ptp_ops { 166 struct mlxsw_sp_ptp_clock * 167 (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev); 168 void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock); 169 170 struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp); 171 void (*fini)(struct mlxsw_sp_ptp_state *ptp_state); 172 173 /* Notify a driver that a packet that might be PTP was received. Driver 174 * is responsible for freeing the passed-in SKB. 175 */ 176 void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 177 u8 local_port); 178 179 /* Notify a driver that a timestamped packet was transmitted. Driver 180 * is responsible for freeing the passed-in SKB. 
181 */ 182 void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 183 u8 local_port); 184 185 int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, 186 struct hwtstamp_config *config); 187 int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port, 188 struct hwtstamp_config *config); 189 void (*shaper_work)(struct work_struct *work); 190 int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, 191 struct ethtool_ts_info *info); 192 int (*get_stats_count)(void); 193 void (*get_stats_strings)(u8 **p); 194 void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, 195 u64 *data, int data_index); 196 }; 197 198 struct mlxsw_sp_span_ops { 199 u32 (*buffsize_get)(int mtu, u32 speed); 200 }; 201 202 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 203 u16 component_index, u32 *p_max_size, 204 u8 *p_align_bits, u16 *p_max_write_size) 205 { 206 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 207 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 208 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 209 char mcqi_pl[MLXSW_REG_MCQI_LEN]; 210 int err; 211 212 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 213 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 214 if (err) 215 return err; 216 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 217 p_max_write_size); 218 219 *p_align_bits = max_t(u8, *p_align_bits, 2); 220 *p_max_write_size = min_t(u16, *p_max_write_size, 221 MLXSW_REG_MCDA_MAX_DATA_LEN); 222 return 0; 223 } 224 225 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 226 { 227 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 228 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 229 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 230 char mcc_pl[MLXSW_REG_MCC_LEN]; 231 u8 control_state; 232 int err; 233 234 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 235 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 236 if (err) 237 return err; 238 239 
mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 240 if (control_state != MLXFW_FSM_STATE_IDLE) 241 return -EBUSY; 242 243 mlxsw_reg_mcc_pack(mcc_pl, 244 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 245 0, *fwhandle, 0); 246 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 247 } 248 249 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 250 u32 fwhandle, u16 component_index, 251 u32 component_size) 252 { 253 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 254 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 255 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 256 char mcc_pl[MLXSW_REG_MCC_LEN]; 257 258 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 259 component_index, fwhandle, component_size); 260 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 261 } 262 263 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 264 u32 fwhandle, u8 *data, u16 size, 265 u32 offset) 266 { 267 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 268 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 269 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 270 char mcda_pl[MLXSW_REG_MCDA_LEN]; 271 272 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 273 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 274 } 275 276 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 277 u32 fwhandle, u16 component_index) 278 { 279 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 280 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 281 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 282 char mcc_pl[MLXSW_REG_MCC_LEN]; 283 284 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 285 component_index, fwhandle, 0); 286 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 287 } 288 289 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 290 { 291 
struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 292 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 293 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 294 char mcc_pl[MLXSW_REG_MCC_LEN]; 295 296 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 297 fwhandle, 0); 298 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 299 } 300 301 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 302 enum mlxfw_fsm_state *fsm_state, 303 enum mlxfw_fsm_state_err *fsm_state_err) 304 { 305 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 306 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 307 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 308 char mcc_pl[MLXSW_REG_MCC_LEN]; 309 u8 control_state; 310 u8 error_code; 311 int err; 312 313 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 314 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 315 if (err) 316 return err; 317 318 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 319 *fsm_state = control_state; 320 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 321 MLXFW_FSM_STATE_ERR_MAX); 322 return 0; 323 } 324 325 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 326 { 327 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 328 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 329 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 330 char mcc_pl[MLXSW_REG_MCC_LEN]; 331 332 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 333 fwhandle, 0); 334 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 335 } 336 337 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 338 { 339 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 340 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 341 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 342 char mcc_pl[MLXSW_REG_MCC_LEN]; 343 344 
mlxsw_reg_mcc_pack(mcc_pl, 345 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 346 fwhandle, 0); 347 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 348 } 349 350 static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev, 351 const char *msg, const char *comp_name, 352 u32 done_bytes, u32 total_bytes) 353 { 354 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 355 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 356 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 357 358 devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core), 359 msg, comp_name, 360 done_bytes, total_bytes); 361 } 362 363 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 364 .component_query = mlxsw_sp_component_query, 365 .fsm_lock = mlxsw_sp_fsm_lock, 366 .fsm_component_update = mlxsw_sp_fsm_component_update, 367 .fsm_block_download = mlxsw_sp_fsm_block_download, 368 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 369 .fsm_activate = mlxsw_sp_fsm_activate, 370 .fsm_query_state = mlxsw_sp_fsm_query_state, 371 .fsm_cancel = mlxsw_sp_fsm_cancel, 372 .fsm_release = mlxsw_sp_fsm_release, 373 .status_notify = mlxsw_sp_status_notify, 374 }; 375 376 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 377 const struct firmware *firmware, 378 struct netlink_ext_ack *extack) 379 { 380 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 381 .mlxfw_dev = { 382 .ops = &mlxsw_sp_mlxfw_dev_ops, 383 .psid = mlxsw_sp->bus_info->psid, 384 .psid_size = strlen(mlxsw_sp->bus_info->psid), 385 }, 386 .mlxsw_sp = mlxsw_sp 387 }; 388 int err; 389 390 mlxsw_core_fw_flash_start(mlxsw_sp->core); 391 devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core)); 392 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, 393 firmware, extack); 394 devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core)); 395 mlxsw_core_fw_flash_end(mlxsw_sp->core); 396 397 return err; 398 } 399 400 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp 
*mlxsw_sp) 401 { 402 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 403 const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 404 const char *fw_filename = mlxsw_sp->fw_filename; 405 union devlink_param_value value; 406 const struct firmware *firmware; 407 int err; 408 409 /* Don't check if driver does not require it */ 410 if (!req_rev || !fw_filename) 411 return 0; 412 413 /* Don't check if devlink 'fw_load_policy' param is 'flash' */ 414 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), 415 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 416 &value); 417 if (err) 418 return err; 419 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) 420 return 0; 421 422 /* Validate driver & FW are compatible */ 423 if (rev->major != req_rev->major) { 424 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 425 rev->major, req_rev->major); 426 return -EINVAL; 427 } 428 if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev)) 429 return 0; 430 431 dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n", 432 rev->major, rev->minor, rev->subminor, req_rev->major, 433 req_rev->minor, req_rev->subminor); 434 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 435 fw_filename); 436 437 err = request_firmware_direct(&firmware, fw_filename, 438 mlxsw_sp->bus_info->dev); 439 if (err) { 440 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 441 fw_filename); 442 return err; 443 } 444 445 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL); 446 release_firmware(firmware); 447 if (err) 448 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 449 450 /* On FW flash success, tell the caller FW reset is needed 451 * if current FW supports it. 452 */ 453 if (rev->minor >= req_rev->can_reset_minor) 454 return err ? 
err : -EAGAIN; 455 else 456 return 0; 457 } 458 459 static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core, 460 const char *file_name, const char *component, 461 struct netlink_ext_ack *extack) 462 { 463 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 464 const struct firmware *firmware; 465 int err; 466 467 if (component) 468 return -EOPNOTSUPP; 469 470 err = request_firmware_direct(&firmware, file_name, 471 mlxsw_sp->bus_info->dev); 472 if (err) 473 return err; 474 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack); 475 release_firmware(firmware); 476 477 return err; 478 } 479 480 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 481 unsigned int counter_index, u64 *packets, 482 u64 *bytes) 483 { 484 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 485 int err; 486 487 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 488 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 489 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 490 if (err) 491 return err; 492 if (packets) 493 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 494 if (bytes) 495 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 496 return 0; 497 } 498 499 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 500 unsigned int counter_index) 501 { 502 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 503 504 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 505 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 506 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 507 } 508 509 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 510 unsigned int *p_counter_index) 511 { 512 int err; 513 514 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 515 p_counter_index); 516 if (err) 517 return err; 518 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 519 if (err) 520 goto err_counter_clear; 521 return 0; 522 523 err_counter_clear: 524 mlxsw_sp_counter_free(mlxsw_sp, 
MLXSW_SP_COUNTER_SUB_POOL_FLOW, 525 *p_counter_index); 526 return err; 527 } 528 529 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 530 unsigned int counter_index) 531 { 532 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 533 counter_index); 534 } 535 536 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 537 const struct mlxsw_tx_info *tx_info) 538 { 539 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 540 541 memset(txhdr, 0, MLXSW_TXHDR_LEN); 542 543 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 544 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 545 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 546 mlxsw_tx_hdr_swid_set(txhdr, 0); 547 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 548 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 549 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 550 } 551 552 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 553 { 554 switch (state) { 555 case BR_STATE_FORWARDING: 556 return MLXSW_REG_SPMS_STATE_FORWARDING; 557 case BR_STATE_LEARNING: 558 return MLXSW_REG_SPMS_STATE_LEARNING; 559 case BR_STATE_LISTENING: /* fall-through */ 560 case BR_STATE_DISABLED: /* fall-through */ 561 case BR_STATE_BLOCKING: 562 return MLXSW_REG_SPMS_STATE_DISCARDING; 563 default: 564 BUG(); 565 } 566 } 567 568 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 569 u8 state) 570 { 571 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 572 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 573 char *spms_pl; 574 int err; 575 576 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 577 if (!spms_pl) 578 return -ENOMEM; 579 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 580 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 581 582 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 583 kfree(spms_pl); 584 return err; 585 } 586 587 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 588 { 589 char 
spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 590 int err; 591 592 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 593 if (err) 594 return err; 595 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 596 return 0; 597 } 598 599 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, 600 bool enable, u32 rate) 601 { 602 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 603 char mpsc_pl[MLXSW_REG_MPSC_LEN]; 604 605 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); 606 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl); 607 } 608 609 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 610 bool is_up) 611 { 612 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 613 char paos_pl[MLXSW_REG_PAOS_LEN]; 614 615 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 616 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 617 MLXSW_PORT_ADMIN_STATUS_DOWN); 618 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 619 } 620 621 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 622 unsigned char *addr) 623 { 624 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 625 char ppad_pl[MLXSW_REG_PPAD_LEN]; 626 627 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 628 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 629 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 630 } 631 632 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 633 { 634 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 635 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 636 637 ether_addr_copy(addr, mlxsw_sp->base_mac); 638 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 639 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 640 } 641 642 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 643 { 644 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 645 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 646 int 
max_mtu; 647 int err; 648 649 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 650 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 651 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 652 if (err) 653 return err; 654 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 655 656 if (mtu > max_mtu) 657 return -EINVAL; 658 659 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 660 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 661 } 662 663 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 664 { 665 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 666 char pspa_pl[MLXSW_REG_PSPA_LEN]; 667 668 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 669 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 670 } 671 672 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 673 { 674 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 675 char svpe_pl[MLXSW_REG_SVPE_LEN]; 676 677 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 678 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 679 } 680 681 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 682 bool learn_enable) 683 { 684 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 685 char *spvmlr_pl; 686 int err; 687 688 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 689 if (!spvmlr_pl) 690 return -ENOMEM; 691 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 692 learn_enable); 693 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 694 kfree(spvmlr_pl); 695 return err; 696 } 697 698 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 699 u16 vid) 700 { 701 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 702 char spvid_pl[MLXSW_REG_SPVID_LEN]; 703 704 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 705 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), 
spvid_pl); 706 } 707 708 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 709 bool allow) 710 { 711 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 712 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 713 714 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 715 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 716 } 717 718 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 719 { 720 int err; 721 722 if (!vid) { 723 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 724 if (err) 725 return err; 726 } else { 727 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 728 if (err) 729 return err; 730 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 731 if (err) 732 goto err_port_allow_untagged_set; 733 } 734 735 mlxsw_sp_port->pvid = vid; 736 return 0; 737 738 err_port_allow_untagged_set: 739 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 740 return err; 741 } 742 743 static int 744 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 745 { 746 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 747 char sspr_pl[MLXSW_REG_SSPR_LEN]; 748 749 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 750 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 751 } 752 753 static int 754 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, 755 struct mlxsw_sp_port_mapping *port_mapping) 756 { 757 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 758 bool separate_rxtx; 759 u8 module; 760 u8 width; 761 int err; 762 int i; 763 764 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 765 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 766 if (err) 767 return err; 768 module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 769 width = mlxsw_reg_pmlp_width_get(pmlp_pl); 770 separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl); 771 772 if (width && !is_power_of_2(width)) { 773 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported 
module config: width value is not power of 2\n", 774 local_port); 775 return -EINVAL; 776 } 777 778 for (i = 0; i < width; i++) { 779 if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) { 780 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n", 781 local_port); 782 return -EINVAL; 783 } 784 if (separate_rxtx && 785 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != 786 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) { 787 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n", 788 local_port); 789 return -EINVAL; 790 } 791 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) { 792 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n", 793 local_port); 794 return -EINVAL; 795 } 796 } 797 798 port_mapping->module = module; 799 port_mapping->width = width; 800 port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 801 return 0; 802 } 803 804 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port) 805 { 806 struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping; 807 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 808 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 809 int i; 810 811 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 812 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); 813 for (i = 0; i < port_mapping->width; i++) { 814 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); 815 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ 816 } 817 818 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 819 } 820 821 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 822 { 823 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 824 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 825 826 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 827 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 828 return mlxsw_reg_write(mlxsw_sp->core, 
MLXSW_REG(pmlp), pmlp_pl); 829 } 830 831 static int mlxsw_sp_port_open(struct net_device *dev) 832 { 833 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 834 int err; 835 836 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 837 if (err) 838 return err; 839 netif_start_queue(dev); 840 return 0; 841 } 842 843 static int mlxsw_sp_port_stop(struct net_device *dev) 844 { 845 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 846 847 netif_stop_queue(dev); 848 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 849 } 850 851 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 852 struct net_device *dev) 853 { 854 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 855 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 856 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 857 const struct mlxsw_tx_info tx_info = { 858 .local_port = mlxsw_sp_port->local_port, 859 .is_emad = false, 860 }; 861 u64 len; 862 int err; 863 864 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { 865 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 866 dev_kfree_skb_any(skb); 867 return NETDEV_TX_OK; 868 } 869 870 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); 871 872 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 873 return NETDEV_TX_BUSY; 874 875 if (eth_skb_pad(skb)) { 876 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 877 return NETDEV_TX_OK; 878 } 879 880 mlxsw_sp_txhdr_construct(skb, &tx_info); 881 /* TX header is consumed by HW on the way so we shouldn't count its 882 * bytes as being sent. 883 */ 884 len = skb->len - MLXSW_TXHDR_LEN; 885 886 /* Due to a race we might fail here because of a full queue. In that 887 * unlikely case we simply drop the packet. 
888 */ 889 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 890 891 if (!err) { 892 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 893 u64_stats_update_begin(&pcpu_stats->syncp); 894 pcpu_stats->tx_packets++; 895 pcpu_stats->tx_bytes += len; 896 u64_stats_update_end(&pcpu_stats->syncp); 897 } else { 898 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 899 dev_kfree_skb_any(skb); 900 } 901 return NETDEV_TX_OK; 902 } 903 904 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 905 { 906 } 907 908 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 909 { 910 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 911 struct sockaddr *addr = p; 912 int err; 913 914 if (!is_valid_ether_addr(addr->sa_data)) 915 return -EADDRNOTAVAIL; 916 917 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 918 if (err) 919 return err; 920 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 921 return 0; 922 } 923 924 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 925 int mtu) 926 { 927 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 928 } 929 930 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 931 932 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 933 u16 delay) 934 { 935 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 936 BITS_PER_BYTE)); 937 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 938 mtu); 939 } 940 941 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 942 * Assumes 100m cable and maximum MTU. 
943 */ 944 #define MLXSW_SP_PAUSE_DELAY 58752 945 946 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 947 u16 delay, bool pfc, bool pause) 948 { 949 if (pfc) 950 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 951 else if (pause) 952 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 953 else 954 return 0; 955 } 956 957 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 958 bool lossy) 959 { 960 if (lossy) 961 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 962 else 963 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 964 thres); 965 } 966 967 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 968 u8 *prio_tc, bool pause_en, 969 struct ieee_pfc *my_pfc) 970 { 971 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 972 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 973 u16 delay = !!my_pfc ? my_pfc->delay : 0; 974 char pbmc_pl[MLXSW_REG_PBMC_LEN]; 975 u32 taken_headroom_cells = 0; 976 u32 max_headroom_cells; 977 int i, j, err; 978 979 max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp); 980 981 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); 982 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 983 if (err) 984 return err; 985 986 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 987 bool configure = false; 988 bool pfc = false; 989 u16 thres_cells; 990 u16 delay_cells; 991 u16 total_cells; 992 bool lossy; 993 994 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 995 if (prio_tc[j] == i) { 996 pfc = pfc_en & BIT(j); 997 configure = true; 998 break; 999 } 1000 } 1001 1002 if (!configure) 1003 continue; 1004 1005 lossy = !(pfc || pause_en); 1006 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 1007 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, 1008 pfc, pause_en); 1009 total_cells = thres_cells + delay_cells; 1010 1011 taken_headroom_cells += total_cells; 1012 if (taken_headroom_cells > 
max_headroom_cells) 1013 return -ENOBUFS; 1014 1015 mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells, 1016 thres_cells, lossy); 1017 } 1018 1019 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 1020 } 1021 1022 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, 1023 int mtu, bool pause_en) 1024 { 1025 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; 1026 bool dcb_en = !!mlxsw_sp_port->dcb.ets; 1027 struct ieee_pfc *my_pfc; 1028 u8 *prio_tc; 1029 1030 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; 1031 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL; 1032 1033 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, 1034 pause_en, my_pfc); 1035 } 1036 1037 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 1038 { 1039 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1040 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1041 int err; 1042 1043 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); 1044 if (err) 1045 return err; 1046 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); 1047 if (err) 1048 goto err_span_port_mtu_update; 1049 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 1050 if (err) 1051 goto err_port_mtu_set; 1052 dev->mtu = mtu; 1053 return 0; 1054 1055 err_port_mtu_set: 1056 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); 1057 err_span_port_mtu_update: 1058 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1059 return err; 1060 } 1061 1062 static int 1063 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 1064 struct rtnl_link_stats64 *stats) 1065 { 1066 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1067 struct mlxsw_sp_port_pcpu_stats *p; 1068 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1069 u32 tx_dropped = 0; 1070 unsigned int start; 1071 int i; 1072 1073 for_each_possible_cpu(i) { 1074 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 1075 do { 1076 start = u64_stats_fetch_begin_irq(&p->syncp); 1077 
rx_packets = p->rx_packets; 1078 rx_bytes = p->rx_bytes; 1079 tx_packets = p->tx_packets; 1080 tx_bytes = p->tx_bytes; 1081 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 1082 1083 stats->rx_packets += rx_packets; 1084 stats->rx_bytes += rx_bytes; 1085 stats->tx_packets += tx_packets; 1086 stats->tx_bytes += tx_bytes; 1087 /* tx_dropped is u32, updated without syncp protection. */ 1088 tx_dropped += p->tx_dropped; 1089 } 1090 stats->tx_dropped = tx_dropped; 1091 return 0; 1092 } 1093 1094 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) 1095 { 1096 switch (attr_id) { 1097 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1098 return true; 1099 } 1100 1101 return false; 1102 } 1103 1104 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev, 1105 void *sp) 1106 { 1107 switch (attr_id) { 1108 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1109 return mlxsw_sp_port_get_sw_stats64(dev, sp); 1110 } 1111 1112 return -EINVAL; 1113 } 1114 1115 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, 1116 int prio, char *ppcnt_pl) 1117 { 1118 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1119 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1120 1121 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); 1122 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 1123 } 1124 1125 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev, 1126 struct rtnl_link_stats64 *stats) 1127 { 1128 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1129 int err; 1130 1131 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 1132 0, ppcnt_pl); 1133 if (err) 1134 goto out; 1135 1136 stats->tx_packets = 1137 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); 1138 stats->rx_packets = 1139 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); 1140 stats->tx_bytes = 1141 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); 1142 stats->rx_bytes = 1143 
mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 1144 stats->multicast = 1145 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 1146 1147 stats->rx_crc_errors = 1148 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 1149 stats->rx_frame_errors = 1150 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 1151 1152 stats->rx_length_errors = ( 1153 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) + 1154 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) + 1155 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl)); 1156 1157 stats->rx_errors = (stats->rx_crc_errors + 1158 stats->rx_frame_errors + stats->rx_length_errors); 1159 1160 out: 1161 return err; 1162 } 1163 1164 static void 1165 mlxsw_sp_port_get_hw_xstats(struct net_device *dev, 1166 struct mlxsw_sp_port_xstats *xstats) 1167 { 1168 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1169 int err, i; 1170 1171 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 1172 ppcnt_pl); 1173 if (!err) 1174 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); 1175 1176 for (i = 0; i < TC_MAX_QUEUE; i++) { 1177 err = mlxsw_sp_port_get_stats_raw(dev, 1178 MLXSW_REG_PPCNT_TC_CONG_TC, 1179 i, ppcnt_pl); 1180 if (!err) 1181 xstats->wred_drop[i] = 1182 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); 1183 1184 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, 1185 i, ppcnt_pl); 1186 if (err) 1187 continue; 1188 1189 xstats->backlog[i] = 1190 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 1191 xstats->tail_drop[i] = 1192 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); 1193 } 1194 1195 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1196 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, 1197 i, ppcnt_pl); 1198 if (err) 1199 continue; 1200 1201 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); 1202 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); 1203 } 1204 } 1205 1206 static void update_stats_cache(struct work_struct *work) 1207 { 
1208 struct mlxsw_sp_port *mlxsw_sp_port = 1209 container_of(work, struct mlxsw_sp_port, 1210 periodic_hw_stats.update_dw.work); 1211 1212 if (!netif_carrier_ok(mlxsw_sp_port->dev)) 1213 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as 1214 * necessary when port goes down. 1215 */ 1216 goto out; 1217 1218 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, 1219 &mlxsw_sp_port->periodic_hw_stats.stats); 1220 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev, 1221 &mlxsw_sp_port->periodic_hw_stats.xstats); 1222 1223 out: 1224 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 1225 MLXSW_HW_STATS_UPDATE_TIME); 1226 } 1227 1228 /* Return the stats from a cache that is updated periodically, 1229 * as this function might get called in an atomic context. 1230 */ 1231 static void 1232 mlxsw_sp_port_get_stats64(struct net_device *dev, 1233 struct rtnl_link_stats64 *stats) 1234 { 1235 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1236 1237 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats)); 1238 } 1239 1240 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, 1241 u16 vid_begin, u16 vid_end, 1242 bool is_member, bool untagged) 1243 { 1244 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1245 char *spvm_pl; 1246 int err; 1247 1248 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); 1249 if (!spvm_pl) 1250 return -ENOMEM; 1251 1252 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, 1253 vid_end, is_member, untagged); 1254 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); 1255 kfree(spvm_pl); 1256 return err; 1257 } 1258 1259 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, 1260 u16 vid_end, bool is_member, bool untagged) 1261 { 1262 u16 vid, vid_e; 1263 int err; 1264 1265 for (vid = vid_begin; vid <= vid_end; 1266 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 1267 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), 1268 vid_end); 1269 1270 
err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, 1271 is_member, untagged); 1272 if (err) 1273 return err; 1274 } 1275 1276 return 0; 1277 } 1278 1279 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port, 1280 bool flush_default) 1281 { 1282 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp; 1283 1284 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp, 1285 &mlxsw_sp_port->vlans_list, list) { 1286 if (!flush_default && 1287 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID) 1288 continue; 1289 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1290 } 1291 } 1292 1293 static void 1294 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1295 { 1296 if (mlxsw_sp_port_vlan->bridge_port) 1297 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 1298 else if (mlxsw_sp_port_vlan->fid) 1299 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 1300 } 1301 1302 struct mlxsw_sp_port_vlan * 1303 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 1304 { 1305 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1306 bool untagged = vid == MLXSW_SP_DEFAULT_VID; 1307 int err; 1308 1309 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1310 if (mlxsw_sp_port_vlan) 1311 return ERR_PTR(-EEXIST); 1312 1313 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged); 1314 if (err) 1315 return ERR_PTR(err); 1316 1317 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL); 1318 if (!mlxsw_sp_port_vlan) { 1319 err = -ENOMEM; 1320 goto err_port_vlan_alloc; 1321 } 1322 1323 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; 1324 mlxsw_sp_port_vlan->vid = vid; 1325 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); 1326 1327 return mlxsw_sp_port_vlan; 1328 1329 err_port_vlan_alloc: 1330 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1331 return ERR_PTR(err); 1332 } 1333 1334 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 
1335 { 1336 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 1337 u16 vid = mlxsw_sp_port_vlan->vid; 1338 1339 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan); 1340 list_del(&mlxsw_sp_port_vlan->list); 1341 kfree(mlxsw_sp_port_vlan); 1342 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1343 } 1344 1345 static int mlxsw_sp_port_add_vid(struct net_device *dev, 1346 __be16 __always_unused proto, u16 vid) 1347 { 1348 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1349 1350 /* VLAN 0 is added to HW filter when device goes up, but it is 1351 * reserved in our case, so simply return. 1352 */ 1353 if (!vid) 1354 return 0; 1355 1356 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid)); 1357 } 1358 1359 static int mlxsw_sp_port_kill_vid(struct net_device *dev, 1360 __be16 __always_unused proto, u16 vid) 1361 { 1362 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1363 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1364 1365 /* VLAN 0 is removed from HW filter when device goes down, but 1366 * it is reserved in our case, so simply return. 
1367 */ 1368 if (!vid) 1369 return 0; 1370 1371 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1372 if (!mlxsw_sp_port_vlan) 1373 return 0; 1374 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1375 1376 return 0; 1377 } 1378 1379 static struct mlxsw_sp_port_mall_tc_entry * 1380 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port, 1381 unsigned long cookie) { 1382 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1383 1384 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list) 1385 if (mall_tc_entry->cookie == cookie) 1386 return mall_tc_entry; 1387 1388 return NULL; 1389 } 1390 1391 static int 1392 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, 1393 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror, 1394 const struct flow_action_entry *act, 1395 bool ingress) 1396 { 1397 enum mlxsw_sp_span_type span_type; 1398 1399 if (!act->dev) { 1400 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n"); 1401 return -EINVAL; 1402 } 1403 1404 mirror->ingress = ingress; 1405 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; 1406 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type, 1407 true, &mirror->span_id); 1408 } 1409 1410 static void 1411 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, 1412 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror) 1413 { 1414 enum mlxsw_sp_span_type span_type; 1415 1416 span_type = mirror->ingress ? 
1417 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; 1418 mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id, 1419 span_type, true); 1420 } 1421 1422 static int 1423 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port, 1424 struct tc_cls_matchall_offload *cls, 1425 const struct flow_action_entry *act, 1426 bool ingress) 1427 { 1428 int err; 1429 1430 if (!mlxsw_sp_port->sample) 1431 return -EOPNOTSUPP; 1432 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) { 1433 netdev_err(mlxsw_sp_port->dev, "sample already active\n"); 1434 return -EEXIST; 1435 } 1436 if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) { 1437 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n"); 1438 return -EOPNOTSUPP; 1439 } 1440 1441 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group, 1442 act->sample.psample_group); 1443 mlxsw_sp_port->sample->truncate = act->sample.truncate; 1444 mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size; 1445 mlxsw_sp_port->sample->rate = act->sample.rate; 1446 1447 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate); 1448 if (err) 1449 goto err_port_sample_set; 1450 return 0; 1451 1452 err_port_sample_set: 1453 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); 1454 return err; 1455 } 1456 1457 static void 1458 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port) 1459 { 1460 if (!mlxsw_sp_port->sample) 1461 return; 1462 1463 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1); 1464 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); 1465 } 1466 1467 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1468 struct tc_cls_matchall_offload *f, 1469 bool ingress) 1470 { 1471 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1472 __be16 protocol = f->common.protocol; 1473 struct flow_action_entry *act; 1474 int err; 1475 1476 if (!flow_offload_has_one_action(&f->rule->action)) { 1477 netdev_err(mlxsw_sp_port->dev, "only singular 
actions are supported\n"); 1478 return -EOPNOTSUPP; 1479 } 1480 1481 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); 1482 if (!mall_tc_entry) 1483 return -ENOMEM; 1484 mall_tc_entry->cookie = f->cookie; 1485 1486 act = &f->rule->action.entries[0]; 1487 1488 if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) { 1489 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; 1490 1491 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR; 1492 mirror = &mall_tc_entry->mirror; 1493 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, 1494 mirror, act, 1495 ingress); 1496 } else if (act->id == FLOW_ACTION_SAMPLE && 1497 protocol == htons(ETH_P_ALL)) { 1498 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE; 1499 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f, 1500 act, ingress); 1501 } else { 1502 err = -EOPNOTSUPP; 1503 } 1504 1505 if (err) 1506 goto err_add_action; 1507 1508 list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list); 1509 return 0; 1510 1511 err_add_action: 1512 kfree(mall_tc_entry); 1513 return err; 1514 } 1515 1516 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1517 struct tc_cls_matchall_offload *f) 1518 { 1519 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1520 1521 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port, 1522 f->cookie); 1523 if (!mall_tc_entry) { 1524 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); 1525 return; 1526 } 1527 list_del(&mall_tc_entry->list); 1528 1529 switch (mall_tc_entry->type) { 1530 case MLXSW_SP_PORT_MALL_MIRROR: 1531 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port, 1532 &mall_tc_entry->mirror); 1533 break; 1534 case MLXSW_SP_PORT_MALL_SAMPLE: 1535 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port); 1536 break; 1537 default: 1538 WARN_ON(1); 1539 } 1540 1541 kfree(mall_tc_entry); 1542 } 1543 1544 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1545 struct 
tc_cls_matchall_offload *f, 1546 bool ingress) 1547 { 1548 switch (f->command) { 1549 case TC_CLSMATCHALL_REPLACE: 1550 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f, 1551 ingress); 1552 case TC_CLSMATCHALL_DESTROY: 1553 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f); 1554 return 0; 1555 default: 1556 return -EOPNOTSUPP; 1557 } 1558 } 1559 1560 static int 1561 mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block, 1562 struct flow_cls_offload *f) 1563 { 1564 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block); 1565 1566 switch (f->command) { 1567 case FLOW_CLS_REPLACE: 1568 return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f); 1569 case FLOW_CLS_DESTROY: 1570 mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f); 1571 return 0; 1572 case FLOW_CLS_STATS: 1573 return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f); 1574 case FLOW_CLS_TMPLT_CREATE: 1575 return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f); 1576 case FLOW_CLS_TMPLT_DESTROY: 1577 mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f); 1578 return 0; 1579 default: 1580 return -EOPNOTSUPP; 1581 } 1582 } 1583 1584 static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type, 1585 void *type_data, 1586 void *cb_priv, bool ingress) 1587 { 1588 struct mlxsw_sp_port *mlxsw_sp_port = cb_priv; 1589 1590 switch (type) { 1591 case TC_SETUP_CLSMATCHALL: 1592 if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev, 1593 type_data)) 1594 return -EOPNOTSUPP; 1595 1596 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data, 1597 ingress); 1598 case TC_SETUP_CLSFLOWER: 1599 return 0; 1600 default: 1601 return -EOPNOTSUPP; 1602 } 1603 } 1604 1605 static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type, 1606 void *type_data, 1607 void *cb_priv) 1608 { 1609 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1610 cb_priv, true); 1611 } 1612 1613 static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type, 1614 void *type_data, 
1615 void *cb_priv) 1616 { 1617 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1618 cb_priv, false); 1619 } 1620 1621 static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type, 1622 void *type_data, void *cb_priv) 1623 { 1624 struct mlxsw_sp_acl_block *acl_block = cb_priv; 1625 1626 switch (type) { 1627 case TC_SETUP_CLSMATCHALL: 1628 return 0; 1629 case TC_SETUP_CLSFLOWER: 1630 if (mlxsw_sp_acl_block_disabled(acl_block)) 1631 return -EOPNOTSUPP; 1632 1633 return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data); 1634 default: 1635 return -EOPNOTSUPP; 1636 } 1637 } 1638 1639 static void mlxsw_sp_tc_block_flower_release(void *cb_priv) 1640 { 1641 struct mlxsw_sp_acl_block *acl_block = cb_priv; 1642 1643 mlxsw_sp_acl_block_destroy(acl_block); 1644 } 1645 1646 static LIST_HEAD(mlxsw_sp_block_cb_list); 1647 1648 static int 1649 mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, 1650 struct flow_block_offload *f, bool ingress) 1651 { 1652 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1653 struct mlxsw_sp_acl_block *acl_block; 1654 struct flow_block_cb *block_cb; 1655 bool register_block = false; 1656 int err; 1657 1658 block_cb = flow_block_cb_lookup(f->block, 1659 mlxsw_sp_setup_tc_block_cb_flower, 1660 mlxsw_sp); 1661 if (!block_cb) { 1662 acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net); 1663 if (!acl_block) 1664 return -ENOMEM; 1665 block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower, 1666 mlxsw_sp, acl_block, 1667 mlxsw_sp_tc_block_flower_release); 1668 if (IS_ERR(block_cb)) { 1669 mlxsw_sp_acl_block_destroy(acl_block); 1670 err = PTR_ERR(block_cb); 1671 goto err_cb_register; 1672 } 1673 register_block = true; 1674 } else { 1675 acl_block = flow_block_cb_priv(block_cb); 1676 } 1677 flow_block_cb_incref(block_cb); 1678 err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block, 1679 mlxsw_sp_port, ingress, f->extack); 1680 if (err) 1681 goto err_block_bind; 1682 1683 if (ingress) 1684 
mlxsw_sp_port->ing_acl_block = acl_block; 1685 else 1686 mlxsw_sp_port->eg_acl_block = acl_block; 1687 1688 if (register_block) { 1689 flow_block_cb_add(block_cb, f); 1690 list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list); 1691 } 1692 1693 return 0; 1694 1695 err_block_bind: 1696 if (!flow_block_cb_decref(block_cb)) 1697 flow_block_cb_free(block_cb); 1698 err_cb_register: 1699 return err; 1700 } 1701 1702 static void 1703 mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port, 1704 struct flow_block_offload *f, bool ingress) 1705 { 1706 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1707 struct mlxsw_sp_acl_block *acl_block; 1708 struct flow_block_cb *block_cb; 1709 int err; 1710 1711 block_cb = flow_block_cb_lookup(f->block, 1712 mlxsw_sp_setup_tc_block_cb_flower, 1713 mlxsw_sp); 1714 if (!block_cb) 1715 return; 1716 1717 if (ingress) 1718 mlxsw_sp_port->ing_acl_block = NULL; 1719 else 1720 mlxsw_sp_port->eg_acl_block = NULL; 1721 1722 acl_block = flow_block_cb_priv(block_cb); 1723 err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block, 1724 mlxsw_sp_port, ingress); 1725 if (!err && !flow_block_cb_decref(block_cb)) { 1726 flow_block_cb_remove(block_cb, f); 1727 list_del(&block_cb->driver_list); 1728 } 1729 } 1730 1731 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, 1732 struct flow_block_offload *f) 1733 { 1734 struct flow_block_cb *block_cb; 1735 flow_setup_cb_t *cb; 1736 bool ingress; 1737 int err; 1738 1739 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { 1740 cb = mlxsw_sp_setup_tc_block_cb_matchall_ig; 1741 ingress = true; 1742 } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 1743 cb = mlxsw_sp_setup_tc_block_cb_matchall_eg; 1744 ingress = false; 1745 } else { 1746 return -EOPNOTSUPP; 1747 } 1748 1749 f->driver_block_list = &mlxsw_sp_block_cb_list; 1750 1751 switch (f->command) { 1752 case FLOW_BLOCK_BIND: 1753 if (flow_block_cb_is_busy(cb, mlxsw_sp_port, 1754 
&mlxsw_sp_block_cb_list)) 1755 return -EBUSY; 1756 1757 block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port, 1758 mlxsw_sp_port, NULL); 1759 if (IS_ERR(block_cb)) 1760 return PTR_ERR(block_cb); 1761 err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f, 1762 ingress); 1763 if (err) { 1764 flow_block_cb_free(block_cb); 1765 return err; 1766 } 1767 flow_block_cb_add(block_cb, f); 1768 list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list); 1769 return 0; 1770 case FLOW_BLOCK_UNBIND: 1771 mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port, 1772 f, ingress); 1773 block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port); 1774 if (!block_cb) 1775 return -ENOENT; 1776 1777 flow_block_cb_remove(block_cb, f); 1778 list_del(&block_cb->driver_list); 1779 return 0; 1780 default: 1781 return -EOPNOTSUPP; 1782 } 1783 } 1784 1785 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, 1786 void *type_data) 1787 { 1788 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1789 1790 switch (type) { 1791 case TC_SETUP_BLOCK: 1792 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1793 case TC_SETUP_QDISC_RED: 1794 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1795 case TC_SETUP_QDISC_PRIO: 1796 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data); 1797 case TC_SETUP_QDISC_ETS: 1798 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data); 1799 case TC_SETUP_QDISC_TBF: 1800 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data); 1801 default: 1802 return -EOPNOTSUPP; 1803 } 1804 } 1805 1806 1807 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) 1808 { 1809 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1810 1811 if (!enable) { 1812 if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) || 1813 mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) || 1814 !list_empty(&mlxsw_sp_port->mall_tc_list)) { 1815 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); 
1816 return -EINVAL; 1817 } 1818 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block); 1819 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block); 1820 } else { 1821 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block); 1822 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block); 1823 } 1824 return 0; 1825 } 1826 1827 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable) 1828 { 1829 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1830 char pplr_pl[MLXSW_REG_PPLR_LEN]; 1831 int err; 1832 1833 if (netif_running(dev)) 1834 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1835 1836 mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable); 1837 err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr), 1838 pplr_pl); 1839 1840 if (netif_running(dev)) 1841 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 1842 1843 return err; 1844 } 1845 1846 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable); 1847 1848 static int mlxsw_sp_handle_feature(struct net_device *dev, 1849 netdev_features_t wanted_features, 1850 netdev_features_t feature, 1851 mlxsw_sp_feature_handler feature_handler) 1852 { 1853 netdev_features_t changes = wanted_features ^ dev->features; 1854 bool enable = !!(wanted_features & feature); 1855 int err; 1856 1857 if (!(changes & feature)) 1858 return 0; 1859 1860 err = feature_handler(dev, enable); 1861 if (err) { 1862 netdev_err(dev, "%s feature %pNF failed, err %d\n", 1863 enable ? 
"Enable" : "Disable", &feature, err); 1864 return err; 1865 } 1866 1867 if (enable) 1868 dev->features |= feature; 1869 else 1870 dev->features &= ~feature; 1871 1872 return 0; 1873 } 1874 static int mlxsw_sp_set_features(struct net_device *dev, 1875 netdev_features_t features) 1876 { 1877 netdev_features_t oper_features = dev->features; 1878 int err = 0; 1879 1880 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC, 1881 mlxsw_sp_feature_hw_tc); 1882 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK, 1883 mlxsw_sp_feature_loopback); 1884 1885 if (err) { 1886 dev->features = oper_features; 1887 return -EINVAL; 1888 } 1889 1890 return 0; 1891 } 1892 1893 static struct devlink_port * 1894 mlxsw_sp_port_get_devlink_port(struct net_device *dev) 1895 { 1896 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1897 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1898 1899 return mlxsw_core_port_devlink_port_get(mlxsw_sp->core, 1900 mlxsw_sp_port->local_port); 1901 } 1902 1903 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, 1904 struct ifreq *ifr) 1905 { 1906 struct hwtstamp_config config; 1907 int err; 1908 1909 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 1910 return -EFAULT; 1911 1912 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, 1913 &config); 1914 if (err) 1915 return err; 1916 1917 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1918 return -EFAULT; 1919 1920 return 0; 1921 } 1922 1923 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, 1924 struct ifreq *ifr) 1925 { 1926 struct hwtstamp_config config; 1927 int err; 1928 1929 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port, 1930 &config); 1931 if (err) 1932 return err; 1933 1934 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1935 return -EFAULT; 1936 1937 return 0; 1938 } 1939 1940 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port) 
1941 { 1942 struct hwtstamp_config config = {0}; 1943 1944 mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config); 1945 } 1946 1947 static int 1948 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1949 { 1950 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1951 1952 switch (cmd) { 1953 case SIOCSHWTSTAMP: 1954 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr); 1955 case SIOCGHWTSTAMP: 1956 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr); 1957 default: 1958 return -EOPNOTSUPP; 1959 } 1960 } 1961 1962 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1963 .ndo_open = mlxsw_sp_port_open, 1964 .ndo_stop = mlxsw_sp_port_stop, 1965 .ndo_start_xmit = mlxsw_sp_port_xmit, 1966 .ndo_setup_tc = mlxsw_sp_setup_tc, 1967 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1968 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1969 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1970 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1971 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1972 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1973 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1974 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1975 .ndo_set_features = mlxsw_sp_set_features, 1976 .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port, 1977 .ndo_do_ioctl = mlxsw_sp_port_ioctl, 1978 }; 1979 1980 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 1981 struct ethtool_drvinfo *drvinfo) 1982 { 1983 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1984 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1985 1986 strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, 1987 sizeof(drvinfo->driver)); 1988 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 1989 sizeof(drvinfo->version)); 1990 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 1991 "%d.%d.%d", 1992 mlxsw_sp->bus_info->fw_rev.major, 1993 mlxsw_sp->bus_info->fw_rev.minor, 1994 
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

/* ethtool get_pauseparam: report the driver's cached PAUSE state.
 * The values mirror what was last committed by
 * mlxsw_sp_port_set_pauseparam(); they are not re-read from hardware.
 */
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

/* Program global PAUSE admission (PFCC register) for the port.
 * Returns 0 on success or a negative errno from the register write.
 */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

/* ethtool set_pauseparam: enable/disable global PAUSE on the port.
 *
 * Rejected when PFC is already enabled (the two flow-control schemes are
 * mutually exclusive here) and when PAUSE autonegotiation is requested.
 * The headroom is resized first because enabling PAUSE requires buffer
 * space for in-flight packets; if the subsequent PFCC write fails the
 * headroom is rolled back to match the (unchanged) cached PAUSE state.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	/* Commit the new state to the cache read by get_pauseparam(). */
	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	/* Restore headroom according to the still-cached previous state. */
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* One ethtool statistic backed by a PPCNT register field.
 * @str: name exposed via ethtool -S (at most ETH_GSTRING_LEN bytes).
 * @getter: extracts the 64-bit counter from a raw PPCNT payload.
 * @cells_bytes: counter is in device cells and must be converted to bytes.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

/* IEEE 802.3 counter group (MLXSW_REG_PPCNT_IEEE_8023_CNT). */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		/* Note: string says "xmitted" while the register field is
		 * named "transmitted"; the pairing is intentional (ABI name).
		 */
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

/* RFC 2863 counter group (MLXSW_REG_PPCNT_RFC_2863_CNT). */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
	{
		.str = "if_in_discards",
		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
	},
	{
		.str = "if_out_discards",
		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
	},
	{
		.str = "if_out_errors",
		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)

/* RFC 2819 (RMON) counter group (MLXSW_REG_PPCNT_RFC_2819_CNT). */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
	{
		.str = "ether_stats_undersize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
	},
	{
		.str = "ether_stats_oversize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
	},
	{
		.str = "ether_stats_fragments",
		.getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
	},
	{
		.str = "ether_pkts64octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
	},
	{
		.str = "ether_pkts65to127octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
	},
	{
		.str = "ether_pkts128to255octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
	},
	{
		.str = "ether_pkts256to511octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
	},
	{
		.str = "ether_pkts512to1023octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
	},
	{
		.str = "ether_pkts1024to1518octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
	},
	{
		.str = "ether_pkts1519to2047octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
	},
	{
		.str = "ether_pkts2048to4095octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
	},
	{
		.str = "ether_pkts4096to8191octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)

/* RFC 3635 (Ethernet-like MIB) counter group (MLXSW_REG_PPCNT_RFC_3635_CNT). */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
	{
		.str = "dot3stats_fcs_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
	},
	{
		.str = "dot3stats_symbol_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
	},
	{
		.str = "dot3control_in_unknown_opcodes",
		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
	},
	{
		.str = "dot3in_pause_frames",
		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)

/* Per-reason discard counter group (MLXSW_REG_PPCNT_DISCARD_CNT). */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
	{
		.str = "discard_ingress_general",
		.getter = mlxsw_reg_ppcnt_ingress_general_get,
	},
	{
		.str = "discard_ingress_policy_engine",
		.getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
	},
	{
		.str = "discard_ingress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
	},
	{
		.str = "discard_ingress_tag_frame_type",
		.getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
	},
	{
		.str = "discard_egress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
	},
	{
		.str = "discard_loopback_filter",
		.getter = mlxsw_reg_ppcnt_loopback_filter_get,
	},
	{
		.str = "discard_egress_general",
		.getter = mlxsw_reg_ppcnt_egress_general_get,
	},
	{
		.str = "discard_egress_hoq",
		.getter = mlxsw_reg_ppcnt_egress_hoq_get,
	},
	{
		.str = "discard_egress_policy_engine",
		.getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
	},
	{
		.str = "discard_ingress_tx_link_down",
		.getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
	},
	{
		.str = "discard_egress_stp_filter",
		.getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
	},
	{
		.str = "discard_egress_sll",
		.getter = mlxsw_reg_ppcnt_egress_sll_get,
	},
};

#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)

/* Per-priority counter group (MLXSW_REG_PPCNT_PRIO_CNT); the "_prio"
 * suffix in the names gets the priority number appended at string-build
 * time by mlxsw_sp_port_get_prio_strings().
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

/* Per-TC counter group (MLXSW_REG_PPCNT_TC_CNT). tc_transmit_queue is
 * reported by the device in cells and is converted to bytes on readout.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total ethtool stats count, excluding PTP stats which are added
 * dynamically in mlxsw_sp_port_get_sset_count(). Must stay in sync with
 * the emission order in mlxsw_sp_port_get_strings()/_get_stats().
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
					 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
					  IEEE_8021QAZ_MAX_TCS) + \
					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
					  TC_MAX_QUEUE))

/* Emit the per-priority stat names with the priority number appended.
 * The "%.29s_%.1d" precisions bound the output to ETH_GSTRING_LEN and
 * avoid format-truncation warnings. Advances *p past the emitted names.
 */
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

/* Same as above for the per-TC stat names. */
static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

/* ethtool get_strings: emit all stat names, in the exact order that
 * mlxsw_sp_port_get_stats() fills the values.
 */
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < TC_MAX_QUEUE; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		/* PTP stat names come last, matching _get_stats(). */
		mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p);
		break;
	}
}

/* ethtool set_phys_id: toggle the port LED (MLCR register) for
 * identification. Blink states are not supported, only on/off.
 */
static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

/* Map a PPCNT counter group to its stats descriptor table and length.
 * Returns -EOPNOTSUPP (and warns) for groups not handled above.
 */
static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2863_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2819_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_3635_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_DISCARD_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_discard_stats;
		*p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

/* Read one PPCNT counter group for @prio into data[data_index..],
 * converting cell-based counters to bytes where flagged.
 *
 * NOTE(review): the return value of mlxsw_sp_port_get_stats_raw() is
 * ignored, so on a register-read failure the uninitialized ppcnt_pl
 * stack buffer is parsed — confirm callers tolerate garbage values in
 * that (rare) case.
 */
static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}

/* ethtool get_ethtool_stats: fill all counter groups in the same order
 * as mlxsw_sp_port_get_strings(), tracking the running offset in
 * data_index.
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* RFC 2863 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;

	/* RFC 2819 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;

	/* RFC 3635 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;

	/* Discard Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}

	/* PTP counters */
	mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port,
						    data, data_index);
	data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
}

/* ethtool get_sset_count: static stats plus the ASIC-specific PTP ones. */
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN +
		       mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
	default:
		return -EOPNOTSUPP;
	}
}

/* Spectrum-1 mapping between a PTYS protocol bitmask, the matching
 * ethtool link mode bit and the link speed it represents.
 */
struct mlxsw_sp1_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

/* Spectrum-1 link-mode table. Several PTYS bits can collapse onto one
 * ethtool bit (e.g. SGMII and 1000BASE-KX), and table order matters:
 * mlxsw_sp1_from_ptys_speed() returns the first match.
 */
static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed = SPEED_20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed = SPEED_100000,
	},
};

#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)

/* Derive the supported port types (FIBRE/Backplane) from the PTYS
 * capability bits. A port may advertise both.
 */
static void
mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Translate a PTYS protocol bitmask into an ethtool link-mode bitmap.
 * @width is unused on Spectrum-1 (kept for the shared ops signature).
 */
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 u8 width, unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			__set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

/* Return the speed of the first table entry matching the PTYS mask, or
 * SPEED_UNKNOWN when nothing matches.
 */
static u32
mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			return mlxsw_sp1_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill ethtool speed/duplex from the operational PTYS mask; unknown
 * when there is no carrier. All supported modes are full duplex.
 */
static void
mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* Build the PTYS advertisement mask from the ethtool advertising bitmap.
 * @width is unused on Spectrum-1.
 */
static u32
mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Collect every PTYS bit whose table entry matches @speed exactly. */
static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width,
				   u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp1_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Collect every PTYS bit at or below @upper_speed (used when deriving a
 * default advertisement from the port's maximum speed).
 */
static u32
mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Spectrum-1 uses a fixed 25G per-lane base speed for all ports. */
static int
mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			  u32 *base_speed)
{
	*base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
	return 0;
}

/* Thin wrapper: Spectrum-1 uses the legacy (non-extended) PTYS fields. */
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin, bool autoneg)
{
	mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}

/* Thin wrapper: unpack the legacy PTYS cap/admin/oper masks. */
static void
mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
				  p_eth_proto_oper);
}

/* Spectrum-1 implementation of the per-ASIC link-mode/speed ops. */
static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp1_port_type_speed_ops = {
	.from_ptys_supported_port	= mlxsw_sp1_from_ptys_supported_port,
	.from_ptys_link			= mlxsw_sp1_from_ptys_link,
	.from_ptys_speed		= mlxsw_sp1_from_ptys_speed,
	.from_ptys_speed_duplex		= mlxsw_sp1_from_ptys_speed_duplex,
	.to_ptys_advert_link		= mlxsw_sp1_to_ptys_advert_link,
	.to_ptys_speed			= mlxsw_sp1_to_ptys_speed,
	.to_ptys_upper_speed		= mlxsw_sp1_to_ptys_upper_speed,
	.port_speed_base		= mlxsw_sp1_port_speed_base,
	.reg_ptys_eth_pack		= mlxsw_sp1_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack		= mlxsw_sp1_reg_ptys_eth_unpack,
};

/* Spectrum-2+ extended PTYS bits each map to a SET of ethtool link
 * modes; the per-protocol arrays below list the members of each set.
 */
static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)
static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_400gaui_8[] = {
	ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)

/* Bit flags encoding the lane widths a link mode is valid for. */
#define MLXSW_SP_PORT_MASK_WIDTH_1X	BIT(0)
#define MLXSW_SP_PORT_MASK_WIDTH_2X	BIT(1)
#define MLXSW_SP_PORT_MASK_WIDTH_4X	BIT(2)
#define MLXSW_SP_PORT_MASK_WIDTH_8X	BIT(3)

/* Convert a lane count (1/2/4/8) into its width-mask flag; returns 0
 * (and warns once) for unexpected widths, which then matches no modes.
 */
static u8 mlxsw_sp_port_mask_width_get(u8 width)
{
	switch (width) {
	case 1:
		return MLXSW_SP_PORT_MASK_WIDTH_1X;
	case 2:
		return MLXSW_SP_PORT_MASK_WIDTH_2X;
	case 4:
		return MLXSW_SP_PORT_MASK_WIDTH_4X;
	case 8:
		return MLXSW_SP_PORT_MASK_WIDTH_8X;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

/* Spectrum-2+ mapping between an extended PTYS bit, the set of ethtool
 * link modes it represents, the lane widths it is valid for, and the
 * speed it runs at.
 */
struct mlxsw_sp2_port_link_mode {
	const enum ethtool_link_mode_bit_indices *mask_ethtool;
	int m_ethtool_len;
	u32 mask;
	u32 speed;
	u8 mask_width;
};

/* Spectrum-2+ link-mode table; first-match order matters for
 * mlxsw_sp2_from_ptys_speed().
 */
static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_2500,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_5000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_50000,
	},
	{
		/* 50G over a single lane is only valid on 1x ports. */
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_200000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_400000,
	},
};

#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)

/* Spectrum-2+ reports both FIBRE and Backplane unconditionally; the
 * extended PTYS bits do not distinguish media type here.
 */
static void
mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Set all ethtool bits belonging to one table entry in @mode. */
static void
mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			  unsigned long *mode)
{
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++)
		__set_bit(link_mode->mask_ethtool[i], mode);
}

/* Translate an extended PTYS mask into an ethtool bitmap, filtered by
 * the modes valid for the port's lane width.
 */
static void
mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 u8 width, unsigned long *mode)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) &&
		    (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
			mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
						  mode);
	}
}

/* Return the speed of the first table entry matching the extended PTYS
 * mask, or SPEED_UNKNOWN when nothing matches.
 */
static u32
mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
			return mlxsw_sp2_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill ethtool speed/duplex from the operational mask; unknown when
 * there is no carrier. All supported modes are full duplex.
 */
static void
mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* True iff EVERY ethtool bit of the table entry is set in @mode — an
 * extended PTYS bit is advertised only when its whole set is requested.
 */
static bool
mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			   const unsigned long *mode)
{
	int cnt = 0;
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++) {
		if (test_bit(link_mode->mask_ethtool[i], mode))
			cnt++;
	}

	return cnt == link_mode->m_ethtool_len;
}

/* Build the extended PTYS advertisement mask from the ethtool
 * advertising bitmap, restricted to modes valid for the port width.
 */
static u32
mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
			      const struct ethtool_link_ksettings *cmd)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) &&
		    mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
					       cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Collect extended PTYS bits matching @speed for the port's lane width.
 * (Body continues beyond this chunk of the file.)
 */
static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp,
				   u8 width, u32 speed)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((speed == mlxsw_sp2_port_link_mode[i].speed) &&
		    (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Build a PTYS bitmap of every link mode whose speed does not exceed
 * 'upper_speed'.
 */
static u32
mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Determine the per-lane base speed (50G or 25G) of a port from its PTYS
 * capability word. Returns -EIO when neither single-lane mode is supported.
 */
static int
mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			  u32 *base_speed)
{
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	int err;

	/* In Spectrum-2, the speed of 1x can change from port to port, so query
	 * it from firmware.
	 */
	mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);

	if (eth_proto_cap &
	    MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) {
		*base_speed = MLXSW_SP_PORT_BASE_SPEED_50G;
		return 0;
	}

	if (eth_proto_cap &
	    MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) {
		*base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
		return 0;
	}

	return -EIO;
}

/* Spectrum-2 uses the extended (ext) Ethernet protocol fields of PTYS. */
static void
mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin,
			    bool autoneg)
{
	mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg);
}

static void
mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap,
				      p_eth_proto_admin, p_eth_proto_oper);
}

static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp2_port_type_speed_ops = {
	.from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port,
	.from_ptys_link = mlxsw_sp2_from_ptys_link,
	.from_ptys_speed = mlxsw_sp2_from_ptys_speed,
	.from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex,
	.to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link,
	.to_ptys_speed = mlxsw_sp2_to_ptys_speed,
	.to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed,
	.port_speed_base = mlxsw_sp2_port_speed_base,
	.reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack,
};

/* Populate the 'supported' link-mode set of an ethtool get request. */
static void
mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
				 u8 width, struct ethtool_link_ksettings *cmd)
{
	const struct mlxsw_sp_port_type_speed_ops *ops;

	ops = mlxsw_sp->port_type_speed_ops;

	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
	ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width,
			    cmd->link_modes.supported);
}

/* Populate the 'advertising' link-mode set; nothing is advertised when
 * autonegotiation is off.
 */
static void
mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
				 u32 eth_proto_admin, bool autoneg, u8 width,
				 struct ethtool_link_ksettings *cmd)
{
	const struct mlxsw_sp_port_type_speed_ops *ops;

	ops = mlxsw_sp->port_type_speed_ops;

	if (!autoneg)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width,
			    cmd->link_modes.advertising);
}

/* Map a PTYS connector type to the corresponding ethtool PORT_* constant. */
static u8
mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type)
{
	switch (connector_type) {
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR:
		return PORT_OTHER;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE:
		return PORT_NONE;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP:
		return PORT_TP;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI:
		return PORT_AUI;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC:
		return PORT_BNC;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII:
		return PORT_MII;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE:
		return PORT_FIBRE;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA:
		return PORT_DA;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER:
		return PORT_OTHER;
	default:
		WARN_ON_ONCE(1);
		return PORT_OTHER;
	}
}

/* ethtool .get_link_ksettings: query PTYS and report supported/advertised
 * modes, autoneg state, connector type and current speed/duplex.
 */
static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 connector_type;
	bool autoneg;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	autoneg = mlxsw_sp_port->link.autoneg;
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);

	mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap,
					 mlxsw_sp_port->mapping.width, cmd);

	mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg,
					 mlxsw_sp_port->mapping.width, cmd);

	cmd->base.autoneg = autoneg ?
			    AUTONEG_ENABLE : AUTONEG_DISABLE;
	connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
	cmd->base.port = mlxsw_sp_port_connector_port(connector_type);
	ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev),
				    eth_proto_oper, cmd);

	return 0;
}

/* ethtool .set_link_ksettings: program PTYS with either the advertised mask
 * (autoneg on) or the single forced speed (autoneg off), then bounce the
 * port's admin state so the new configuration takes effect.
 */
static int
mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap, eth_proto_new;
	bool autoneg;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, NULL, NULL);

	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
	eth_proto_new = autoneg ?
		ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width,
					 cmd) :
		ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width,
				   cmd->base.speed);

	/* Only speeds the port is actually capable of may be requested. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "No supported speed requested\n");
		return -EINVAL;
	}

	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_new, autoneg);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	mlxsw_sp_port->link.autoneg = autoneg;

	if (!netif_running(dev))
		return 0;

	/* Toggle admin state so the hardware re-negotiates the link. */
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return 0;
}

/* ethtool .get_module_info: forward to the common environment helper. */
static int mlxsw_sp_get_module_info(struct net_device *netdev,
				    struct ethtool_modinfo *modinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_get_module_info(mlxsw_sp->core,
					mlxsw_sp_port->mapping.module,
					modinfo);

	return err;
}

/* ethtool .get_module_eeprom: forward to the common environment helper. */
static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee,
				      u8 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
					  mlxsw_sp_port->mapping.module, ee,
					  data);

	return err;
}

/* ethtool .get_ts_info: delegate to the ASIC-specific PTP implementation. */
static int
mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info);
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo = mlxsw_sp_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = mlxsw_sp_port_get_pauseparam,
	.set_pauseparam = mlxsw_sp_port_set_pauseparam,
	.get_strings = mlxsw_sp_port_get_strings,
	.set_phys_id = mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats = mlxsw_sp_port_get_stats,
	.get_sset_count = mlxsw_sp_port_get_sset_count,
	.get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
	.get_module_info = mlxsw_sp_get_module_info,
	.get_module_eeprom = mlxsw_sp_get_module_eeprom,
	.get_ts_info = mlxsw_sp_get_ts_info,
};

/* Enable in PTYS every speed up to base-speed * lane-width for a new port. */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;
	u32 upper_speed;
	u32 base_speed;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port,
				   &base_speed);
	if (err)
		return err;
	upper_speed = base_speed * mlxsw_sp_port->mapping.width;

	eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_admin, mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

/* Query the current operational speed of a port from PTYS. */
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err =
	      mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

/* Configure one ETS scheduling element (QEEC): link 'index' under
 * 'next_index' at hierarchy level 'hr' and set its DWRR mode and weight.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the maximum shaper (rate and burst size) of one ETS element. */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the minimum shaper (guaranteed rate) of one ETS element. */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Map a switch priority to an egress traffic class via the QTCT register. */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

/* Build the default ETS hierarchy of a port: group -> subgroups -> TCs,
 * disable all max shapers, configure min shapers for the multicast TCs
 * (TC 8-15) and map every switch priority to TC 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

/* Enable/disable multicast-aware traffic-class mapping mode (QTCTM). */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

/* Create one front-panel port: allocate the netdev, configure the hardware
 * (module mapping, SWID, MAC, speeds, MTU, buffers, ETS, DCB, FIDs, VLANs)
 * and register the netdev. On failure everything is unwound in reverse order
 * via the goto chain.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 split_base_local_port,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool split = !!split_base_local_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_mapping->module + 1, split,
				   port_mapping->lane / port_mapping->width,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->split_base_local_port = split_base_local_port;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats
		= netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);
	INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw,
			  mlxsw_sp_span_speed_update_work);

	mlxsw_sp->ports[local_port] =
		mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

/* Tear down a port created by mlxsw_sp_port_create(), in reverse order. */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

/* Create the CPU port representation; no netdev is allocated for it. */
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

/* Remove the CPU port representation. */
static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
				mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

/* Return true when a port exists at this local port number. */
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

/* Remove all front-panel ports, the CPU port and the ports array. */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
}

/* Allocate the ports array, create the CPU port and every front-panel port
 * that has a cached module mapping.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = mlxsw_sp->port_mapping[i];
		if (!port_mapping)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
	kfree(mlxsw_sp->ports);
	return err;
}

/* Query and cache the module mapping of every possible local port; ports
 * without a mapping (width == 0) keep a NULL entry.
 */
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping *),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
		if
		   (err)
			goto err_port_module_info_get;
		if (!port_mapping.width)
			continue;

		mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
						    sizeof(port_mapping),
						    GFP_KERNEL);
		if (!mlxsw_sp->port_mapping[i]) {
			err = -ENOMEM;
			goto err_port_module_info_dup;
		}
	}
	return 0;

err_port_module_info_get:
err_port_module_info_dup:
	for (i--; i >= 1; i--)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
	return err;
}

/* Free the cached per-port module mappings. */
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
}

/* Return the first local port of the port cluster that 'local_port' belongs
 * to, given the cluster width.
 */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
{
	u8 offset = (local_port - 1) % max_width;

	return local_port - offset;
}

/* Create 'count' split ports starting at 'base_port', spaced 'offset' local
 * ports apart, each using an equal share of the parent port's lanes.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   base_port, &split_port_mapping);
		if (err)
			goto err_port_create;
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}

/* Recreate the original unsplit ports in the local-port range that a split
 * previously occupied. Errors are ignored; mlxsw_sp_port_create() logs them.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port,
					 unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the gap and recreate them. */
	for (i = 0; i < count * offset; i++) {
		port_mapping = mlxsw_sp->port_mapping[base_port + i];
		if (!port_mapping)
			continue;
		mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
	}
}

/* Return the local-port stride between consecutive split ports for the given
 * split count, based on the per-width resources reported by firmware.
 * Returns -EINVAL for unsupported widths or when the resource is invalid.
 */
static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
				       unsigned int count,
				       unsigned int max_width)
{
	enum mlxsw_res_id local_ports_in_x_res_id;
	int split_width = max_width / count;

	if (split_width == 1)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
	else if (split_width == 2)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
	else if (split_width == 4)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
	else
		return -EINVAL;

	if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
		return -EINVAL;
	return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
}

/* Split 'local_port' into 'count' ports after validating the request.
 * NOTE(review): the function continues past this chunk; presumably it is the
 * driver's devlink port-split operation — confirm against the ops table.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int max_width;
	u8 base_port;
	int offset;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	/* Split ports cannot be split. */
	if (mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Split port with non-max and 1 module width cannot be split. */
	if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	if (count == 1 || !is_power_of_2(count) || count > max_width) {
		netdev_err(mlxsw_sp_port->dev, "Invalid split count\n");
		NL_SET_ERR_MSG_MOD(extack, "Invalid split count");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* Only in case max split is being done, the local port and
	 * base port may differ.
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	for (i = 0; i < count * offset; i++) {
		/* Expect base port to exist and also the one in the middle in
		 * case of maximal split count.
4269 */ 4270 if (i == 0 || (count == max_width && i == count / 2)) 4271 continue; 4272 4273 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) { 4274 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 4275 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 4276 return -EINVAL; 4277 } 4278 } 4279 4280 port_mapping = mlxsw_sp_port->mapping; 4281 4282 for (i = 0; i < count; i++) 4283 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4284 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4285 4286 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping, 4287 count, offset); 4288 if (err) { 4289 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 4290 goto err_port_split_create; 4291 } 4292 4293 return 0; 4294 4295 err_port_split_create: 4296 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 4297 return err; 4298 } 4299 4300 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 4301 struct netlink_ext_ack *extack) 4302 { 4303 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4304 struct mlxsw_sp_port *mlxsw_sp_port; 4305 unsigned int count; 4306 int max_width; 4307 u8 base_port; 4308 int offset; 4309 int i; 4310 4311 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4312 if (!mlxsw_sp_port) { 4313 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 4314 local_port); 4315 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 4316 return -EINVAL; 4317 } 4318 4319 if (!mlxsw_sp_port->split) { 4320 netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); 4321 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 4322 return -EINVAL; 4323 } 4324 4325 max_width = mlxsw_core_module_max_width(mlxsw_core, 4326 mlxsw_sp_port->mapping.module); 4327 if (max_width < 0) { 4328 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); 4329 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); 4330 return max_width; 4331 } 
4332 4333 count = max_width / mlxsw_sp_port->mapping.width; 4334 4335 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 4336 if (WARN_ON(offset < 0)) { 4337 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 4338 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 4339 return -EINVAL; 4340 } 4341 4342 base_port = mlxsw_sp_port->split_base_local_port; 4343 4344 for (i = 0; i < count; i++) 4345 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4346 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4347 4348 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 4349 4350 return 0; 4351 } 4352 4353 static void 4354 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) 4355 { 4356 int i; 4357 4358 for (i = 0; i < TC_MAX_QUEUE; i++) 4359 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; 4360 } 4361 4362 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 4363 char *pude_pl, void *priv) 4364 { 4365 struct mlxsw_sp *mlxsw_sp = priv; 4366 struct mlxsw_sp_port *mlxsw_sp_port; 4367 enum mlxsw_reg_pude_oper_status status; 4368 u8 local_port; 4369 4370 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 4371 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4372 if (!mlxsw_sp_port) 4373 return; 4374 4375 status = mlxsw_reg_pude_oper_status_get(pude_pl); 4376 if (status == MLXSW_PORT_OPER_STATUS_UP) { 4377 netdev_info(mlxsw_sp_port->dev, "link up\n"); 4378 netif_carrier_on(mlxsw_sp_port->dev); 4379 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 4380 mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0); 4381 } else { 4382 netdev_info(mlxsw_sp_port->dev, "link down\n"); 4383 netif_carrier_off(mlxsw_sp_port->dev); 4384 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); 4385 } 4386 } 4387 4388 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 4389 char *mtpptr_pl, bool ingress) 4390 { 4391 u8 local_port; 4392 u8 num_rec; 4393 int i; 
4394 4395 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 4396 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 4397 for (i = 0; i < num_rec; i++) { 4398 u8 domain_number; 4399 u8 message_type; 4400 u16 sequence_id; 4401 u64 timestamp; 4402 4403 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 4404 &domain_number, &sequence_id, 4405 ×tamp); 4406 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 4407 message_type, domain_number, 4408 sequence_id, timestamp); 4409 } 4410 } 4411 4412 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 4413 char *mtpptr_pl, void *priv) 4414 { 4415 struct mlxsw_sp *mlxsw_sp = priv; 4416 4417 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 4418 } 4419 4420 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 4421 char *mtpptr_pl, void *priv) 4422 { 4423 struct mlxsw_sp *mlxsw_sp = priv; 4424 4425 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 4426 } 4427 4428 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 4429 u8 local_port, void *priv) 4430 { 4431 struct mlxsw_sp *mlxsw_sp = priv; 4432 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4433 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 4434 4435 if (unlikely(!mlxsw_sp_port)) { 4436 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 4437 local_port); 4438 return; 4439 } 4440 4441 skb->dev = mlxsw_sp_port->dev; 4442 4443 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 4444 u64_stats_update_begin(&pcpu_stats->syncp); 4445 pcpu_stats->rx_packets++; 4446 pcpu_stats->rx_bytes += skb->len; 4447 u64_stats_update_end(&pcpu_stats->syncp); 4448 4449 skb->protocol = eth_type_trans(skb, skb->dev); 4450 netif_receive_skb(skb); 4451 } 4452 4453 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 4454 void *priv) 4455 { 4456 skb->offload_fwd_mark = 1; 4457 return 
mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4458 } 4459 4460 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 4461 u8 local_port, void *priv) 4462 { 4463 skb->offload_l3_fwd_mark = 1; 4464 skb->offload_fwd_mark = 1; 4465 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4466 } 4467 4468 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 4469 void *priv) 4470 { 4471 struct mlxsw_sp *mlxsw_sp = priv; 4472 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4473 struct psample_group *psample_group; 4474 u32 size; 4475 4476 if (unlikely(!mlxsw_sp_port)) { 4477 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 4478 local_port); 4479 goto out; 4480 } 4481 if (unlikely(!mlxsw_sp_port->sample)) { 4482 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 4483 local_port); 4484 goto out; 4485 } 4486 4487 size = mlxsw_sp_port->sample->truncate ? 
4488 mlxsw_sp_port->sample->trunc_size : skb->len; 4489 4490 rcu_read_lock(); 4491 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 4492 if (!psample_group) 4493 goto out_unlock; 4494 psample_sample_packet(psample_group, skb, size, 4495 mlxsw_sp_port->dev->ifindex, 0, 4496 mlxsw_sp_port->sample->rate); 4497 out_unlock: 4498 rcu_read_unlock(); 4499 out: 4500 consume_skb(skb); 4501 } 4502 4503 static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port, 4504 void *priv) 4505 { 4506 struct mlxsw_sp *mlxsw_sp = priv; 4507 4508 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 4509 } 4510 4511 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4512 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 4513 _is_ctrl, SP_##_trap_group, DISCARD) 4514 4515 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4516 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 4517 _is_ctrl, SP_##_trap_group, DISCARD) 4518 4519 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4520 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 4521 _is_ctrl, SP_##_trap_group, DISCARD) 4522 4523 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 4524 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 4525 4526 static const struct mlxsw_listener mlxsw_sp_listener[] = { 4527 /* Events */ 4528 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 4529 /* L2 traps */ 4530 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 4531 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 4532 MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU, 4533 false, SP_LLDP, DISCARD), 4534 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 4535 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 4536 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 4537 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 4538 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 
4539 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 4540 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 4541 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 4542 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), 4543 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, 4544 false), 4545 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 4546 false), 4547 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, 4548 false), 4549 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 4550 false), 4551 /* L3 traps */ 4552 MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false), 4553 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), 4554 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 4555 false), 4556 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), 4557 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 4558 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), 4559 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, 4560 false), 4561 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), 4562 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), 4563 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), 4564 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), 4565 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), 4566 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), 4567 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 4568 false), 4569 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 4570 false), 4571 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 4572 false), 4573 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 4574 false), 4575 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), 4576 
MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 4577 false), 4578 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), 4579 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), 4580 MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false), 4581 MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false), 4582 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 4583 ROUTER_EXP, false), 4584 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 4585 ROUTER_EXP, false), 4586 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 4587 ROUTER_EXP, false), 4588 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 4589 ROUTER_EXP, false), 4590 /* PKT Sample trap */ 4591 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 4592 false, SP_IP2ME, DISCARD), 4593 /* ACL trap */ 4594 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), 4595 /* Multicast Router Traps */ 4596 MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), 4597 MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false), 4598 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 4599 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 4600 /* NVE traps */ 4601 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false), 4602 MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false), 4603 /* PTP traps */ 4604 MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU, 4605 false, SP_PTP0, DISCARD), 4606 MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false), 4607 }; 4608 4609 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 4610 /* Events */ 4611 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 4612 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 4613 }; 4614 4615 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 4616 { 4617 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 4618 enum mlxsw_reg_qpcr_ir_units ir_units; 4619 int max_cpu_policers; 4620 bool 
is_bytes; 4621 u8 burst_size; 4622 u32 rate; 4623 int i, err; 4624 4625 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 4626 return -EIO; 4627 4628 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 4629 4630 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 4631 for (i = 0; i < max_cpu_policers; i++) { 4632 is_bytes = false; 4633 switch (i) { 4634 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 4635 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 4636 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 4637 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 4638 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 4639 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 4640 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: 4641 rate = 128; 4642 burst_size = 7; 4643 break; 4644 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 4645 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 4646 rate = 16 * 1024; 4647 burst_size = 10; 4648 break; 4649 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 4650 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 4651 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 4652 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 4653 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 4654 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 4655 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 4656 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 4657 rate = 1024; 4658 burst_size = 7; 4659 break; 4660 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 4661 rate = 1024; 4662 burst_size = 7; 4663 break; 4664 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0: 4665 rate = 24 * 1024; 4666 burst_size = 12; 4667 break; 4668 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1: 4669 rate = 19 * 1024; 4670 burst_size = 12; 4671 break; 4672 case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP: 4673 rate = 360; 4674 burst_size = 7; 4675 break; 4676 default: 4677 continue; 4678 } 4679 4680 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 4681 burst_size); 4682 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 4683 if (err) 4684 return err; 4685 } 4686 4687 return 0; 4688 } 4689 4690 static int 
mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 4691 { 4692 char htgt_pl[MLXSW_REG_HTGT_LEN]; 4693 enum mlxsw_reg_htgt_trap_group i; 4694 int max_cpu_policers; 4695 int max_trap_groups; 4696 u8 priority, tc; 4697 u16 policer_id; 4698 int err; 4699 4700 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 4701 return -EIO; 4702 4703 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 4704 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 4705 4706 for (i = 0; i < max_trap_groups; i++) { 4707 policer_id = i; 4708 switch (i) { 4709 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 4710 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 4711 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 4712 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 4713 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 4714 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0: 4715 case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP: 4716 priority = 5; 4717 tc = 5; 4718 break; 4719 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 4720 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 4721 priority = 4; 4722 tc = 4; 4723 break; 4724 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 4725 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 4726 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 4727 priority = 3; 4728 tc = 3; 4729 break; 4730 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 4731 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 4732 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 4733 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1: 4734 priority = 2; 4735 tc = 2; 4736 break; 4737 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 4738 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 4739 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 4740 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 4741 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: 4742 priority = 1; 4743 tc = 1; 4744 break; 4745 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 4746 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 4747 tc = MLXSW_REG_HTGT_DEFAULT_TC; 4748 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 4749 break; 4750 default: 4751 continue; 4752 } 
4753 4754 if (max_cpu_policers <= policer_id && 4755 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 4756 return -EIO; 4757 4758 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 4759 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 4760 if (err) 4761 return err; 4762 } 4763 4764 return 0; 4765 } 4766 4767 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp, 4768 const struct mlxsw_listener listeners[], 4769 size_t listeners_count) 4770 { 4771 int i; 4772 int err; 4773 4774 for (i = 0; i < listeners_count; i++) { 4775 err = mlxsw_core_trap_register(mlxsw_sp->core, 4776 &listeners[i], 4777 mlxsw_sp); 4778 if (err) 4779 goto err_listener_register; 4780 4781 } 4782 return 0; 4783 4784 err_listener_register: 4785 for (i--; i >= 0; i--) { 4786 mlxsw_core_trap_unregister(mlxsw_sp->core, 4787 &listeners[i], 4788 mlxsw_sp); 4789 } 4790 return err; 4791 } 4792 4793 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp, 4794 const struct mlxsw_listener listeners[], 4795 size_t listeners_count) 4796 { 4797 int i; 4798 4799 for (i = 0; i < listeners_count; i++) { 4800 mlxsw_core_trap_unregister(mlxsw_sp->core, 4801 &listeners[i], 4802 mlxsw_sp); 4803 } 4804 } 4805 4806 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 4807 { 4808 int err; 4809 4810 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 4811 if (err) 4812 return err; 4813 4814 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 4815 if (err) 4816 return err; 4817 4818 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener, 4819 ARRAY_SIZE(mlxsw_sp_listener)); 4820 if (err) 4821 return err; 4822 4823 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners, 4824 mlxsw_sp->listeners_count); 4825 if (err) 4826 goto err_extra_traps_init; 4827 4828 return 0; 4829 4830 err_extra_traps_init: 4831 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 4832 ARRAY_SIZE(mlxsw_sp_listener)); 4833 return err; 4834 } 4835 4836 static void mlxsw_sp_traps_fini(struct mlxsw_sp 
*mlxsw_sp) 4837 { 4838 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners, 4839 mlxsw_sp->listeners_count); 4840 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 4841 ARRAY_SIZE(mlxsw_sp_listener)); 4842 } 4843 4844 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 4845 4846 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 4847 { 4848 char slcr_pl[MLXSW_REG_SLCR_LEN]; 4849 u32 seed; 4850 int err; 4851 4852 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 4853 MLXSW_SP_LAG_SEED_INIT); 4854 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 4855 MLXSW_REG_SLCR_LAG_HASH_DMAC | 4856 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 4857 MLXSW_REG_SLCR_LAG_HASH_VLANID | 4858 MLXSW_REG_SLCR_LAG_HASH_SIP | 4859 MLXSW_REG_SLCR_LAG_HASH_DIP | 4860 MLXSW_REG_SLCR_LAG_HASH_SPORT | 4861 MLXSW_REG_SLCR_LAG_HASH_DPORT | 4862 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 4863 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 4864 if (err) 4865 return err; 4866 4867 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 4868 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 4869 return -EIO; 4870 4871 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 4872 sizeof(struct mlxsw_sp_upper), 4873 GFP_KERNEL); 4874 if (!mlxsw_sp->lags) 4875 return -ENOMEM; 4876 4877 return 0; 4878 } 4879 4880 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 4881 { 4882 kfree(mlxsw_sp->lags); 4883 } 4884 4885 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 4886 { 4887 char htgt_pl[MLXSW_REG_HTGT_LEN]; 4888 4889 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 4890 MLXSW_REG_HTGT_INVALID_POLICER, 4891 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 4892 MLXSW_REG_HTGT_DEFAULT_TC); 4893 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 4894 } 4895 4896 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 4897 .clock_init = mlxsw_sp1_ptp_clock_init, 4898 .clock_fini = mlxsw_sp1_ptp_clock_fini, 4899 .init = 
mlxsw_sp1_ptp_init, 4900 .fini = mlxsw_sp1_ptp_fini, 4901 .receive = mlxsw_sp1_ptp_receive, 4902 .transmitted = mlxsw_sp1_ptp_transmitted, 4903 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 4904 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, 4905 .shaper_work = mlxsw_sp1_ptp_shaper_work, 4906 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 4907 .get_stats_count = mlxsw_sp1_get_stats_count, 4908 .get_stats_strings = mlxsw_sp1_get_stats_strings, 4909 .get_stats = mlxsw_sp1_get_stats, 4910 }; 4911 4912 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 4913 .clock_init = mlxsw_sp2_ptp_clock_init, 4914 .clock_fini = mlxsw_sp2_ptp_clock_fini, 4915 .init = mlxsw_sp2_ptp_init, 4916 .fini = mlxsw_sp2_ptp_fini, 4917 .receive = mlxsw_sp2_ptp_receive, 4918 .transmitted = mlxsw_sp2_ptp_transmitted, 4919 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 4920 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 4921 .shaper_work = mlxsw_sp2_ptp_shaper_work, 4922 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 4923 .get_stats_count = mlxsw_sp2_get_stats_count, 4924 .get_stats_strings = mlxsw_sp2_get_stats_strings, 4925 .get_stats = mlxsw_sp2_get_stats, 4926 }; 4927 4928 static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed) 4929 { 4930 return mtu * 5 / 2; 4931 } 4932 4933 static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = { 4934 .buffsize_get = mlxsw_sp1_span_buffsize_get, 4935 }; 4936 4937 #define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38 4938 4939 static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed) 4940 { 4941 return 3 * mtu + MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR * speed / 1000; 4942 } 4943 4944 static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = { 4945 .buffsize_get = mlxsw_sp2_span_buffsize_get, 4946 }; 4947 4948 u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed) 4949 { 4950 u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu); 4951 4952 return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1; 4953 } 4954 4955 static int 
mlxsw_sp_netdevice_event(struct notifier_block *unused, 4956 unsigned long event, void *ptr); 4957 4958 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 4959 const struct mlxsw_bus_info *mlxsw_bus_info, 4960 struct netlink_ext_ack *extack) 4961 { 4962 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4963 int err; 4964 4965 mlxsw_sp->core = mlxsw_core; 4966 mlxsw_sp->bus_info = mlxsw_bus_info; 4967 4968 err = mlxsw_sp_fw_rev_validate(mlxsw_sp); 4969 if (err) 4970 return err; 4971 4972 mlxsw_core_emad_string_tlv_enable(mlxsw_core); 4973 4974 err = mlxsw_sp_base_mac_get(mlxsw_sp); 4975 if (err) { 4976 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 4977 return err; 4978 } 4979 4980 err = mlxsw_sp_kvdl_init(mlxsw_sp); 4981 if (err) { 4982 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 4983 return err; 4984 } 4985 4986 err = mlxsw_sp_fids_init(mlxsw_sp); 4987 if (err) { 4988 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 4989 goto err_fids_init; 4990 } 4991 4992 err = mlxsw_sp_traps_init(mlxsw_sp); 4993 if (err) { 4994 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 4995 goto err_traps_init; 4996 } 4997 4998 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 4999 if (err) { 5000 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 5001 goto err_devlink_traps_init; 5002 } 5003 5004 err = mlxsw_sp_buffers_init(mlxsw_sp); 5005 if (err) { 5006 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 5007 goto err_buffers_init; 5008 } 5009 5010 err = mlxsw_sp_lag_init(mlxsw_sp); 5011 if (err) { 5012 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 5013 goto err_lag_init; 5014 } 5015 5016 /* Initialize SPAN before router and switchdev, so that those components 5017 * can call mlxsw_sp_span_respin(). 
5018 */ 5019 err = mlxsw_sp_span_init(mlxsw_sp); 5020 if (err) { 5021 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 5022 goto err_span_init; 5023 } 5024 5025 err = mlxsw_sp_switchdev_init(mlxsw_sp); 5026 if (err) { 5027 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 5028 goto err_switchdev_init; 5029 } 5030 5031 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 5032 if (err) { 5033 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 5034 goto err_counter_pool_init; 5035 } 5036 5037 err = mlxsw_sp_afa_init(mlxsw_sp); 5038 if (err) { 5039 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 5040 goto err_afa_init; 5041 } 5042 5043 err = mlxsw_sp_nve_init(mlxsw_sp); 5044 if (err) { 5045 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 5046 goto err_nve_init; 5047 } 5048 5049 err = mlxsw_sp_acl_init(mlxsw_sp); 5050 if (err) { 5051 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 5052 goto err_acl_init; 5053 } 5054 5055 err = mlxsw_sp_router_init(mlxsw_sp, extack); 5056 if (err) { 5057 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 5058 goto err_router_init; 5059 } 5060 5061 if (mlxsw_sp->bus_info->read_frc_capable) { 5062 /* NULL is a valid return value from clock_init */ 5063 mlxsw_sp->clock = 5064 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp, 5065 mlxsw_sp->bus_info->dev); 5066 if (IS_ERR(mlxsw_sp->clock)) { 5067 err = PTR_ERR(mlxsw_sp->clock); 5068 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n"); 5069 goto err_ptp_clock_init; 5070 } 5071 } 5072 5073 if (mlxsw_sp->clock) { 5074 /* NULL is a valid return value from ptp_ops->init */ 5075 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp); 5076 if (IS_ERR(mlxsw_sp->ptp_state)) { 5077 err = PTR_ERR(mlxsw_sp->ptp_state); 5078 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n"); 5079 goto err_ptp_init; 5080 } 5081 } 5082 5083 /* Initialize netdevice notifier after router and SPAN 
is initialized, 5084 * so that the event handler can use router structures and call SPAN 5085 * respin. 5086 */ 5087 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 5088 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 5089 &mlxsw_sp->netdevice_nb); 5090 if (err) { 5091 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 5092 goto err_netdev_notifier; 5093 } 5094 5095 err = mlxsw_sp_dpipe_init(mlxsw_sp); 5096 if (err) { 5097 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 5098 goto err_dpipe_init; 5099 } 5100 5101 err = mlxsw_sp_port_module_info_init(mlxsw_sp); 5102 if (err) { 5103 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n"); 5104 goto err_port_module_info_init; 5105 } 5106 5107 err = mlxsw_sp_ports_create(mlxsw_sp); 5108 if (err) { 5109 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 5110 goto err_ports_create; 5111 } 5112 5113 return 0; 5114 5115 err_ports_create: 5116 mlxsw_sp_port_module_info_fini(mlxsw_sp); 5117 err_port_module_info_init: 5118 mlxsw_sp_dpipe_fini(mlxsw_sp); 5119 err_dpipe_init: 5120 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 5121 &mlxsw_sp->netdevice_nb); 5122 err_netdev_notifier: 5123 if (mlxsw_sp->clock) 5124 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 5125 err_ptp_init: 5126 if (mlxsw_sp->clock) 5127 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 5128 err_ptp_clock_init: 5129 mlxsw_sp_router_fini(mlxsw_sp); 5130 err_router_init: 5131 mlxsw_sp_acl_fini(mlxsw_sp); 5132 err_acl_init: 5133 mlxsw_sp_nve_fini(mlxsw_sp); 5134 err_nve_init: 5135 mlxsw_sp_afa_fini(mlxsw_sp); 5136 err_afa_init: 5137 mlxsw_sp_counter_pool_fini(mlxsw_sp); 5138 err_counter_pool_init: 5139 mlxsw_sp_switchdev_fini(mlxsw_sp); 5140 err_switchdev_init: 5141 mlxsw_sp_span_fini(mlxsw_sp); 5142 err_span_init: 5143 mlxsw_sp_lag_fini(mlxsw_sp); 5144 err_lag_init: 5145 mlxsw_sp_buffers_fini(mlxsw_sp); 5146 err_buffers_init: 5147 
mlxsw_sp_devlink_traps_fini(mlxsw_sp); 5148 err_devlink_traps_init: 5149 mlxsw_sp_traps_fini(mlxsw_sp); 5150 err_traps_init: 5151 mlxsw_sp_fids_fini(mlxsw_sp); 5152 err_fids_init: 5153 mlxsw_sp_kvdl_fini(mlxsw_sp); 5154 return err; 5155 } 5156 5157 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 5158 const struct mlxsw_bus_info *mlxsw_bus_info, 5159 struct netlink_ext_ack *extack) 5160 { 5161 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5162 5163 mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev; 5164 mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME; 5165 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 5166 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 5167 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 5168 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 5169 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 5170 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 5171 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 5172 mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr; 5173 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 5174 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 5175 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; 5176 mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; 5177 mlxsw_sp->listeners = mlxsw_sp1_listener; 5178 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 5179 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; 5180 5181 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 5182 } 5183 5184 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 5185 const struct mlxsw_bus_info *mlxsw_bus_info, 5186 struct netlink_ext_ack *extack) 5187 { 5188 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5189 5190 mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev; 5191 mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME; 5192 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 5193 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 5194 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 5195 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 5196 mlxsw_sp->acl_tcam_ops = 
&mlxsw_sp2_acl_tcam_ops; 5197 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 5198 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 5199 mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; 5200 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 5201 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 5202 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 5203 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; 5204 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; 5205 5206 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 5207 } 5208 5209 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, 5210 const struct mlxsw_bus_info *mlxsw_bus_info, 5211 struct netlink_ext_ack *extack) 5212 { 5213 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5214 5215 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 5216 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 5217 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 5218 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 5219 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 5220 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 5221 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 5222 mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; 5223 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 5224 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 5225 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 5226 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; 5227 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; 5228 5229 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 5230 } 5231 5232 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 5233 { 5234 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5235 5236 mlxsw_sp_ports_remove(mlxsw_sp); 5237 mlxsw_sp_port_module_info_fini(mlxsw_sp); 5238 mlxsw_sp_dpipe_fini(mlxsw_sp); 5239 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 5240 &mlxsw_sp->netdevice_nb); 5241 if (mlxsw_sp->clock) { 5242 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 5243 
mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 5244 } 5245 mlxsw_sp_router_fini(mlxsw_sp); 5246 mlxsw_sp_acl_fini(mlxsw_sp); 5247 mlxsw_sp_nve_fini(mlxsw_sp); 5248 mlxsw_sp_afa_fini(mlxsw_sp); 5249 mlxsw_sp_counter_pool_fini(mlxsw_sp); 5250 mlxsw_sp_switchdev_fini(mlxsw_sp); 5251 mlxsw_sp_span_fini(mlxsw_sp); 5252 mlxsw_sp_lag_fini(mlxsw_sp); 5253 mlxsw_sp_buffers_fini(mlxsw_sp); 5254 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 5255 mlxsw_sp_traps_fini(mlxsw_sp); 5256 mlxsw_sp_fids_fini(mlxsw_sp); 5257 mlxsw_sp_kvdl_fini(mlxsw_sp); 5258 } 5259 5260 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated 5261 * 802.1Q FIDs 5262 */ 5263 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 5264 VLAN_VID_MASK - 1) 5265 5266 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 5267 .used_max_mid = 1, 5268 .max_mid = MLXSW_SP_MID_MAX, 5269 .used_flood_tables = 1, 5270 .used_flood_mode = 1, 5271 .flood_mode = 3, 5272 .max_fid_flood_tables = 3, 5273 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 5274 .used_max_ib_mc = 1, 5275 .max_ib_mc = 0, 5276 .used_max_pkey = 1, 5277 .max_pkey = 0, 5278 .used_kvd_sizes = 1, 5279 .kvd_hash_single_parts = 59, 5280 .kvd_hash_double_parts = 41, 5281 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 5282 .swid_config = { 5283 { 5284 .used_type = 1, 5285 .type = MLXSW_PORT_SWID_TYPE_ETH, 5286 } 5287 }, 5288 }; 5289 5290 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 5291 .used_max_mid = 1, 5292 .max_mid = MLXSW_SP_MID_MAX, 5293 .used_flood_tables = 1, 5294 .used_flood_mode = 1, 5295 .flood_mode = 3, 5296 .max_fid_flood_tables = 3, 5297 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 5298 .used_max_ib_mc = 1, 5299 .max_ib_mc = 0, 5300 .used_max_pkey = 1, 5301 .max_pkey = 0, 5302 .swid_config = { 5303 { 5304 .used_type = 1, 5305 .type = MLXSW_PORT_SWID_TYPE_ETH, 5306 } 5307 }, 5308 }; 5309 5310 static void 5311 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core 
*mlxsw_core, 5312 struct devlink_resource_size_params *kvd_size_params, 5313 struct devlink_resource_size_params *linear_size_params, 5314 struct devlink_resource_size_params *hash_double_size_params, 5315 struct devlink_resource_size_params *hash_single_size_params) 5316 { 5317 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 5318 KVD_SINGLE_MIN_SIZE); 5319 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 5320 KVD_DOUBLE_MIN_SIZE); 5321 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 5322 u32 linear_size_min = 0; 5323 5324 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 5325 MLXSW_SP_KVD_GRANULARITY, 5326 DEVLINK_RESOURCE_UNIT_ENTRY); 5327 devlink_resource_size_params_init(linear_size_params, linear_size_min, 5328 kvd_size - single_size_min - 5329 double_size_min, 5330 MLXSW_SP_KVD_GRANULARITY, 5331 DEVLINK_RESOURCE_UNIT_ENTRY); 5332 devlink_resource_size_params_init(hash_double_size_params, 5333 double_size_min, 5334 kvd_size - single_size_min - 5335 linear_size_min, 5336 MLXSW_SP_KVD_GRANULARITY, 5337 DEVLINK_RESOURCE_UNIT_ENTRY); 5338 devlink_resource_size_params_init(hash_single_size_params, 5339 single_size_min, 5340 kvd_size - double_size_min - 5341 linear_size_min, 5342 MLXSW_SP_KVD_GRANULARITY, 5343 DEVLINK_RESOURCE_UNIT_ENTRY); 5344 } 5345 5346 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 5347 { 5348 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5349 struct devlink_resource_size_params hash_single_size_params; 5350 struct devlink_resource_size_params hash_double_size_params; 5351 struct devlink_resource_size_params linear_size_params; 5352 struct devlink_resource_size_params kvd_size_params; 5353 u32 kvd_size, single_size, double_size, linear_size; 5354 const struct mlxsw_config_profile *profile; 5355 int err; 5356 5357 profile = &mlxsw_sp1_config_profile; 5358 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 5359 return -EIO; 5360 5361 
mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 5362 &linear_size_params, 5363 &hash_double_size_params, 5364 &hash_single_size_params); 5365 5366 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 5367 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 5368 kvd_size, MLXSW_SP_RESOURCE_KVD, 5369 DEVLINK_RESOURCE_ID_PARENT_TOP, 5370 &kvd_size_params); 5371 if (err) 5372 return err; 5373 5374 linear_size = profile->kvd_linear_size; 5375 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 5376 linear_size, 5377 MLXSW_SP_RESOURCE_KVD_LINEAR, 5378 MLXSW_SP_RESOURCE_KVD, 5379 &linear_size_params); 5380 if (err) 5381 return err; 5382 5383 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 5384 if (err) 5385 return err; 5386 5387 double_size = kvd_size - linear_size; 5388 double_size *= profile->kvd_hash_double_parts; 5389 double_size /= profile->kvd_hash_double_parts + 5390 profile->kvd_hash_single_parts; 5391 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 5392 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 5393 double_size, 5394 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 5395 MLXSW_SP_RESOURCE_KVD, 5396 &hash_double_size_params); 5397 if (err) 5398 return err; 5399 5400 single_size = kvd_size - double_size - linear_size; 5401 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 5402 single_size, 5403 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 5404 MLXSW_SP_RESOURCE_KVD, 5405 &hash_single_size_params); 5406 if (err) 5407 return err; 5408 5409 return 0; 5410 } 5411 5412 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) 5413 { 5414 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5415 struct devlink_resource_size_params kvd_size_params; 5416 u32 kvd_size; 5417 5418 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 5419 return -EIO; 5420 5421 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 5422 
devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, 5423 MLXSW_SP_KVD_GRANULARITY, 5424 DEVLINK_RESOURCE_UNIT_ENTRY); 5425 5426 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 5427 kvd_size, MLXSW_SP_RESOURCE_KVD, 5428 DEVLINK_RESOURCE_ID_PARENT_TOP, 5429 &kvd_size_params); 5430 } 5431 5432 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core) 5433 { 5434 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5435 struct devlink_resource_size_params span_size_params; 5436 u32 max_span; 5437 5438 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN)) 5439 return -EIO; 5440 5441 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN); 5442 devlink_resource_size_params_init(&span_size_params, max_span, max_span, 5443 1, DEVLINK_RESOURCE_UNIT_ENTRY); 5444 5445 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN, 5446 max_span, MLXSW_SP_RESOURCE_SPAN, 5447 DEVLINK_RESOURCE_ID_PARENT_TOP, 5448 &span_size_params); 5449 } 5450 5451 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 5452 { 5453 int err; 5454 5455 err = mlxsw_sp1_resources_kvd_register(mlxsw_core); 5456 if (err) 5457 return err; 5458 5459 err = mlxsw_sp_resources_span_register(mlxsw_core); 5460 if (err) 5461 goto err_resources_span_register; 5462 5463 return 0; 5464 5465 err_resources_span_register: 5466 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); 5467 return err; 5468 } 5469 5470 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 5471 { 5472 int err; 5473 5474 err = mlxsw_sp2_resources_kvd_register(mlxsw_core); 5475 if (err) 5476 return err; 5477 5478 err = mlxsw_sp_resources_span_register(mlxsw_core); 5479 if (err) 5480 goto err_resources_span_register; 5481 5482 return 0; 5483 5484 err_resources_span_register: 5485 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); 5486 return err; 5487 } 5488 5489 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 
5490 const struct mlxsw_config_profile *profile, 5491 u64 *p_single_size, u64 *p_double_size, 5492 u64 *p_linear_size) 5493 { 5494 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5495 u32 double_size; 5496 int err; 5497 5498 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 5499 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 5500 return -EIO; 5501 5502 /* The hash part is what left of the kvd without the 5503 * linear part. It is split to the single size and 5504 * double size by the parts ratio from the profile. 5505 * Both sizes must be a multiplications of the 5506 * granularity from the profile. In case the user 5507 * provided the sizes they are obtained via devlink. 5508 */ 5509 err = devlink_resource_size_get(devlink, 5510 MLXSW_SP_RESOURCE_KVD_LINEAR, 5511 p_linear_size); 5512 if (err) 5513 *p_linear_size = profile->kvd_linear_size; 5514 5515 err = devlink_resource_size_get(devlink, 5516 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 5517 p_double_size); 5518 if (err) { 5519 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 5520 *p_linear_size; 5521 double_size *= profile->kvd_hash_double_parts; 5522 double_size /= profile->kvd_hash_double_parts + 5523 profile->kvd_hash_single_parts; 5524 *p_double_size = rounddown(double_size, 5525 MLXSW_SP_KVD_GRANULARITY); 5526 } 5527 5528 err = devlink_resource_size_get(devlink, 5529 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 5530 p_single_size); 5531 if (err) 5532 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 5533 *p_double_size - *p_linear_size; 5534 5535 /* Check results are legal. 
*/ 5536 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 5537 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 5538 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 5539 return -EIO; 5540 5541 return 0; 5542 } 5543 5544 static int 5545 mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id, 5546 union devlink_param_value val, 5547 struct netlink_ext_ack *extack) 5548 { 5549 if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) && 5550 (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) { 5551 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'"); 5552 return -EINVAL; 5553 } 5554 5555 return 0; 5556 } 5557 5558 static const struct devlink_param mlxsw_sp_devlink_params[] = { 5559 DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, 5560 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), 5561 NULL, NULL, 5562 mlxsw_sp_devlink_param_fw_load_policy_validate), 5563 }; 5564 5565 static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core) 5566 { 5567 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5568 union devlink_param_value value; 5569 int err; 5570 5571 err = devlink_params_register(devlink, mlxsw_sp_devlink_params, 5572 ARRAY_SIZE(mlxsw_sp_devlink_params)); 5573 if (err) 5574 return err; 5575 5576 value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER; 5577 devlink_param_driverinit_value_set(devlink, 5578 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 5579 value); 5580 return 0; 5581 } 5582 5583 static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core) 5584 { 5585 devlink_params_unregister(priv_to_devlink(mlxsw_core), 5586 mlxsw_sp_devlink_params, 5587 ARRAY_SIZE(mlxsw_sp_devlink_params)); 5588 } 5589 5590 static int 5591 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 5592 struct devlink_param_gset_ctx *ctx) 5593 { 5594 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 5595 struct mlxsw_sp *mlxsw_sp = 
mlxsw_core_driver_priv(mlxsw_core); 5596 5597 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 5598 return 0; 5599 } 5600 5601 static int 5602 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 5603 struct devlink_param_gset_ctx *ctx) 5604 { 5605 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 5606 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5607 5608 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 5609 } 5610 5611 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 5612 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 5613 "acl_region_rehash_interval", 5614 DEVLINK_PARAM_TYPE_U32, 5615 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 5616 mlxsw_sp_params_acl_region_rehash_intrvl_get, 5617 mlxsw_sp_params_acl_region_rehash_intrvl_set, 5618 NULL), 5619 }; 5620 5621 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 5622 { 5623 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5624 union devlink_param_value value; 5625 int err; 5626 5627 err = mlxsw_sp_params_register(mlxsw_core); 5628 if (err) 5629 return err; 5630 5631 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 5632 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 5633 if (err) 5634 goto err_devlink_params_register; 5635 5636 value.vu32 = 0; 5637 devlink_param_driverinit_value_set(devlink, 5638 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 5639 value); 5640 return 0; 5641 5642 err_devlink_params_register: 5643 mlxsw_sp_params_unregister(mlxsw_core); 5644 return err; 5645 } 5646 5647 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 5648 { 5649 devlink_params_unregister(priv_to_devlink(mlxsw_core), 5650 mlxsw_sp2_devlink_params, 5651 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 5652 mlxsw_sp_params_unregister(mlxsw_core); 5653 } 5654 5655 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 5656 struct sk_buff *skb, u8 local_port) 5657 
{ 5658 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5659 5660 skb_pull(skb, MLXSW_TXHDR_LEN); 5661 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 5662 } 5663 5664 static struct mlxsw_driver mlxsw_sp1_driver = { 5665 .kind = mlxsw_sp1_driver_name, 5666 .priv_size = sizeof(struct mlxsw_sp), 5667 .init = mlxsw_sp1_init, 5668 .fini = mlxsw_sp_fini, 5669 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5670 .port_split = mlxsw_sp_port_split, 5671 .port_unsplit = mlxsw_sp_port_unsplit, 5672 .sb_pool_get = mlxsw_sp_sb_pool_get, 5673 .sb_pool_set = mlxsw_sp_sb_pool_set, 5674 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5675 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5676 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5677 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5678 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5679 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5680 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5681 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5682 .flash_update = mlxsw_sp_flash_update, 5683 .trap_init = mlxsw_sp_trap_init, 5684 .trap_fini = mlxsw_sp_trap_fini, 5685 .trap_action_set = mlxsw_sp_trap_action_set, 5686 .trap_group_init = mlxsw_sp_trap_group_init, 5687 .txhdr_construct = mlxsw_sp_txhdr_construct, 5688 .resources_register = mlxsw_sp1_resources_register, 5689 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 5690 .params_register = mlxsw_sp_params_register, 5691 .params_unregister = mlxsw_sp_params_unregister, 5692 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5693 .txhdr_len = MLXSW_TXHDR_LEN, 5694 .profile = &mlxsw_sp1_config_profile, 5695 .res_query_enabled = true, 5696 }; 5697 5698 static struct mlxsw_driver mlxsw_sp2_driver = { 5699 .kind = mlxsw_sp2_driver_name, 5700 .priv_size = sizeof(struct mlxsw_sp), 5701 .init = mlxsw_sp2_init, 5702 .fini = mlxsw_sp_fini, 5703 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5704 .port_split = mlxsw_sp_port_split, 
5705 .port_unsplit = mlxsw_sp_port_unsplit, 5706 .sb_pool_get = mlxsw_sp_sb_pool_get, 5707 .sb_pool_set = mlxsw_sp_sb_pool_set, 5708 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5709 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5710 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5711 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5712 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5713 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5714 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5715 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5716 .flash_update = mlxsw_sp_flash_update, 5717 .trap_init = mlxsw_sp_trap_init, 5718 .trap_fini = mlxsw_sp_trap_fini, 5719 .trap_action_set = mlxsw_sp_trap_action_set, 5720 .trap_group_init = mlxsw_sp_trap_group_init, 5721 .txhdr_construct = mlxsw_sp_txhdr_construct, 5722 .resources_register = mlxsw_sp2_resources_register, 5723 .params_register = mlxsw_sp2_params_register, 5724 .params_unregister = mlxsw_sp2_params_unregister, 5725 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5726 .txhdr_len = MLXSW_TXHDR_LEN, 5727 .profile = &mlxsw_sp2_config_profile, 5728 .res_query_enabled = true, 5729 }; 5730 5731 static struct mlxsw_driver mlxsw_sp3_driver = { 5732 .kind = mlxsw_sp3_driver_name, 5733 .priv_size = sizeof(struct mlxsw_sp), 5734 .init = mlxsw_sp3_init, 5735 .fini = mlxsw_sp_fini, 5736 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5737 .port_split = mlxsw_sp_port_split, 5738 .port_unsplit = mlxsw_sp_port_unsplit, 5739 .sb_pool_get = mlxsw_sp_sb_pool_get, 5740 .sb_pool_set = mlxsw_sp_sb_pool_set, 5741 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5742 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5743 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5744 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5745 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5746 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5747 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5748 
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5749 .flash_update = mlxsw_sp_flash_update, 5750 .trap_init = mlxsw_sp_trap_init, 5751 .trap_fini = mlxsw_sp_trap_fini, 5752 .trap_action_set = mlxsw_sp_trap_action_set, 5753 .trap_group_init = mlxsw_sp_trap_group_init, 5754 .txhdr_construct = mlxsw_sp_txhdr_construct, 5755 .resources_register = mlxsw_sp2_resources_register, 5756 .params_register = mlxsw_sp2_params_register, 5757 .params_unregister = mlxsw_sp2_params_unregister, 5758 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5759 .txhdr_len = MLXSW_TXHDR_LEN, 5760 .profile = &mlxsw_sp2_config_profile, 5761 .res_query_enabled = true, 5762 }; 5763 5764 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 5765 { 5766 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 5767 } 5768 5769 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) 5770 { 5771 struct mlxsw_sp_port **p_mlxsw_sp_port = data; 5772 int ret = 0; 5773 5774 if (mlxsw_sp_port_dev_check(lower_dev)) { 5775 *p_mlxsw_sp_port = netdev_priv(lower_dev); 5776 ret = 1; 5777 } 5778 5779 return ret; 5780 } 5781 5782 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 5783 { 5784 struct mlxsw_sp_port *mlxsw_sp_port; 5785 5786 if (mlxsw_sp_port_dev_check(dev)) 5787 return netdev_priv(dev); 5788 5789 mlxsw_sp_port = NULL; 5790 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); 5791 5792 return mlxsw_sp_port; 5793 } 5794 5795 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 5796 { 5797 struct mlxsw_sp_port *mlxsw_sp_port; 5798 5799 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 5800 return mlxsw_sp_port ? 
mlxsw_sp_port->mlxsw_sp : NULL; 5801 } 5802 5803 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 5804 { 5805 struct mlxsw_sp_port *mlxsw_sp_port; 5806 5807 if (mlxsw_sp_port_dev_check(dev)) 5808 return netdev_priv(dev); 5809 5810 mlxsw_sp_port = NULL; 5811 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 5812 &mlxsw_sp_port); 5813 5814 return mlxsw_sp_port; 5815 } 5816 5817 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 5818 { 5819 struct mlxsw_sp_port *mlxsw_sp_port; 5820 5821 rcu_read_lock(); 5822 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 5823 if (mlxsw_sp_port) 5824 dev_hold(mlxsw_sp_port->dev); 5825 rcu_read_unlock(); 5826 return mlxsw_sp_port; 5827 } 5828 5829 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 5830 { 5831 dev_put(mlxsw_sp_port->dev); 5832 } 5833 5834 static void 5835 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 5836 struct net_device *lag_dev) 5837 { 5838 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 5839 struct net_device *upper_dev; 5840 struct list_head *iter; 5841 5842 if (netif_is_bridge_port(lag_dev)) 5843 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 5844 5845 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 5846 if (!netif_is_bridge_port(upper_dev)) 5847 continue; 5848 br_dev = netdev_master_upper_dev_get(upper_dev); 5849 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 5850 } 5851 } 5852 5853 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5854 { 5855 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5856 5857 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 5858 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5859 } 5860 5861 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5862 { 5863 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5864 5865 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 5866 return 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5867 } 5868 5869 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 5870 u16 lag_id, u8 port_index) 5871 { 5872 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5873 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5874 5875 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 5876 lag_id, port_index); 5877 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5878 } 5879 5880 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 5881 u16 lag_id) 5882 { 5883 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5884 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5885 5886 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 5887 lag_id); 5888 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5889 } 5890 5891 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 5892 u16 lag_id) 5893 { 5894 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5895 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5896 5897 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 5898 lag_id); 5899 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5900 } 5901 5902 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 5903 u16 lag_id) 5904 { 5905 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5906 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5907 5908 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 5909 lag_id); 5910 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5911 } 5912 5913 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 5914 struct net_device *lag_dev, 5915 u16 *p_lag_id) 5916 { 5917 struct mlxsw_sp_upper *lag; 5918 int free_lag_id = -1; 5919 u64 max_lag; 5920 int i; 5921 5922 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); 5923 for (i = 0; i < max_lag; i++) { 5924 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 5925 if 
(lag->ref_count) { 5926 if (lag->dev == lag_dev) { 5927 *p_lag_id = i; 5928 return 0; 5929 } 5930 } else if (free_lag_id < 0) { 5931 free_lag_id = i; 5932 } 5933 } 5934 if (free_lag_id < 0) 5935 return -EBUSY; 5936 *p_lag_id = free_lag_id; 5937 return 0; 5938 } 5939 5940 static bool 5941 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 5942 struct net_device *lag_dev, 5943 struct netdev_lag_upper_info *lag_upper_info, 5944 struct netlink_ext_ack *extack) 5945 { 5946 u16 lag_id; 5947 5948 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 5949 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 5950 return false; 5951 } 5952 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 5953 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 5954 return false; 5955 } 5956 return true; 5957 } 5958 5959 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 5960 u16 lag_id, u8 *p_port_index) 5961 { 5962 u64 max_lag_members; 5963 int i; 5964 5965 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 5966 MAX_LAG_MEMBERS); 5967 for (i = 0; i < max_lag_members; i++) { 5968 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 5969 *p_port_index = i; 5970 return 0; 5971 } 5972 } 5973 return -EBUSY; 5974 } 5975 5976 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 5977 struct net_device *lag_dev) 5978 { 5979 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5980 struct mlxsw_sp_upper *lag; 5981 u16 lag_id; 5982 u8 port_index; 5983 int err; 5984 5985 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 5986 if (err) 5987 return err; 5988 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5989 if (!lag->ref_count) { 5990 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 5991 if (err) 5992 return err; 5993 lag->dev = lag_dev; 5994 } 5995 5996 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 5997 if (err) 5998 return err; 5999 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, 
port_index); 6000 if (err) 6001 goto err_col_port_add; 6002 6003 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 6004 mlxsw_sp_port->local_port); 6005 mlxsw_sp_port->lag_id = lag_id; 6006 mlxsw_sp_port->lagged = 1; 6007 lag->ref_count++; 6008 6009 /* Port is no longer usable as a router interface */ 6010 if (mlxsw_sp_port->default_vlan->fid) 6011 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 6012 6013 return 0; 6014 6015 err_col_port_add: 6016 if (!lag->ref_count) 6017 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 6018 return err; 6019 } 6020 6021 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 6022 struct net_device *lag_dev) 6023 { 6024 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6025 u16 lag_id = mlxsw_sp_port->lag_id; 6026 struct mlxsw_sp_upper *lag; 6027 6028 if (!mlxsw_sp_port->lagged) 6029 return; 6030 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 6031 WARN_ON(lag->ref_count == 0); 6032 6033 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 6034 6035 /* Any VLANs configured on the port are no longer valid */ 6036 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 6037 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 6038 /* Make the LAG and its directly linked uppers leave bridges they 6039 * are memeber in 6040 */ 6041 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 6042 6043 if (lag->ref_count == 1) 6044 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 6045 6046 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 6047 mlxsw_sp_port->local_port); 6048 mlxsw_sp_port->lagged = 0; 6049 lag->ref_count--; 6050 6051 /* Make sure untagged frames are allowed to ingress */ 6052 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 6053 } 6054 6055 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 6056 u16 lag_id) 6057 { 6058 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6059 char sldr_pl[MLXSW_REG_SLDR_LEN]; 6060 6061 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, 
lag_id, 6062 mlxsw_sp_port->local_port); 6063 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 6064 } 6065 6066 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 6067 u16 lag_id) 6068 { 6069 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6070 char sldr_pl[MLXSW_REG_SLDR_LEN]; 6071 6072 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 6073 mlxsw_sp_port->local_port); 6074 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 6075 } 6076 6077 static int 6078 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 6079 { 6080 int err; 6081 6082 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 6083 mlxsw_sp_port->lag_id); 6084 if (err) 6085 return err; 6086 6087 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 6088 if (err) 6089 goto err_dist_port_add; 6090 6091 return 0; 6092 6093 err_dist_port_add: 6094 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 6095 return err; 6096 } 6097 6098 static int 6099 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 6100 { 6101 int err; 6102 6103 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 6104 mlxsw_sp_port->lag_id); 6105 if (err) 6106 return err; 6107 6108 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 6109 mlxsw_sp_port->lag_id); 6110 if (err) 6111 goto err_col_port_disable; 6112 6113 return 0; 6114 6115 err_col_port_disable: 6116 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 6117 return err; 6118 } 6119 6120 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 6121 struct netdev_lag_lower_state_info *info) 6122 { 6123 if (info->tx_enabled) 6124 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 6125 else 6126 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 6127 } 6128 6129 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 6130 bool enable) 6131 { 6132 struct mlxsw_sp *mlxsw_sp = 
mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	/* SPMS payload is too large for the stack; allocate it */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Prepare the port for enslavement to an OVS master: switch to virtual
 * port (VLAN-unaware) mode, force all VLANs to forwarding, allow the
 * whole VLAN range to ingress and disable learning, since OVS makes the
 * forwarding decisions itself. Unwinds in reverse order on failure.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning only on the VIDs already disabled above */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

/* Undo mlxsw_sp_port_ovs_join() in reverse order. */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

/* Return true if more than one VxLAN device is enslaved to the bridge. */
static bool
mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

/* In a VLAN-aware bridge each offloaded VxLAN device must map to a unique
 * VLAN. Return false if two VxLAN lowers resolve to the same mapped VID.
 */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		/* Devices without a mapped VID are not offloaded; skip */
		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

/* Validate that a bridge with VxLAN lowers is in a configuration the
 * device can offload, setting extack with the reason when it is not.
 */
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

/* Handle [PRE]CHANGEUPPER events for a front-panel port: veto topologies
 * the device cannot offload, and reflect accepted changes to hardware.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG, bridge, OVS and macvlan uppers are valid */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking never needs to be vetoed */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* Only care about VLAN uppers leaving their bridge */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

/* Propagate bonding lower-state changes (CHANGELOWERSTATE) of a LAG
 * member port to the device's collector / distributor state.
 */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

/* Dispatch a netdev event on a front-panel port to the matching handler. */
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

/* Replay a netdev event on a LAG device to each of its member ports,
 * stopping at the first error.
 */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle [PRE]CHANGEUPPER events for a VLAN device whose real device is a
 * front-panel port. Only bridge and macvlan uppers are supported.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							 upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* Should have been vetoed in PRECHANGEUPPER */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

/* Replay a VLAN-device event to each port member of the underlying LAG. */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle [PRE]CHANGEUPPER events for a VLAN device on top of a bridge;
 * only macvlan uppers over router interfaces are supported.
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct
netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Bridge is not offloaded by this driver instance; nothing to do */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Dispatch an event on a VLAN device according to its real device type. */
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

/* Handle [PRE]CHANGEUPPER events for a bridge device itself; only VLAN
 * and macvlan uppers are supported.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* Tear down router state tied to the departing upper */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* macvlan devices may only be enslaved to a VRF; anything else is
 * rejected at PRECHANGEUPPER time.
 */
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

/* Return true if the event is an (un)enslavement to / from a VRF. */
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

/* Handle events on a VxLAN device: join / leave the hardware NVE tunnel
 * as the device is linked to or unlinked from an offloaded bridge, or
 * brought up / down underneath one.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info
								*cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Bridge not offloaded by this driver instance */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

/* Main netdev notifier callback: keep SPAN mirroring state in sync and
 * route the event to the per-device-type handler.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	/* Invalidate any SPAN session mirroring to an unregistering port */
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	/* NOTE(review): the chain below is a separate `if`, so for a VxLAN
	 * device a matching branch here (e.g. NETDEV_CHANGEMTU) overwrites
	 * the vxlan handler's error - confirm this is intended.
	 */
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX,
				       PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

/* Register the address-validator notifiers and the three ASIC
 * generations' core and PCI drivers, unwinding in reverse order on
 * failure.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

/* Tear everything down in the exact reverse order of module init. */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);