1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/jhash.h> 25 #include <linux/log2.h> 26 #include <net/switchdev.h> 27 #include <net/pkt_cls.h> 28 #include <net/tc_act/tc_mirred.h> 29 #include <net/netevent.h> 30 #include <net/tc_act/tc_sample.h> 31 #include <net/addrconf.h> 32 33 #include "spectrum.h" 34 #include "pci.h" 35 #include "core.h" 36 #include "core_env.h" 37 #include "reg.h" 38 #include "port.h" 39 #include "trap.h" 40 #include "txheader.h" 41 #include "spectrum_cnt.h" 42 #include "spectrum_dpipe.h" 43 #include "spectrum_acl_flex_actions.h" 44 #include "spectrum_span.h" 45 #include "spectrum_ptp.h" 46 #include "../mlxfw/mlxfw.h" 47 48 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) 49 50 #define MLXSW_SP1_FWREV_MAJOR 13 51 #define MLXSW_SP1_FWREV_MINOR 2000 52 #define MLXSW_SP1_FWREV_SUBMINOR 2308 53 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 54 55 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 56 .major = MLXSW_SP1_FWREV_MAJOR, 57 .minor = MLXSW_SP1_FWREV_MINOR, 58 .subminor = MLXSW_SP1_FWREV_SUBMINOR, 59 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 60 }; 61 62 #define MLXSW_SP1_FW_FILENAME \ 63 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 64 "." __stringify(MLXSW_SP1_FWREV_MINOR) \ 65 "." 
__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" 66 67 #define MLXSW_SP2_FWREV_MAJOR 29 68 #define MLXSW_SP2_FWREV_MINOR 2000 69 #define MLXSW_SP2_FWREV_SUBMINOR 2308 70 71 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { 72 .major = MLXSW_SP2_FWREV_MAJOR, 73 .minor = MLXSW_SP2_FWREV_MINOR, 74 .subminor = MLXSW_SP2_FWREV_SUBMINOR, 75 }; 76 77 #define MLXSW_SP2_FW_FILENAME \ 78 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \ 79 "." __stringify(MLXSW_SP2_FWREV_MINOR) \ 80 "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2" 81 82 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 83 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 84 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; 85 static const char mlxsw_sp_driver_version[] = "1.0"; 86 87 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 88 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 89 }; 90 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 91 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 92 }; 93 94 /* tx_hdr_version 95 * Tx header version. 96 * Must be set to 1. 97 */ 98 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 99 100 /* tx_hdr_ctl 101 * Packet control type. 102 * 0 - Ethernet control (e.g. EMADs, LACP) 103 * 1 - Ethernet data 104 */ 105 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 106 107 /* tx_hdr_proto 108 * Packet protocol type. Must be set to 1 (Ethernet). 109 */ 110 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 111 112 /* tx_hdr_rx_is_router 113 * Packet is sent from the router. Valid for data packets only. 114 */ 115 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 116 117 /* tx_hdr_fid_valid 118 * Indicates if the 'fid' field is valid and should be used for 119 * forwarding lookup. Valid for data packets only. 120 */ 121 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 122 123 /* tx_hdr_swid 124 * Switch partition ID. Must be set to 0. 
125 */ 126 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 127 128 /* tx_hdr_control_tclass 129 * Indicates if the packet should use the control TClass and not one 130 * of the data TClasses. 131 */ 132 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 133 134 /* tx_hdr_etclass 135 * Egress TClass to be used on the egress device on the egress port. 136 */ 137 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 138 139 /* tx_hdr_port_mid 140 * Destination local port for unicast packets. 141 * Destination multicast ID for multicast packets. 142 * 143 * Control packets are directed to a specific egress port, while data 144 * packets are transmitted through the CPU port (0) into the switch partition, 145 * where forwarding rules are applied. 146 */ 147 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 148 149 /* tx_hdr_fid 150 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 151 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 152 * Valid for data packets only. 153 */ 154 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 155 156 /* tx_hdr_type 157 * 0 - Data packets 158 * 6 - Control packets 159 */ 160 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 161 162 struct mlxsw_sp_mlxfw_dev { 163 struct mlxfw_dev mlxfw_dev; 164 struct mlxsw_sp *mlxsw_sp; 165 }; 166 167 struct mlxsw_sp_ptp_ops { 168 struct mlxsw_sp_ptp_clock * 169 (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev); 170 void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock); 171 172 struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp); 173 void (*fini)(struct mlxsw_sp_ptp_state *ptp_state); 174 175 /* Notify a driver that a packet that might be PTP was received. Driver 176 * is responsible for freeing the passed-in SKB. 177 */ 178 void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 179 u8 local_port); 180 181 /* Notify a driver that a timestamped packet was transmitted. Driver 182 * is responsible for freeing the passed-in SKB. 
183 */ 184 void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 185 u8 local_port); 186 187 int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, 188 struct hwtstamp_config *config); 189 int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port, 190 struct hwtstamp_config *config); 191 void (*shaper_work)(struct work_struct *work); 192 int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, 193 struct ethtool_ts_info *info); 194 int (*get_stats_count)(void); 195 void (*get_stats_strings)(u8 **p); 196 void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, 197 u64 *data, int data_index); 198 }; 199 200 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 201 u16 component_index, u32 *p_max_size, 202 u8 *p_align_bits, u16 *p_max_write_size) 203 { 204 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 205 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 206 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 207 char mcqi_pl[MLXSW_REG_MCQI_LEN]; 208 int err; 209 210 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 211 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 212 if (err) 213 return err; 214 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 215 p_max_write_size); 216 217 *p_align_bits = max_t(u8, *p_align_bits, 2); 218 *p_max_write_size = min_t(u16, *p_max_write_size, 219 MLXSW_REG_MCDA_MAX_DATA_LEN); 220 return 0; 221 } 222 223 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 224 { 225 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 226 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 227 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 228 char mcc_pl[MLXSW_REG_MCC_LEN]; 229 u8 control_state; 230 int err; 231 232 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 233 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 234 if (err) 235 return err; 236 237 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 238 if (control_state != 
MLXFW_FSM_STATE_IDLE) 239 return -EBUSY; 240 241 mlxsw_reg_mcc_pack(mcc_pl, 242 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 243 0, *fwhandle, 0); 244 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 245 } 246 247 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 248 u32 fwhandle, u16 component_index, 249 u32 component_size) 250 { 251 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 252 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 253 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 254 char mcc_pl[MLXSW_REG_MCC_LEN]; 255 256 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 257 component_index, fwhandle, component_size); 258 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 259 } 260 261 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 262 u32 fwhandle, u8 *data, u16 size, 263 u32 offset) 264 { 265 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 266 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 267 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 268 char mcda_pl[MLXSW_REG_MCDA_LEN]; 269 270 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 271 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 272 } 273 274 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 275 u32 fwhandle, u16 component_index) 276 { 277 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 278 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 279 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 280 char mcc_pl[MLXSW_REG_MCC_LEN]; 281 282 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 283 component_index, fwhandle, 0); 284 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 285 } 286 287 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 288 { 289 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 290 container_of(mlxfw_dev, struct 
mlxsw_sp_mlxfw_dev, mlxfw_dev); 291 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 292 char mcc_pl[MLXSW_REG_MCC_LEN]; 293 294 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 295 fwhandle, 0); 296 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 297 } 298 299 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 300 enum mlxfw_fsm_state *fsm_state, 301 enum mlxfw_fsm_state_err *fsm_state_err) 302 { 303 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 304 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 305 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 306 char mcc_pl[MLXSW_REG_MCC_LEN]; 307 u8 control_state; 308 u8 error_code; 309 int err; 310 311 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 312 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 313 if (err) 314 return err; 315 316 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 317 *fsm_state = control_state; 318 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 319 MLXFW_FSM_STATE_ERR_MAX); 320 return 0; 321 } 322 323 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 324 { 325 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 326 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 327 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 328 char mcc_pl[MLXSW_REG_MCC_LEN]; 329 330 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 331 fwhandle, 0); 332 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 333 } 334 335 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 336 { 337 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 338 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 339 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 340 char mcc_pl[MLXSW_REG_MCC_LEN]; 341 342 mlxsw_reg_mcc_pack(mcc_pl, 343 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 344 fwhandle, 0); 345 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 346 } 347 348 static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev, 349 const char *msg, const char *comp_name, 350 u32 done_bytes, u32 total_bytes) 351 { 352 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 353 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 354 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 355 356 devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core), 357 msg, comp_name, 358 done_bytes, total_bytes); 359 } 360 361 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 362 .component_query = mlxsw_sp_component_query, 363 .fsm_lock = mlxsw_sp_fsm_lock, 364 .fsm_component_update = mlxsw_sp_fsm_component_update, 365 .fsm_block_download = mlxsw_sp_fsm_block_download, 366 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 367 .fsm_activate = mlxsw_sp_fsm_activate, 368 .fsm_query_state = mlxsw_sp_fsm_query_state, 369 .fsm_cancel = mlxsw_sp_fsm_cancel, 370 .fsm_release = mlxsw_sp_fsm_release, 371 .status_notify = mlxsw_sp_status_notify, 372 }; 373 374 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 375 const struct firmware *firmware, 376 struct netlink_ext_ack *extack) 377 { 378 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 379 .mlxfw_dev = { 380 .ops = &mlxsw_sp_mlxfw_dev_ops, 381 .psid = mlxsw_sp->bus_info->psid, 382 .psid_size = strlen(mlxsw_sp->bus_info->psid), 383 }, 384 .mlxsw_sp = mlxsw_sp 385 }; 386 int err; 387 388 mlxsw_core_fw_flash_start(mlxsw_sp->core); 389 devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core)); 390 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, 391 firmware, extack); 392 devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core)); 393 mlxsw_core_fw_flash_end(mlxsw_sp->core); 394 395 return err; 396 } 397 398 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 399 { 400 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 401 const struct 
mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 402 const char *fw_filename = mlxsw_sp->fw_filename; 403 union devlink_param_value value; 404 const struct firmware *firmware; 405 int err; 406 407 /* Don't check if driver does not require it */ 408 if (!req_rev || !fw_filename) 409 return 0; 410 411 /* Don't check if devlink 'fw_load_policy' param is 'flash' */ 412 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), 413 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 414 &value); 415 if (err) 416 return err; 417 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) 418 return 0; 419 420 /* Validate driver & FW are compatible */ 421 if (rev->major != req_rev->major) { 422 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 423 rev->major, req_rev->major); 424 return -EINVAL; 425 } 426 if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == 427 MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && 428 mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev)) 429 return 0; 430 431 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", 432 rev->major, rev->minor, rev->subminor); 433 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 434 fw_filename); 435 436 err = request_firmware_direct(&firmware, fw_filename, 437 mlxsw_sp->bus_info->dev); 438 if (err) { 439 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 440 fw_filename); 441 return err; 442 } 443 444 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL); 445 release_firmware(firmware); 446 if (err) 447 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 448 449 /* On FW flash success, tell the caller FW reset is needed 450 * if current FW supports it. 451 */ 452 if (rev->minor >= req_rev->can_reset_minor) 453 return err ? 
err : -EAGAIN; 454 else 455 return 0; 456 } 457 458 static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core, 459 const char *file_name, const char *component, 460 struct netlink_ext_ack *extack) 461 { 462 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 463 const struct firmware *firmware; 464 int err; 465 466 if (component) 467 return -EOPNOTSUPP; 468 469 err = request_firmware_direct(&firmware, file_name, 470 mlxsw_sp->bus_info->dev); 471 if (err) 472 return err; 473 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack); 474 release_firmware(firmware); 475 476 return err; 477 } 478 479 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 480 unsigned int counter_index, u64 *packets, 481 u64 *bytes) 482 { 483 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 484 int err; 485 486 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 487 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 488 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 489 if (err) 490 return err; 491 if (packets) 492 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 493 if (bytes) 494 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 495 return 0; 496 } 497 498 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 499 unsigned int counter_index) 500 { 501 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 502 503 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 504 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 505 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 506 } 507 508 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 509 unsigned int *p_counter_index) 510 { 511 int err; 512 513 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 514 p_counter_index); 515 if (err) 516 return err; 517 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 518 if (err) 519 goto err_counter_clear; 520 return 0; 521 522 err_counter_clear: 523 mlxsw_sp_counter_free(mlxsw_sp, 
MLXSW_SP_COUNTER_SUB_POOL_FLOW, 524 *p_counter_index); 525 return err; 526 } 527 528 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 529 unsigned int counter_index) 530 { 531 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 532 counter_index); 533 } 534 535 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 536 const struct mlxsw_tx_info *tx_info) 537 { 538 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 539 540 memset(txhdr, 0, MLXSW_TXHDR_LEN); 541 542 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 543 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 544 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 545 mlxsw_tx_hdr_swid_set(txhdr, 0); 546 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 547 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 548 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 549 } 550 551 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 552 { 553 switch (state) { 554 case BR_STATE_FORWARDING: 555 return MLXSW_REG_SPMS_STATE_FORWARDING; 556 case BR_STATE_LEARNING: 557 return MLXSW_REG_SPMS_STATE_LEARNING; 558 case BR_STATE_LISTENING: /* fall-through */ 559 case BR_STATE_DISABLED: /* fall-through */ 560 case BR_STATE_BLOCKING: 561 return MLXSW_REG_SPMS_STATE_DISCARDING; 562 default: 563 BUG(); 564 } 565 } 566 567 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 568 u8 state) 569 { 570 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 571 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 572 char *spms_pl; 573 int err; 574 575 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 576 if (!spms_pl) 577 return -ENOMEM; 578 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 579 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 580 581 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 582 kfree(spms_pl); 583 return err; 584 } 585 586 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 587 { 588 char 
spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 589 int err; 590 591 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 592 if (err) 593 return err; 594 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 595 return 0; 596 } 597 598 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, 599 bool enable, u32 rate) 600 { 601 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 602 char mpsc_pl[MLXSW_REG_MPSC_LEN]; 603 604 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); 605 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl); 606 } 607 608 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 609 bool is_up) 610 { 611 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 612 char paos_pl[MLXSW_REG_PAOS_LEN]; 613 614 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 615 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 616 MLXSW_PORT_ADMIN_STATUS_DOWN); 617 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 618 } 619 620 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 621 unsigned char *addr) 622 { 623 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 624 char ppad_pl[MLXSW_REG_PPAD_LEN]; 625 626 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 627 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 628 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 629 } 630 631 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 632 { 633 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 634 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 635 636 ether_addr_copy(addr, mlxsw_sp->base_mac); 637 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 638 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 639 } 640 641 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 642 { 643 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 644 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 645 int 
max_mtu; 646 int err; 647 648 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 649 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 650 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 651 if (err) 652 return err; 653 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 654 655 if (mtu > max_mtu) 656 return -EINVAL; 657 658 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 659 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 660 } 661 662 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 663 { 664 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 665 char pspa_pl[MLXSW_REG_PSPA_LEN]; 666 667 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 668 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 669 } 670 671 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 672 { 673 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 674 char svpe_pl[MLXSW_REG_SVPE_LEN]; 675 676 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 677 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 678 } 679 680 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 681 bool learn_enable) 682 { 683 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 684 char *spvmlr_pl; 685 int err; 686 687 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 688 if (!spvmlr_pl) 689 return -ENOMEM; 690 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 691 learn_enable); 692 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 693 kfree(spvmlr_pl); 694 return err; 695 } 696 697 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 698 u16 vid) 699 { 700 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 701 char spvid_pl[MLXSW_REG_SPVID_LEN]; 702 703 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 704 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), 
spvid_pl); 705 } 706 707 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 708 bool allow) 709 { 710 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 711 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 712 713 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 714 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 715 } 716 717 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 718 { 719 int err; 720 721 if (!vid) { 722 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 723 if (err) 724 return err; 725 } else { 726 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 727 if (err) 728 return err; 729 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 730 if (err) 731 goto err_port_allow_untagged_set; 732 } 733 734 mlxsw_sp_port->pvid = vid; 735 return 0; 736 737 err_port_allow_untagged_set: 738 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 739 return err; 740 } 741 742 static int 743 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 744 { 745 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 746 char sspr_pl[MLXSW_REG_SSPR_LEN]; 747 748 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 749 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 750 } 751 752 static int 753 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, 754 struct mlxsw_sp_port_mapping *port_mapping) 755 { 756 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 757 bool separate_rxtx; 758 u8 module; 759 u8 width; 760 int err; 761 int i; 762 763 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 764 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 765 if (err) 766 return err; 767 module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 768 width = mlxsw_reg_pmlp_width_get(pmlp_pl); 769 separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl); 770 771 if (width && !is_power_of_2(width)) { 772 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported 
module config: width value is not power of 2\n", 773 local_port); 774 return -EINVAL; 775 } 776 777 for (i = 0; i < width; i++) { 778 if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) { 779 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n", 780 local_port); 781 return -EINVAL; 782 } 783 if (separate_rxtx && 784 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != 785 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) { 786 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n", 787 local_port); 788 return -EINVAL; 789 } 790 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) { 791 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n", 792 local_port); 793 return -EINVAL; 794 } 795 } 796 797 port_mapping->module = module; 798 port_mapping->width = width; 799 port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 800 return 0; 801 } 802 803 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port) 804 { 805 struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping; 806 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 807 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 808 int i; 809 810 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 811 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); 812 for (i = 0; i < port_mapping->width; i++) { 813 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); 814 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ 815 } 816 817 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 818 } 819 820 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 821 { 822 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 823 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 824 825 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 826 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 827 return mlxsw_reg_write(mlxsw_sp->core, 
MLXSW_REG(pmlp), pmlp_pl); 828 } 829 830 static int mlxsw_sp_port_open(struct net_device *dev) 831 { 832 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 833 int err; 834 835 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 836 if (err) 837 return err; 838 netif_start_queue(dev); 839 return 0; 840 } 841 842 static int mlxsw_sp_port_stop(struct net_device *dev) 843 { 844 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 845 846 netif_stop_queue(dev); 847 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 848 } 849 850 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 851 struct net_device *dev) 852 { 853 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 854 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 855 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 856 const struct mlxsw_tx_info tx_info = { 857 .local_port = mlxsw_sp_port->local_port, 858 .is_emad = false, 859 }; 860 u64 len; 861 int err; 862 863 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { 864 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 865 dev_kfree_skb_any(skb); 866 return NETDEV_TX_OK; 867 } 868 869 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); 870 871 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 872 return NETDEV_TX_BUSY; 873 874 if (eth_skb_pad(skb)) { 875 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 876 return NETDEV_TX_OK; 877 } 878 879 mlxsw_sp_txhdr_construct(skb, &tx_info); 880 /* TX header is consumed by HW on the way so we shouldn't count its 881 * bytes as being sent. 882 */ 883 len = skb->len - MLXSW_TXHDR_LEN; 884 885 /* Due to a race we might fail here because of a full queue. In that 886 * unlikely case we simply drop the packet. 
887 */ 888 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 889 890 if (!err) { 891 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 892 u64_stats_update_begin(&pcpu_stats->syncp); 893 pcpu_stats->tx_packets++; 894 pcpu_stats->tx_bytes += len; 895 u64_stats_update_end(&pcpu_stats->syncp); 896 } else { 897 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 898 dev_kfree_skb_any(skb); 899 } 900 return NETDEV_TX_OK; 901 } 902 903 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 904 { 905 } 906 907 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 908 { 909 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 910 struct sockaddr *addr = p; 911 int err; 912 913 if (!is_valid_ether_addr(addr->sa_data)) 914 return -EADDRNOTAVAIL; 915 916 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 917 if (err) 918 return err; 919 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 920 return 0; 921 } 922 923 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 924 int mtu) 925 { 926 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 927 } 928 929 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 930 931 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 932 u16 delay) 933 { 934 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 935 BITS_PER_BYTE)); 936 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 937 mtu); 938 } 939 940 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 941 * Assumes 100m cable and maximum MTU. 
942 */ 943 #define MLXSW_SP_PAUSE_DELAY 58752 944 945 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 946 u16 delay, bool pfc, bool pause) 947 { 948 if (pfc) 949 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 950 else if (pause) 951 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 952 else 953 return 0; 954 } 955 956 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 957 bool lossy) 958 { 959 if (lossy) 960 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 961 else 962 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 963 thres); 964 } 965 966 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 967 u8 *prio_tc, bool pause_en, 968 struct ieee_pfc *my_pfc) 969 { 970 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 971 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 972 u16 delay = !!my_pfc ? my_pfc->delay : 0; 973 char pbmc_pl[MLXSW_REG_PBMC_LEN]; 974 u32 taken_headroom_cells = 0; 975 u32 max_headroom_cells; 976 int i, j, err; 977 978 max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp); 979 980 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); 981 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 982 if (err) 983 return err; 984 985 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 986 bool configure = false; 987 bool pfc = false; 988 u16 thres_cells; 989 u16 delay_cells; 990 u16 total_cells; 991 bool lossy; 992 993 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 994 if (prio_tc[j] == i) { 995 pfc = pfc_en & BIT(j); 996 configure = true; 997 break; 998 } 999 } 1000 1001 if (!configure) 1002 continue; 1003 1004 lossy = !(pfc || pause_en); 1005 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 1006 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, 1007 pfc, pause_en); 1008 total_cells = thres_cells + delay_cells; 1009 1010 taken_headroom_cells += total_cells; 1011 if (taken_headroom_cells > 
max_headroom_cells) 1012 return -ENOBUFS; 1013 1014 mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells, 1015 thres_cells, lossy); 1016 } 1017 1018 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 1019 } 1020 1021 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, 1022 int mtu, bool pause_en) 1023 { 1024 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; 1025 bool dcb_en = !!mlxsw_sp_port->dcb.ets; 1026 struct ieee_pfc *my_pfc; 1027 u8 *prio_tc; 1028 1029 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; 1030 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL; 1031 1032 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, 1033 pause_en, my_pfc); 1034 } 1035 1036 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 1037 { 1038 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1039 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1040 int err; 1041 1042 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); 1043 if (err) 1044 return err; 1045 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); 1046 if (err) 1047 goto err_span_port_mtu_update; 1048 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 1049 if (err) 1050 goto err_port_mtu_set; 1051 dev->mtu = mtu; 1052 return 0; 1053 1054 err_port_mtu_set: 1055 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); 1056 err_span_port_mtu_update: 1057 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1058 return err; 1059 } 1060 1061 static int 1062 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 1063 struct rtnl_link_stats64 *stats) 1064 { 1065 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1066 struct mlxsw_sp_port_pcpu_stats *p; 1067 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1068 u32 tx_dropped = 0; 1069 unsigned int start; 1070 int i; 1071 1072 for_each_possible_cpu(i) { 1073 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 1074 do { 1075 start = u64_stats_fetch_begin_irq(&p->syncp); 1076 
rx_packets = p->rx_packets; 1077 rx_bytes = p->rx_bytes; 1078 tx_packets = p->tx_packets; 1079 tx_bytes = p->tx_bytes; 1080 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 1081 1082 stats->rx_packets += rx_packets; 1083 stats->rx_bytes += rx_bytes; 1084 stats->tx_packets += tx_packets; 1085 stats->tx_bytes += tx_bytes; 1086 /* tx_dropped is u32, updated without syncp protection. */ 1087 tx_dropped += p->tx_dropped; 1088 } 1089 stats->tx_dropped = tx_dropped; 1090 return 0; 1091 } 1092 1093 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) 1094 { 1095 switch (attr_id) { 1096 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1097 return true; 1098 } 1099 1100 return false; 1101 } 1102 1103 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev, 1104 void *sp) 1105 { 1106 switch (attr_id) { 1107 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1108 return mlxsw_sp_port_get_sw_stats64(dev, sp); 1109 } 1110 1111 return -EINVAL; 1112 } 1113 1114 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, 1115 int prio, char *ppcnt_pl) 1116 { 1117 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1118 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1119 1120 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); 1121 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 1122 } 1123 1124 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev, 1125 struct rtnl_link_stats64 *stats) 1126 { 1127 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1128 int err; 1129 1130 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 1131 0, ppcnt_pl); 1132 if (err) 1133 goto out; 1134 1135 stats->tx_packets = 1136 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); 1137 stats->rx_packets = 1138 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); 1139 stats->tx_bytes = 1140 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); 1141 stats->rx_bytes = 1142 
mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 1143 stats->multicast = 1144 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 1145 1146 stats->rx_crc_errors = 1147 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 1148 stats->rx_frame_errors = 1149 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 1150 1151 stats->rx_length_errors = ( 1152 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) + 1153 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) + 1154 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl)); 1155 1156 stats->rx_errors = (stats->rx_crc_errors + 1157 stats->rx_frame_errors + stats->rx_length_errors); 1158 1159 out: 1160 return err; 1161 } 1162 1163 static void 1164 mlxsw_sp_port_get_hw_xstats(struct net_device *dev, 1165 struct mlxsw_sp_port_xstats *xstats) 1166 { 1167 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1168 int err, i; 1169 1170 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 1171 ppcnt_pl); 1172 if (!err) 1173 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); 1174 1175 for (i = 0; i < TC_MAX_QUEUE; i++) { 1176 err = mlxsw_sp_port_get_stats_raw(dev, 1177 MLXSW_REG_PPCNT_TC_CONG_TC, 1178 i, ppcnt_pl); 1179 if (!err) 1180 xstats->wred_drop[i] = 1181 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); 1182 1183 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, 1184 i, ppcnt_pl); 1185 if (err) 1186 continue; 1187 1188 xstats->backlog[i] = 1189 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 1190 xstats->tail_drop[i] = 1191 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); 1192 } 1193 1194 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1195 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, 1196 i, ppcnt_pl); 1197 if (err) 1198 continue; 1199 1200 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); 1201 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); 1202 } 1203 } 1204 1205 static void update_stats_cache(struct work_struct *work) 1206 { 
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	/* Re-arm unconditionally so the cache keeps refreshing. */
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program VLAN membership for [vid_begin, vid_end] in one SPVM register
 * write. The payload is too large for the stack, hence the kmalloc.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Set VLAN membership for an arbitrary range, chunked to the maximum
 * number of records a single SPVM write can carry.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Destroy all VLANs on the port, optionally sparing the default VID. */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

/* Detach a port VLAN from whichever entity (bridge port or router) it is
 * currently member of; the two are mutually exclusive.
 */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

/* Create a port VLAN: program membership in hardware, then track it on
 * the port's VLAN list. Returns the new entry or an ERR_PTR; -EEXIST if
 * the VID is already configured.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

/* Look up a matchall TC entry on the port by its rule cookie. */
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

/* Offload a matchall mirror action as a SPAN agent towards act->dev. */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;

	if (!act->dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
					true, &mirror->span_id);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
				 span_type, true);
}

/* Offload a matchall sample action. Only a single sampler per port is
 * supported and the rate is bounded by what the MPSC register can encode.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	/* Publish the psample group before enabling sampling in hardware so
	 * a sampled packet always finds a valid group.
	 */
	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   act->sample.psample_group);
	mlxsw_sp_port->sample->truncate = act->sample.truncate;
	mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
	mlxsw_sp_port->sample->rate = act->sample.rate;

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

/* Offload a matchall classifier: exactly one mirror or sample action on
 * an ETH_P_ALL filter. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	int err;

	if (!flow_offload_has_one_action(&f->rule->action)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	act = &f->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, act,
							    ingress);
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    act, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

/* Remove an offloaded matchall classifier identified by its cookie. */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct
					  tc_cls_matchall_offload *f,
					  bool ingress)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Dispatch a flower classifier command on an ACL block. */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case FLOW_CLS_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case FLOW_CLS_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Block callback handling matchall only. Flower is handled by a separate
 * per-ACL-block callback and is deliberately accepted as a no-op here.
 */
static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}

static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}

/* Block callback handling flower only; matchall is accepted as a no-op. */
static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	mlxsw_sp_acl_block_destroy(acl_block);
}

static LIST_HEAD(mlxsw_sp_block_cb_list);

/* Bind a port to the flower ACL block of a shared TC block, creating the
 * ACL block and its flow_block_cb on first use. The flow_block_cb
 * refcount tracks how many port bindings share the block.
 */
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb) {
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
					       mlxsw_sp, acl_block,
					       mlxsw_sp_tc_block_flower_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_acl_block_destroy(acl_block);
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
		register_block = true;
	} else {
		acl_block = flow_block_cb_priv(block_cb);
	}
	flow_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress, f->extack);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
	}

	return 0;

err_block_bind:
	/* Drop the reference taken above; free the callback (and with it
	 * the ACL block, via the release callback) if we were its creator.
	 */
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
err_cb_register:
	return err;
}

/* Unbind a port from the flower ACL block; the block is torn down when
 * the last binding goes away.
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = flow_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

/* Bind/unbind both the matchall callback and the shared flower ACL block
 * for a TC shared block, per binder direction.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	f->driver_block_list = &mlxsw_sp_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, mlxsw_sp_port,
					  &mlxsw_sp_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
					       mlxsw_sp_port, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);
		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
							  ingress);
		if (err) {
			flow_block_cb_free(block_cb);
			return err;
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
						      f, ingress);
		block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}


/* NETIF_F_HW_TC toggle: refuse to clear the feature while offloaded rules
 * exist; otherwise mark the port's ACL blocks disabled/enabled.
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
	} else {
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
	}
	return 0;
}

/* NETIF_F_LOOPBACK toggle: the port is taken administratively down around
 * the PPLR register write and brought back up afterwards.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Apply a single feature toggle via its handler and mirror the result in
 * dev->features on success. A no-op when the feature did not change.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		/* Roll dev->features back to what was applied before the
		 * failing handler ran.
		 */
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

/* SIOCSHWTSTAMP: apply a hardware timestamping config via the per-ASIC
 * PTP ops and copy the possibly-adjusted config back to user space.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* Reset the port's hardware timestamping to the all-zero (disabled)
 * configuration.
 */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

/* Only hardware timestamping ioctls are handled here. */
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

/* Program global PAUSE admission (PFCC register) for the port. */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

/* ethtool set_pauseparam: PAUSE is mutually exclusive with PFC and
 * autonegotiated PAUSE is not supported. Headroom is resized first and
 * rolled back if the PFCC write fails.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en =
		   mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* One ethtool counter: its string name, a getter that extracts the value
 * from a PPCNT payload, and whether the value is in cells and needs
 * conversion to bytes.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

/* IEEE 802.3 counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter =
mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 2118 }, 2119 { 2120 .str = "a_mac_control_frames_transmitted", 2121 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 2122 }, 2123 { 2124 .str = "a_mac_control_frames_received", 2125 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 2126 }, 2127 { 2128 .str = "a_unsupported_opcodes_received", 2129 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 2130 }, 2131 { 2132 .str = "a_pause_mac_ctrl_frames_received", 2133 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 2134 }, 2135 { 2136 .str = "a_pause_mac_ctrl_frames_xmitted", 2137 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 2138 }, 2139 }; 2140 2141 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 2142 2143 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = { 2144 { 2145 .str = "if_in_discards", 2146 .getter = mlxsw_reg_ppcnt_if_in_discards_get, 2147 }, 2148 { 2149 .str = "if_out_discards", 2150 .getter = mlxsw_reg_ppcnt_if_out_discards_get, 2151 }, 2152 { 2153 .str = "if_out_errors", 2154 .getter = mlxsw_reg_ppcnt_if_out_errors_get, 2155 }, 2156 }; 2157 2158 #define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \ 2159 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats) 2160 2161 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { 2162 { 2163 .str = "ether_stats_undersize_pkts", 2164 .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get, 2165 }, 2166 { 2167 .str = "ether_stats_oversize_pkts", 2168 .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get, 2169 }, 2170 { 2171 .str = "ether_stats_fragments", 2172 .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get, 2173 }, 2174 { 2175 .str = "ether_pkts64octets", 2176 .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, 2177 }, 2178 { 2179 .str = "ether_pkts65to127octets", 2180 .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get, 2181 }, 2182 { 2183 .str = "ether_pkts128to255octets", 2184 
.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, 2185 }, 2186 { 2187 .str = "ether_pkts256to511octets", 2188 .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, 2189 }, 2190 { 2191 .str = "ether_pkts512to1023octets", 2192 .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, 2193 }, 2194 { 2195 .str = "ether_pkts1024to1518octets", 2196 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, 2197 }, 2198 { 2199 .str = "ether_pkts1519to2047octets", 2200 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, 2201 }, 2202 { 2203 .str = "ether_pkts2048to4095octets", 2204 .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, 2205 }, 2206 { 2207 .str = "ether_pkts4096to8191octets", 2208 .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get, 2209 }, 2210 { 2211 .str = "ether_pkts8192to10239octets", 2212 .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get, 2213 }, 2214 }; 2215 2216 #define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ 2217 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) 2218 2219 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = { 2220 { 2221 .str = "dot3stats_fcs_errors", 2222 .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get, 2223 }, 2224 { 2225 .str = "dot3stats_symbol_errors", 2226 .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get, 2227 }, 2228 { 2229 .str = "dot3control_in_unknown_opcodes", 2230 .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get, 2231 }, 2232 { 2233 .str = "dot3in_pause_frames", 2234 .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get, 2235 }, 2236 }; 2237 2238 #define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \ 2239 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats) 2240 2241 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = { 2242 { 2243 .str = "discard_ingress_general", 2244 .getter = mlxsw_reg_ppcnt_ingress_general_get, 2245 }, 2246 { 2247 .str = "discard_ingress_policy_engine", 2248 .getter = 
mlxsw_reg_ppcnt_ingress_policy_engine_get, 2249 }, 2250 { 2251 .str = "discard_ingress_vlan_membership", 2252 .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get, 2253 }, 2254 { 2255 .str = "discard_ingress_tag_frame_type", 2256 .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get, 2257 }, 2258 { 2259 .str = "discard_egress_vlan_membership", 2260 .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get, 2261 }, 2262 { 2263 .str = "discard_loopback_filter", 2264 .getter = mlxsw_reg_ppcnt_loopback_filter_get, 2265 }, 2266 { 2267 .str = "discard_egress_general", 2268 .getter = mlxsw_reg_ppcnt_egress_general_get, 2269 }, 2270 { 2271 .str = "discard_egress_hoq", 2272 .getter = mlxsw_reg_ppcnt_egress_hoq_get, 2273 }, 2274 { 2275 .str = "discard_egress_policy_engine", 2276 .getter = mlxsw_reg_ppcnt_egress_policy_engine_get, 2277 }, 2278 { 2279 .str = "discard_ingress_tx_link_down", 2280 .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get, 2281 }, 2282 { 2283 .str = "discard_egress_stp_filter", 2284 .getter = mlxsw_reg_ppcnt_egress_stp_filter_get, 2285 }, 2286 { 2287 .str = "discard_egress_sll", 2288 .getter = mlxsw_reg_ppcnt_egress_sll_get, 2289 }, 2290 }; 2291 2292 #define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \ 2293 ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats) 2294 2295 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 2296 { 2297 .str = "rx_octets_prio", 2298 .getter = mlxsw_reg_ppcnt_rx_octets_get, 2299 }, 2300 { 2301 .str = "rx_frames_prio", 2302 .getter = mlxsw_reg_ppcnt_rx_frames_get, 2303 }, 2304 { 2305 .str = "tx_octets_prio", 2306 .getter = mlxsw_reg_ppcnt_tx_octets_get, 2307 }, 2308 { 2309 .str = "tx_frames_prio", 2310 .getter = mlxsw_reg_ppcnt_tx_frames_get, 2311 }, 2312 { 2313 .str = "rx_pause_prio", 2314 .getter = mlxsw_reg_ppcnt_rx_pause_get, 2315 }, 2316 { 2317 .str = "rx_pause_duration_prio", 2318 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 2319 }, 2320 { 2321 .str = "tx_pause_prio", 2322 .getter = mlxsw_reg_ppcnt_tx_pause_get, 2323 }, 
2324 { 2325 .str = "tx_pause_duration_prio", 2326 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 2327 }, 2328 }; 2329 2330 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 2331 2332 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 2333 { 2334 .str = "tc_transmit_queue_tc", 2335 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 2336 .cells_bytes = true, 2337 }, 2338 { 2339 .str = "tc_no_buffer_discard_uc_tc", 2340 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 2341 }, 2342 }; 2343 2344 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 2345 2346 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 2347 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \ 2348 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ 2349 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \ 2350 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \ 2351 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ 2352 IEEE_8021QAZ_MAX_TCS) + \ 2353 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ 2354 TC_MAX_QUEUE)) 2355 2356 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 2357 { 2358 int i; 2359 2360 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 2361 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 2362 mlxsw_sp_port_hw_prio_stats[i].str, prio); 2363 *p += ETH_GSTRING_LEN; 2364 } 2365 } 2366 2367 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 2368 { 2369 int i; 2370 2371 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 2372 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 2373 mlxsw_sp_port_hw_tc_stats[i].str, tc); 2374 *p += ETH_GSTRING_LEN; 2375 } 2376 } 2377 2378 static void mlxsw_sp_port_get_strings(struct net_device *dev, 2379 u32 stringset, u8 *data) 2380 { 2381 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2382 u8 *p = data; 2383 int i; 2384 2385 switch (stringset) { 2386 case ETH_SS_STATS: 2387 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2388 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2389 ETH_GSTRING_LEN); 2390 p += 
ETH_GSTRING_LEN; 2391 } 2392 2393 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) { 2394 memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str, 2395 ETH_GSTRING_LEN); 2396 p += ETH_GSTRING_LEN; 2397 } 2398 2399 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { 2400 memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, 2401 ETH_GSTRING_LEN); 2402 p += ETH_GSTRING_LEN; 2403 } 2404 2405 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) { 2406 memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str, 2407 ETH_GSTRING_LEN); 2408 p += ETH_GSTRING_LEN; 2409 } 2410 2411 for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) { 2412 memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str, 2413 ETH_GSTRING_LEN); 2414 p += ETH_GSTRING_LEN; 2415 } 2416 2417 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2418 mlxsw_sp_port_get_prio_strings(&p, i); 2419 2420 for (i = 0; i < TC_MAX_QUEUE; i++) 2421 mlxsw_sp_port_get_tc_strings(&p, i); 2422 2423 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p); 2424 break; 2425 } 2426 } 2427 2428 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2429 enum ethtool_phys_id_state state) 2430 { 2431 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2432 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2433 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2434 bool active; 2435 2436 switch (state) { 2437 case ETHTOOL_ID_ACTIVE: 2438 active = true; 2439 break; 2440 case ETHTOOL_ID_INACTIVE: 2441 active = false; 2442 break; 2443 default: 2444 return -EOPNOTSUPP; 2445 } 2446 2447 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2448 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2449 } 2450 2451 static int 2452 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2453 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2454 { 2455 switch (grp) { 2456 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2457 *p_hw_stats = mlxsw_sp_port_hw_stats; 2458 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2459 break; 2460 case 
MLXSW_REG_PPCNT_RFC_2863_CNT: 2461 *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats; 2462 *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2463 break; 2464 case MLXSW_REG_PPCNT_RFC_2819_CNT: 2465 *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; 2466 *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2467 break; 2468 case MLXSW_REG_PPCNT_RFC_3635_CNT: 2469 *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats; 2470 *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2471 break; 2472 case MLXSW_REG_PPCNT_DISCARD_CNT: 2473 *p_hw_stats = mlxsw_sp_port_hw_discard_stats; 2474 *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2475 break; 2476 case MLXSW_REG_PPCNT_PRIO_CNT: 2477 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2478 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2479 break; 2480 case MLXSW_REG_PPCNT_TC_CNT: 2481 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2482 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2483 break; 2484 default: 2485 WARN_ON(1); 2486 return -EOPNOTSUPP; 2487 } 2488 return 0; 2489 } 2490 2491 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2492 enum mlxsw_reg_ppcnt_grp grp, int prio, 2493 u64 *data, int data_index) 2494 { 2495 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2496 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2497 struct mlxsw_sp_port_hw_stats *hw_stats; 2498 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2499 int i, len; 2500 int err; 2501 2502 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2503 if (err) 2504 return; 2505 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2506 for (i = 0; i < len; i++) { 2507 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2508 if (!hw_stats[i].cells_bytes) 2509 continue; 2510 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2511 data[data_index + i]); 2512 } 2513 } 2514 2515 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2516 struct ethtool_stats *stats, u64 *data) 2517 { 2518 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2519 int i, data_index = 0; 2520 2521 /* IEEE 802.3 
Counters */ 2522 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2523 data, data_index); 2524 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2525 2526 /* RFC 2863 Counters */ 2527 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0, 2528 data, data_index); 2529 data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2530 2531 /* RFC 2819 Counters */ 2532 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, 2533 data, data_index); 2534 data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2535 2536 /* RFC 3635 Counters */ 2537 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0, 2538 data, data_index); 2539 data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2540 2541 /* Discard Counters */ 2542 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0, 2543 data, data_index); 2544 data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2545 2546 /* Per-Priority Counters */ 2547 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2548 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2549 data, data_index); 2550 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2551 } 2552 2553 /* Per-TC Counters */ 2554 for (i = 0; i < TC_MAX_QUEUE; i++) { 2555 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2556 data, data_index); 2557 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2558 } 2559 2560 /* PTP counters */ 2561 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port, 2562 data, data_index); 2563 data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); 2564 } 2565 2566 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2567 { 2568 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2569 2570 switch (sset) { 2571 case ETH_SS_STATS: 2572 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN + 2573 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); 2574 default: 2575 return -EOPNOTSUPP; 2576 } 2577 } 2578 2579 struct mlxsw_sp1_port_link_mode { 2580 enum ethtool_link_mode_bit_indices mask_ethtool; 2581 u32 
mask; 2582 u32 speed; 2583 }; 2584 2585 static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = { 2586 { 2587 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2588 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2589 .speed = SPEED_100, 2590 }, 2591 { 2592 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2593 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2594 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2595 .speed = SPEED_1000, 2596 }, 2597 { 2598 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2599 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2600 .speed = SPEED_10000, 2601 }, 2602 { 2603 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 2604 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 2605 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2606 .speed = SPEED_10000, 2607 }, 2608 { 2609 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2610 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2611 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2612 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 2613 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2614 .speed = SPEED_10000, 2615 }, 2616 { 2617 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 2618 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 2619 .speed = SPEED_20000, 2620 }, 2621 { 2622 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 2623 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2624 .speed = SPEED_40000, 2625 }, 2626 { 2627 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 2628 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2629 .speed = SPEED_40000, 2630 }, 2631 { 2632 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 2633 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2634 .speed = SPEED_40000, 2635 }, 2636 { 2637 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 2638 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2639 .speed = SPEED_40000, 2640 }, 2641 { 2642 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, 2643 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2644 .speed = 
SPEED_25000, 2645 }, 2646 { 2647 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, 2648 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2649 .speed = SPEED_25000, 2650 }, 2651 { 2652 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2653 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2654 .speed = SPEED_25000, 2655 }, 2656 { 2657 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, 2658 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2659 .speed = SPEED_50000, 2660 }, 2661 { 2662 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 2663 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2664 .speed = SPEED_50000, 2665 }, 2666 { 2667 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2, 2668 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2669 .speed = SPEED_50000, 2670 }, 2671 { 2672 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, 2673 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2674 .speed = SPEED_100000, 2675 }, 2676 { 2677 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, 2678 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2679 .speed = SPEED_100000, 2680 }, 2681 { 2682 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, 2683 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2684 .speed = SPEED_100000, 2685 }, 2686 { 2687 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 2688 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2689 .speed = SPEED_100000, 2690 }, 2691 }; 2692 2693 #define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode) 2694 2695 static void 2696 mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 2697 u32 ptys_eth_proto, 2698 struct ethtool_link_ksettings *cmd) 2699 { 2700 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2701 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2702 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2703 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2704 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2705 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2706 
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2707 2708 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2709 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2710 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2711 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2712 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2713 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2714 } 2715 2716 static void 2717 mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 2718 u8 width, unsigned long *mode) 2719 { 2720 int i; 2721 2722 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2723 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) 2724 __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2725 mode); 2726 } 2727 } 2728 2729 static u32 2730 mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 2731 { 2732 int i; 2733 2734 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2735 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) 2736 return mlxsw_sp1_port_link_mode[i].speed; 2737 } 2738 2739 return SPEED_UNKNOWN; 2740 } 2741 2742 static void 2743 mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 2744 u32 ptys_eth_proto, 2745 struct ethtool_link_ksettings *cmd) 2746 { 2747 cmd->base.speed = SPEED_UNKNOWN; 2748 cmd->base.duplex = DUPLEX_UNKNOWN; 2749 2750 if (!carrier_ok) 2751 return; 2752 2753 cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 2754 if (cmd->base.speed != SPEED_UNKNOWN) 2755 cmd->base.duplex = DUPLEX_FULL; 2756 } 2757 2758 static u32 2759 mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, 2760 const struct ethtool_link_ksettings *cmd) 2761 { 2762 u32 ptys_proto = 0; 2763 int i; 2764 2765 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2766 if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2767 cmd->link_modes.advertising)) 2768 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2769 } 2770 return ptys_proto; 2771 } 2772 2773 static u32 
mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width, 2774 u32 speed) 2775 { 2776 u32 ptys_proto = 0; 2777 int i; 2778 2779 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2780 if (speed == mlxsw_sp1_port_link_mode[i].speed) 2781 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2782 } 2783 return ptys_proto; 2784 } 2785 2786 static u32 2787 mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 2788 { 2789 u32 ptys_proto = 0; 2790 int i; 2791 2792 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2793 if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed) 2794 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2795 } 2796 return ptys_proto; 2797 } 2798 2799 static int 2800 mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2801 u32 *base_speed) 2802 { 2803 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 2804 return 0; 2805 } 2806 2807 static void 2808 mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 2809 u8 local_port, u32 proto_admin, bool autoneg) 2810 { 2811 mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg); 2812 } 2813 2814 static void 2815 mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 2816 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 2817 u32 *p_eth_proto_oper) 2818 { 2819 mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin, 2820 p_eth_proto_oper); 2821 } 2822 2823 static const struct mlxsw_sp_port_type_speed_ops 2824 mlxsw_sp1_port_type_speed_ops = { 2825 .from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port, 2826 .from_ptys_link = mlxsw_sp1_from_ptys_link, 2827 .from_ptys_speed = mlxsw_sp1_from_ptys_speed, 2828 .from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex, 2829 .to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link, 2830 .to_ptys_speed = mlxsw_sp1_to_ptys_speed, 2831 .to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed, 2832 .port_speed_base = mlxsw_sp1_port_speed_base, 2833 .reg_ptys_eth_pack = 
mlxsw_sp1_reg_ptys_eth_pack, 2834 .reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack, 2835 }; 2836 2837 static const enum ethtool_link_mode_bit_indices 2838 mlxsw_sp2_mask_ethtool_sgmii_100m[] = { 2839 ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2840 }; 2841 2842 #define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \ 2843 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m) 2844 2845 static const enum ethtool_link_mode_bit_indices 2846 mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = { 2847 ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 2848 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2849 }; 2850 2851 #define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \ 2852 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii) 2853 2854 static const enum ethtool_link_mode_bit_indices 2855 mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = { 2856 ETHTOOL_LINK_MODE_2500baseX_Full_BIT, 2857 }; 2858 2859 #define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \ 2860 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii) 2861 2862 static const enum ethtool_link_mode_bit_indices 2863 mlxsw_sp2_mask_ethtool_5gbase_r[] = { 2864 ETHTOOL_LINK_MODE_5000baseT_Full_BIT, 2865 }; 2866 2867 #define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \ 2868 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r) 2869 2870 static const enum ethtool_link_mode_bit_indices 2871 mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = { 2872 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2873 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2874 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 2875 ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 2876 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 2877 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 2878 ETHTOOL_LINK_MODE_10000baseER_Full_BIT, 2879 }; 2880 2881 #define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \ 2882 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g) 2883 2884 static const enum ethtool_link_mode_bit_indices 2885 mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = { 2886 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2887 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2888 
ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2889 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2890 }; 2891 2892 #define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \ 2893 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g) 2894 2895 static const enum ethtool_link_mode_bit_indices 2896 mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = { 2897 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2898 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2899 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2900 }; 2901 2902 #define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \ 2903 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr) 2904 2905 static const enum ethtool_link_mode_bit_indices 2906 mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = { 2907 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2908 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2909 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2910 }; 2911 2912 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \ 2913 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2) 2914 2915 static const enum ethtool_link_mode_bit_indices 2916 mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = { 2917 ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, 2918 ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, 2919 ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, 2920 ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, 2921 ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, 2922 }; 2923 2924 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \ 2925 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr) 2926 2927 static const enum ethtool_link_mode_bit_indices 2928 mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = { 2929 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2930 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2931 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2932 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2933 }; 2934 2935 #define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \ 2936 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4) 2937 
2938 static const enum ethtool_link_mode_bit_indices 2939 mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = { 2940 ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, 2941 ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, 2942 ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, 2943 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, 2944 ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, 2945 }; 2946 2947 #define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \ 2948 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2) 2949 2950 static const enum ethtool_link_mode_bit_indices 2951 mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { 2952 ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, 2953 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, 2954 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, 2955 ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, 2956 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, 2957 }; 2958 2959 #define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \ 2960 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4) 2961 2962 static const enum ethtool_link_mode_bit_indices 2963 mlxsw_sp2_mask_ethtool_400gaui_8[] = { 2964 ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, 2965 ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, 2966 ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT, 2967 ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT, 2968 ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, 2969 }; 2970 2971 #define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \ 2972 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8) 2973 2974 #define MLXSW_SP_PORT_MASK_WIDTH_1X BIT(0) 2975 #define MLXSW_SP_PORT_MASK_WIDTH_2X BIT(1) 2976 #define MLXSW_SP_PORT_MASK_WIDTH_4X BIT(2) 2977 #define MLXSW_SP_PORT_MASK_WIDTH_8X BIT(3) 2978 2979 static u8 mlxsw_sp_port_mask_width_get(u8 width) 2980 { 2981 switch (width) { 2982 case 1: 2983 return MLXSW_SP_PORT_MASK_WIDTH_1X; 2984 case 2: 2985 return MLXSW_SP_PORT_MASK_WIDTH_2X; 2986 case 4: 2987 return MLXSW_SP_PORT_MASK_WIDTH_4X; 2988 case 8: 2989 return MLXSW_SP_PORT_MASK_WIDTH_8X; 2990 default: 2991 
WARN_ON_ONCE(1); 2992 return 0; 2993 } 2994 } 2995 2996 struct mlxsw_sp2_port_link_mode { 2997 const enum ethtool_link_mode_bit_indices *mask_ethtool; 2998 int m_ethtool_len; 2999 u32 mask; 3000 u32 speed; 3001 u8 mask_width; 3002 }; 3003 3004 static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { 3005 { 3006 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M, 3007 .mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m, 3008 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN, 3009 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3010 MLXSW_SP_PORT_MASK_WIDTH_2X | 3011 MLXSW_SP_PORT_MASK_WIDTH_4X | 3012 MLXSW_SP_PORT_MASK_WIDTH_8X, 3013 .speed = SPEED_100, 3014 }, 3015 { 3016 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII, 3017 .mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii, 3018 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN, 3019 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3020 MLXSW_SP_PORT_MASK_WIDTH_2X | 3021 MLXSW_SP_PORT_MASK_WIDTH_4X | 3022 MLXSW_SP_PORT_MASK_WIDTH_8X, 3023 .speed = SPEED_1000, 3024 }, 3025 { 3026 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII, 3027 .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii, 3028 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN, 3029 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3030 MLXSW_SP_PORT_MASK_WIDTH_2X | 3031 MLXSW_SP_PORT_MASK_WIDTH_4X | 3032 MLXSW_SP_PORT_MASK_WIDTH_8X, 3033 .speed = SPEED_2500, 3034 }, 3035 { 3036 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R, 3037 .mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r, 3038 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN, 3039 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3040 MLXSW_SP_PORT_MASK_WIDTH_2X | 3041 MLXSW_SP_PORT_MASK_WIDTH_4X | 3042 MLXSW_SP_PORT_MASK_WIDTH_8X, 3043 .speed = SPEED_5000, 3044 }, 3045 { 3046 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G, 3047 .mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g, 3048 .m_ethtool_len = 
MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN, 3049 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3050 MLXSW_SP_PORT_MASK_WIDTH_2X | 3051 MLXSW_SP_PORT_MASK_WIDTH_4X | 3052 MLXSW_SP_PORT_MASK_WIDTH_8X, 3053 .speed = SPEED_10000, 3054 }, 3055 { 3056 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G, 3057 .mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g, 3058 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN, 3059 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | 3060 MLXSW_SP_PORT_MASK_WIDTH_8X, 3061 .speed = SPEED_40000, 3062 }, 3063 { 3064 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR, 3065 .mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr, 3066 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN, 3067 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3068 MLXSW_SP_PORT_MASK_WIDTH_2X | 3069 MLXSW_SP_PORT_MASK_WIDTH_4X | 3070 MLXSW_SP_PORT_MASK_WIDTH_8X, 3071 .speed = SPEED_25000, 3072 }, 3073 { 3074 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2, 3075 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2, 3076 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN, 3077 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X | 3078 MLXSW_SP_PORT_MASK_WIDTH_4X | 3079 MLXSW_SP_PORT_MASK_WIDTH_8X, 3080 .speed = SPEED_50000, 3081 }, 3082 { 3083 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR, 3084 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr, 3085 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN, 3086 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X, 3087 .speed = SPEED_50000, 3088 }, 3089 { 3090 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4, 3091 .mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4, 3092 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN, 3093 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | 3094 MLXSW_SP_PORT_MASK_WIDTH_8X, 3095 .speed = SPEED_100000, 
3096 }, 3097 { 3098 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2, 3099 .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2, 3100 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN, 3101 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X, 3102 .speed = SPEED_100000, 3103 }, 3104 { 3105 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, 3106 .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, 3107 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, 3108 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | 3109 MLXSW_SP_PORT_MASK_WIDTH_8X, 3110 .speed = SPEED_200000, 3111 }, 3112 { 3113 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8, 3114 .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8, 3115 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN, 3116 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X, 3117 .speed = SPEED_400000, 3118 }, 3119 }; 3120 3121 #define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode) 3122 3123 static void 3124 mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 3125 u32 ptys_eth_proto, 3126 struct ethtool_link_ksettings *cmd) 3127 { 3128 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 3129 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 3130 } 3131 3132 static void 3133 mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3134 unsigned long *mode) 3135 { 3136 int i; 3137 3138 for (i = 0; i < link_mode->m_ethtool_len; i++) 3139 __set_bit(link_mode->mask_ethtool[i], mode); 3140 } 3141 3142 static void 3143 mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 3144 u8 width, unsigned long *mode) 3145 { 3146 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 3147 int i; 3148 3149 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3150 if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) && 3151 (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) 3152 
			mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
						  mode);
	}
}

/* Return the ethtool speed (SPEED_*) of the first entry in
 * mlxsw_sp2_port_link_mode whose PTYS bit is set in ptys_eth_proto,
 * or SPEED_UNKNOWN when no entry matches.
 */
static u32
mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
			return mlxsw_sp2_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill cmd->base.speed and cmd->base.duplex from the operational PTYS
 * protocol mask. Without carrier both are reported as unknown.
 */
static void
mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	/* Duplex is reported as full whenever a speed was resolved. */
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* Return true iff every ethtool link-mode bit belonging to link_mode
 * is set in the ethtool bitmap 'mode'.
 */
static bool
mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			   const unsigned long *mode)
{
	int cnt = 0;
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++) {
		if (test_bit(link_mode->mask_ethtool[i], mode))
			cnt++;
	}

	return cnt == link_mode->m_ethtool_len;
}

/* Translate the ethtool link modes advertised in 'cmd' into a PTYS
 * protocol mask. Only entries compatible with the port's width are
 * considered, and an entry's PTYS bit is set only when all of its
 * ethtool bits are advertised.
 */
static u32
mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
			      const struct ethtool_link_ksettings *cmd)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) &&
		    mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
					       cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Translate a fixed ethtool speed to a PTYS protocol mask, restricted
 * to link modes compatible with the port's width.
 */
static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp,
				   u8 width, u32 speed)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	u32 ptys_proto = 0;
	int i;
3224 3225 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3226 if ((speed == mlxsw_sp2_port_link_mode[i].speed) && 3227 (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) 3228 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3229 } 3230 return ptys_proto; 3231 } 3232 3233 static u32 3234 mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 3235 { 3236 u32 ptys_proto = 0; 3237 int i; 3238 3239 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3240 if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed) 3241 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3242 } 3243 return ptys_proto; 3244 } 3245 3246 static int 3247 mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3248 u32 *base_speed) 3249 { 3250 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3251 u32 eth_proto_cap; 3252 int err; 3253 3254 /* In Spectrum-2, the speed of 1x can change from port to port, so query 3255 * it from firmware. 3256 */ 3257 mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false); 3258 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3259 if (err) 3260 return err; 3261 mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 3262 3263 if (eth_proto_cap & 3264 MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) { 3265 *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G; 3266 return 0; 3267 } 3268 3269 if (eth_proto_cap & 3270 MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) { 3271 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 3272 return 0; 3273 } 3274 3275 return -EIO; 3276 } 3277 3278 static void 3279 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 3280 u8 local_port, u32 proto_admin, 3281 bool autoneg) 3282 { 3283 mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); 3284 } 3285 3286 static void 3287 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 3288 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 3289 u32 *p_eth_proto_oper) 3290 { 3291 
mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, 3292 p_eth_proto_admin, p_eth_proto_oper); 3293 } 3294 3295 static const struct mlxsw_sp_port_type_speed_ops 3296 mlxsw_sp2_port_type_speed_ops = { 3297 .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, 3298 .from_ptys_link = mlxsw_sp2_from_ptys_link, 3299 .from_ptys_speed = mlxsw_sp2_from_ptys_speed, 3300 .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, 3301 .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, 3302 .to_ptys_speed = mlxsw_sp2_to_ptys_speed, 3303 .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed, 3304 .port_speed_base = mlxsw_sp2_port_speed_base, 3305 .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, 3306 .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, 3307 }; 3308 3309 static void 3310 mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, 3311 u8 width, struct ethtool_link_ksettings *cmd) 3312 { 3313 const struct mlxsw_sp_port_type_speed_ops *ops; 3314 3315 ops = mlxsw_sp->port_type_speed_ops; 3316 3317 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 3318 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 3319 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 3320 3321 ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); 3322 ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width, 3323 cmd->link_modes.supported); 3324 } 3325 3326 static void 3327 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, 3328 u32 eth_proto_admin, bool autoneg, u8 width, 3329 struct ethtool_link_ksettings *cmd) 3330 { 3331 const struct mlxsw_sp_port_type_speed_ops *ops; 3332 3333 ops = mlxsw_sp->port_type_speed_ops; 3334 3335 if (!autoneg) 3336 return; 3337 3338 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 3339 ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width, 3340 cmd->link_modes.advertising); 3341 } 3342 3343 static u8 3344 mlxsw_sp_port_connector_port(enum 
mlxsw_reg_ptys_connector_type connector_type) 3345 { 3346 switch (connector_type) { 3347 case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: 3348 return PORT_OTHER; 3349 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: 3350 return PORT_NONE; 3351 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: 3352 return PORT_TP; 3353 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: 3354 return PORT_AUI; 3355 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: 3356 return PORT_BNC; 3357 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: 3358 return PORT_MII; 3359 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: 3360 return PORT_FIBRE; 3361 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: 3362 return PORT_DA; 3363 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: 3364 return PORT_OTHER; 3365 default: 3366 WARN_ON_ONCE(1); 3367 return PORT_OTHER; 3368 } 3369 } 3370 3371 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 3372 struct ethtool_link_ksettings *cmd) 3373 { 3374 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 3375 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3376 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3377 const struct mlxsw_sp_port_type_speed_ops *ops; 3378 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3379 u8 connector_type; 3380 bool autoneg; 3381 int err; 3382 3383 ops = mlxsw_sp->port_type_speed_ops; 3384 3385 autoneg = mlxsw_sp_port->link.autoneg; 3386 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3387 0, false); 3388 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3389 if (err) 3390 return err; 3391 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3392 ð_proto_admin, ð_proto_oper); 3393 3394 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, 3395 mlxsw_sp_port->mapping.width, cmd); 3396 3397 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3398 mlxsw_sp_port->mapping.width, cmd); 3399 3400 cmd->base.autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; 3401 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3402 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3403 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3404 eth_proto_oper, cmd); 3405 3406 return 0; 3407 } 3408 3409 static int 3410 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3411 const struct ethtool_link_ksettings *cmd) 3412 { 3413 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3414 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3415 const struct mlxsw_sp_port_type_speed_ops *ops; 3416 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3417 u32 eth_proto_cap, eth_proto_new; 3418 bool autoneg; 3419 int err; 3420 3421 ops = mlxsw_sp->port_type_speed_ops; 3422 3423 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3424 0, false); 3425 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3426 if (err) 3427 return err; 3428 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3429 3430 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 3431 eth_proto_new = autoneg ? 
3432 ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width, 3433 cmd) : 3434 ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width, 3435 cmd->base.speed); 3436 3437 eth_proto_new = eth_proto_new & eth_proto_cap; 3438 if (!eth_proto_new) { 3439 netdev_err(dev, "No supported speed requested\n"); 3440 return -EINVAL; 3441 } 3442 3443 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3444 eth_proto_new, autoneg); 3445 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3446 if (err) 3447 return err; 3448 3449 mlxsw_sp_port->link.autoneg = autoneg; 3450 3451 if (!netif_running(dev)) 3452 return 0; 3453 3454 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3455 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3456 3457 return 0; 3458 } 3459 3460 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3461 struct ethtool_modinfo *modinfo) 3462 { 3463 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3464 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3465 int err; 3466 3467 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3468 mlxsw_sp_port->mapping.module, 3469 modinfo); 3470 3471 return err; 3472 } 3473 3474 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3475 struct ethtool_eeprom *ee, 3476 u8 *data) 3477 { 3478 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3479 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3480 int err; 3481 3482 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3483 mlxsw_sp_port->mapping.module, ee, 3484 data); 3485 3486 return err; 3487 } 3488 3489 static int 3490 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) 3491 { 3492 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3493 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3494 3495 return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); 3496 } 3497 3498 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 3499 .get_drvinfo = 
mlxsw_sp_port_get_drvinfo, 3500 .get_link = ethtool_op_get_link, 3501 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 3502 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 3503 .get_strings = mlxsw_sp_port_get_strings, 3504 .set_phys_id = mlxsw_sp_port_set_phys_id, 3505 .get_ethtool_stats = mlxsw_sp_port_get_stats, 3506 .get_sset_count = mlxsw_sp_port_get_sset_count, 3507 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 3508 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 3509 .get_module_info = mlxsw_sp_get_module_info, 3510 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 3511 .get_ts_info = mlxsw_sp_get_ts_info, 3512 }; 3513 3514 static int 3515 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port) 3516 { 3517 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3518 const struct mlxsw_sp_port_type_speed_ops *ops; 3519 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3520 u32 eth_proto_admin; 3521 u32 upper_speed; 3522 u32 base_speed; 3523 int err; 3524 3525 ops = mlxsw_sp->port_type_speed_ops; 3526 3527 err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port, 3528 &base_speed); 3529 if (err) 3530 return err; 3531 upper_speed = base_speed * mlxsw_sp_port->mapping.width; 3532 3533 eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed); 3534 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3535 eth_proto_admin, mlxsw_sp_port->link.autoneg); 3536 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3537 } 3538 3539 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 3540 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 3541 bool dwrr, u8 dwrr_weight) 3542 { 3543 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3544 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3545 3546 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3547 next_index); 3548 mlxsw_reg_qeec_de_set(qeec_pl, true); 3549 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 3550 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, 
					dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Enable the max shaper on the given QEEC scheduling element and
 * program its maximal rate.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Enable the min shaper on the given QEEC scheduling element and
 * program its minimal rate.
 */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Map a switch priority to a traffic class via the QTCT register. */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

/* Set up the default ETS (egress scheduling) configuration of a port. */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
3600 */ 3601 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3602 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 3603 0); 3604 if (err) 3605 return err; 3606 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3607 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3608 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 3609 0, false, 0); 3610 if (err) 3611 return err; 3612 } 3613 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3614 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3615 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 3616 false, 0); 3617 if (err) 3618 return err; 3619 3620 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3621 MLXSW_REG_QEEC_HIERARCY_TC, 3622 i + 8, i, 3623 true, 100); 3624 if (err) 3625 return err; 3626 } 3627 3628 /* Make sure the max shaper is disabled in all hierarchies that support 3629 * it. Note that this disables ptps (PTP shaper), but that is intended 3630 * for the initial configuration. 3631 */ 3632 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3633 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 3634 MLXSW_REG_QEEC_MAS_DIS); 3635 if (err) 3636 return err; 3637 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3638 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3639 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 3640 i, 0, 3641 MLXSW_REG_QEEC_MAS_DIS); 3642 if (err) 3643 return err; 3644 } 3645 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3646 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3647 MLXSW_REG_QEEC_HIERARCY_TC, 3648 i, i, 3649 MLXSW_REG_QEEC_MAS_DIS); 3650 if (err) 3651 return err; 3652 3653 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3654 MLXSW_REG_QEEC_HIERARCY_TC, 3655 i + 8, i, 3656 MLXSW_REG_QEEC_MAS_DIS); 3657 if (err) 3658 return err; 3659 } 3660 3661 /* Configure the min shaper for multicast TCs. */ 3662 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3663 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 3664 MLXSW_REG_QEEC_HIERARCY_TC, 3665 i + 8, i, 3666 MLXSW_REG_QEEC_MIS_MIN); 3667 if (err) 3668 return err; 3669 } 3670 3671 /* Map all priorities to traffic class 0. 
*/ 3672 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3673 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 3674 if (err) 3675 return err; 3676 } 3677 3678 return 0; 3679 } 3680 3681 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 3682 bool enable) 3683 { 3684 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3685 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 3686 3687 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 3688 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 3689 } 3690 3691 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3692 u8 split_base_local_port, 3693 struct mlxsw_sp_port_mapping *port_mapping) 3694 { 3695 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 3696 bool split = !!split_base_local_port; 3697 struct mlxsw_sp_port *mlxsw_sp_port; 3698 struct net_device *dev; 3699 int err; 3700 3701 err = mlxsw_core_port_init(mlxsw_sp->core, local_port, 3702 port_mapping->module + 1, split, 3703 port_mapping->lane / port_mapping->width, 3704 mlxsw_sp->base_mac, 3705 sizeof(mlxsw_sp->base_mac)); 3706 if (err) { 3707 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 3708 local_port); 3709 return err; 3710 } 3711 3712 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 3713 if (!dev) { 3714 err = -ENOMEM; 3715 goto err_alloc_etherdev; 3716 } 3717 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 3718 dev_net_set(dev, mlxsw_sp_net(mlxsw_sp)); 3719 mlxsw_sp_port = netdev_priv(dev); 3720 mlxsw_sp_port->dev = dev; 3721 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3722 mlxsw_sp_port->local_port = local_port; 3723 mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; 3724 mlxsw_sp_port->split = split; 3725 mlxsw_sp_port->split_base_local_port = split_base_local_port; 3726 mlxsw_sp_port->mapping = *port_mapping; 3727 mlxsw_sp_port->link.autoneg = 1; 3728 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 3729 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 3730 3731 mlxsw_sp_port->pcpu_stats 
= 3732 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 3733 if (!mlxsw_sp_port->pcpu_stats) { 3734 err = -ENOMEM; 3735 goto err_alloc_stats; 3736 } 3737 3738 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 3739 GFP_KERNEL); 3740 if (!mlxsw_sp_port->sample) { 3741 err = -ENOMEM; 3742 goto err_alloc_sample; 3743 } 3744 3745 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 3746 &update_stats_cache); 3747 3748 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 3749 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 3750 3751 err = mlxsw_sp_port_module_map(mlxsw_sp_port); 3752 if (err) { 3753 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 3754 mlxsw_sp_port->local_port); 3755 goto err_port_module_map; 3756 } 3757 3758 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 3759 if (err) { 3760 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 3761 mlxsw_sp_port->local_port); 3762 goto err_port_swid_set; 3763 } 3764 3765 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 3766 if (err) { 3767 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 3768 mlxsw_sp_port->local_port); 3769 goto err_dev_addr_init; 3770 } 3771 3772 netif_carrier_off(dev); 3773 3774 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 3775 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 3776 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; 3777 3778 dev->min_mtu = 0; 3779 dev->max_mtu = ETH_MAX_MTU; 3780 3781 /* Each packet needs to have a Tx header (metadata) on top all other 3782 * headers. 
3783 */ 3784 dev->needed_headroom = MLXSW_TXHDR_LEN; 3785 3786 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 3787 if (err) { 3788 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 3789 mlxsw_sp_port->local_port); 3790 goto err_port_system_port_mapping_set; 3791 } 3792 3793 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port); 3794 if (err) { 3795 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 3796 mlxsw_sp_port->local_port); 3797 goto err_port_speed_by_width_set; 3798 } 3799 3800 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 3801 if (err) { 3802 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 3803 mlxsw_sp_port->local_port); 3804 goto err_port_mtu_set; 3805 } 3806 3807 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3808 if (err) 3809 goto err_port_admin_status_set; 3810 3811 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 3812 if (err) { 3813 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 3814 mlxsw_sp_port->local_port); 3815 goto err_port_buffers_init; 3816 } 3817 3818 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 3819 if (err) { 3820 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 3821 mlxsw_sp_port->local_port); 3822 goto err_port_ets_init; 3823 } 3824 3825 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 3826 if (err) { 3827 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 3828 mlxsw_sp_port->local_port); 3829 goto err_port_tc_mc_mode; 3830 } 3831 3832 /* ETS and buffers must be initialized before DCB. 
*/ 3833 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 3834 if (err) { 3835 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 3836 mlxsw_sp_port->local_port); 3837 goto err_port_dcb_init; 3838 } 3839 3840 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 3841 if (err) { 3842 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 3843 mlxsw_sp_port->local_port); 3844 goto err_port_fids_init; 3845 } 3846 3847 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 3848 if (err) { 3849 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 3850 mlxsw_sp_port->local_port); 3851 goto err_port_qdiscs_init; 3852 } 3853 3854 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false, 3855 false); 3856 if (err) { 3857 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n", 3858 mlxsw_sp_port->local_port); 3859 goto err_port_vlan_clear; 3860 } 3861 3862 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 3863 if (err) { 3864 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 3865 mlxsw_sp_port->local_port); 3866 goto err_port_nve_init; 3867 } 3868 3869 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 3870 if (err) { 3871 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", 3872 mlxsw_sp_port->local_port); 3873 goto err_port_pvid_set; 3874 } 3875 3876 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 3877 MLXSW_SP_DEFAULT_VID); 3878 if (IS_ERR(mlxsw_sp_port_vlan)) { 3879 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 3880 mlxsw_sp_port->local_port); 3881 err = PTR_ERR(mlxsw_sp_port_vlan); 3882 goto err_port_vlan_create; 3883 } 3884 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; 3885 3886 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, 3887 mlxsw_sp->ptp_ops->shaper_work); 3888 3889 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 3890 err = register_netdev(dev); 3891 if (err) { 3892 dev_err(mlxsw_sp->bus_info->dev, "Port 
%d: Failed to register netdev\n", 3893 mlxsw_sp_port->local_port); 3894 goto err_register_netdev; 3895 } 3896 3897 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 3898 mlxsw_sp_port, dev); 3899 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 3900 return 0; 3901 3902 err_register_netdev: 3903 mlxsw_sp->ports[local_port] = NULL; 3904 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 3905 err_port_vlan_create: 3906 err_port_pvid_set: 3907 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3908 err_port_nve_init: 3909 err_port_vlan_clear: 3910 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3911 err_port_qdiscs_init: 3912 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3913 err_port_fids_init: 3914 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3915 err_port_dcb_init: 3916 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3917 err_port_tc_mc_mode: 3918 err_port_ets_init: 3919 err_port_buffers_init: 3920 err_port_admin_status_set: 3921 err_port_mtu_set: 3922 err_port_speed_by_width_set: 3923 err_port_system_port_mapping_set: 3924 err_dev_addr_init: 3925 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3926 err_port_swid_set: 3927 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3928 err_port_module_map: 3929 kfree(mlxsw_sp_port->sample); 3930 err_alloc_sample: 3931 free_percpu(mlxsw_sp_port->pcpu_stats); 3932 err_alloc_stats: 3933 free_netdev(dev); 3934 err_alloc_etherdev: 3935 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3936 return err; 3937 } 3938 3939 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3940 { 3941 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3942 3943 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 3944 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); 3945 mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 3946 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3947 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3948 
	mlxsw_sp->ports[local_port] = NULL;
	/* Tear down in reverse order of mlxsw_sp_port_create(). */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

/* Allocate the CPU port representation, register it with the core and
 * store it in mlxsw_sp->ports so lookups by local port work. Note that
 * no netdev is allocated for it, unlike front-panel ports.
 */
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

/* Undo mlxsw_sp_cpu_port_create(): unregister the CPU port from the
 * core, clear its slot and free the port structure.
 */
static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

/* Return true when a port object exists for the given local port. */
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
4009 { 4010 int i; 4011 4012 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 4013 if (mlxsw_sp_port_created(mlxsw_sp, i)) 4014 mlxsw_sp_port_remove(mlxsw_sp, i); 4015 mlxsw_sp_cpu_port_remove(mlxsw_sp); 4016 kfree(mlxsw_sp->ports); 4017 } 4018 4019 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 4020 { 4021 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 4022 struct mlxsw_sp_port_mapping *port_mapping; 4023 size_t alloc_size; 4024 int i; 4025 int err; 4026 4027 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 4028 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 4029 if (!mlxsw_sp->ports) 4030 return -ENOMEM; 4031 4032 err = mlxsw_sp_cpu_port_create(mlxsw_sp); 4033 if (err) 4034 goto err_cpu_port_create; 4035 4036 for (i = 1; i < max_ports; i++) { 4037 port_mapping = mlxsw_sp->port_mapping[i]; 4038 if (!port_mapping) 4039 continue; 4040 err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping); 4041 if (err) 4042 goto err_port_create; 4043 } 4044 return 0; 4045 4046 err_port_create: 4047 for (i--; i >= 1; i--) 4048 if (mlxsw_sp_port_created(mlxsw_sp, i)) 4049 mlxsw_sp_port_remove(mlxsw_sp, i); 4050 mlxsw_sp_cpu_port_remove(mlxsw_sp); 4051 err_cpu_port_create: 4052 kfree(mlxsw_sp->ports); 4053 return err; 4054 } 4055 4056 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) 4057 { 4058 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 4059 struct mlxsw_sp_port_mapping port_mapping; 4060 int i; 4061 int err; 4062 4063 mlxsw_sp->port_mapping = kcalloc(max_ports, 4064 sizeof(struct mlxsw_sp_port_mapping *), 4065 GFP_KERNEL); 4066 if (!mlxsw_sp->port_mapping) 4067 return -ENOMEM; 4068 4069 for (i = 1; i < max_ports; i++) { 4070 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping); 4071 if (err) 4072 goto err_port_module_info_get; 4073 if (!port_mapping.width) 4074 continue; 4075 4076 mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping, 4077 sizeof(port_mapping), 4078 GFP_KERNEL); 4079 
		/* Tail of mlxsw_sp_port_module_info_init(): duplication of a
		 * port mapping failed -> unwind what was allocated so far.
		 */
		if (!mlxsw_sp->port_mapping[i]) {
			err = -ENOMEM;
			goto err_port_module_info_dup;
		}
	}
	return 0;

err_port_module_info_get:
err_port_module_info_dup:
	/* Free only the mappings already duplicated; local port 0 is the
	 * CPU port and has no mapping, so the walk stops at 1.
	 */
	for (i--; i >= 1; i--)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
	return err;
}

/* Free all per-port module mappings allocated by
 * mlxsw_sp_port_module_info_init().
 */
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
}

/* Return the first local port of the cluster @local_port belongs to,
 * assuming clusters of @max_width consecutive local ports starting at 1.
 */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
{
	u8 offset = (local_port - 1) % max_width;

	return local_port - offset;
}

/* Create @count split ports starting at @base_port, @offset local ports
 * apart, each taking width/count lanes of the original port mapping.
 * On failure, removes the split ports created so far and returns the error.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   base_port, &split_port_mapping);
		if (err)
			goto err_port_create;
		/* Next split port starts on the lane right after this one. */
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}

/* Recreate the original (unsplit) ports in the local port range
 * [base_port, base_port + count * offset) from the saved port mappings.
 * Errors from mlxsw_sp_port_create() are intentionally not propagated;
 * this is best-effort restoration.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port,
					 unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the gap and recreate them. */
	for (i = 0; i < count * offset; i++) {
		port_mapping = mlxsw_sp->port_mapping[base_port + i];
		if (!port_mapping)
			continue;
		mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
	}
}

/* Return the local port stride between sibling split ports for a split
 * of @max_width lanes into @count ports, taken from device resources.
 * Returns -EINVAL for unsupported widths or missing resources.
 */
static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
				       unsigned int count,
				       unsigned int max_width)
{
	enum mlxsw_res_id local_ports_in_x_res_id;
	int split_width = max_width / count;

	if (split_width == 1)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
	else if (split_width == 2)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
	else if (split_width == 4)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
	else
		return -EINVAL;

	if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
		return -EINVAL;
	return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
}

/* devlink port split handler: validate the request, remove the existing
 * port(s) and create @count split ports in their place. On failure the
 * original unsplit ports are restored.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int max_width;
	u8 base_port;
	int offset;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	/* Split ports cannot be split. */
	if (mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Split port with non-max and 1 module width cannot be split. */
	if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	if (count == 1 || !is_power_of_2(count) || count > max_width) {
		netdev_err(mlxsw_sp_port->dev, "Invalid split count\n");
		NL_SET_ERR_MSG_MOD(extack, "Invalid split count");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* Only in case max split is being done, the local port and
	 * base port may differ.
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	for (i = 0; i < count * offset; i++) {
		/* Expect base port to exist and also the one in the middle in
		 * case of maximal split count.
		 */
		if (i == 0 || (count == max_width && i == count / 2))
			continue;

		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Copy the mapping before the port is removed below. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
					 count, offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Best effort: put the original unsplit ports back. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
	return err;
}

/* devlink port unsplit handler: remove the split ports belonging to the
 * same cluster and recreate the original unsplit port(s).
 * (Continues past this view boundary.)
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	unsigned int count;
	int max_width;
	u8 base_port;
	int offset;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}
4303 4304 count = max_width / mlxsw_sp_port->mapping.width; 4305 4306 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 4307 if (WARN_ON(offset < 0)) { 4308 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 4309 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 4310 return -EINVAL; 4311 } 4312 4313 base_port = mlxsw_sp_port->split_base_local_port; 4314 4315 for (i = 0; i < count; i++) 4316 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4317 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4318 4319 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 4320 4321 return 0; 4322 } 4323 4324 static void 4325 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) 4326 { 4327 int i; 4328 4329 for (i = 0; i < TC_MAX_QUEUE; i++) 4330 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; 4331 } 4332 4333 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 4334 char *pude_pl, void *priv) 4335 { 4336 struct mlxsw_sp *mlxsw_sp = priv; 4337 struct mlxsw_sp_port *mlxsw_sp_port; 4338 enum mlxsw_reg_pude_oper_status status; 4339 u8 local_port; 4340 4341 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 4342 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4343 if (!mlxsw_sp_port) 4344 return; 4345 4346 status = mlxsw_reg_pude_oper_status_get(pude_pl); 4347 if (status == MLXSW_PORT_OPER_STATUS_UP) { 4348 netdev_info(mlxsw_sp_port->dev, "link up\n"); 4349 netif_carrier_on(mlxsw_sp_port->dev); 4350 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 4351 } else { 4352 netdev_info(mlxsw_sp_port->dev, "link down\n"); 4353 netif_carrier_off(mlxsw_sp_port->dev); 4354 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); 4355 } 4356 } 4357 4358 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 4359 char *mtpptr_pl, bool ingress) 4360 { 4361 u8 local_port; 4362 u8 num_rec; 4363 int i; 4364 4365 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 
4366 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 4367 for (i = 0; i < num_rec; i++) { 4368 u8 domain_number; 4369 u8 message_type; 4370 u16 sequence_id; 4371 u64 timestamp; 4372 4373 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 4374 &domain_number, &sequence_id, 4375 ×tamp); 4376 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 4377 message_type, domain_number, 4378 sequence_id, timestamp); 4379 } 4380 } 4381 4382 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 4383 char *mtpptr_pl, void *priv) 4384 { 4385 struct mlxsw_sp *mlxsw_sp = priv; 4386 4387 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 4388 } 4389 4390 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 4391 char *mtpptr_pl, void *priv) 4392 { 4393 struct mlxsw_sp *mlxsw_sp = priv; 4394 4395 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 4396 } 4397 4398 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 4399 u8 local_port, void *priv) 4400 { 4401 struct mlxsw_sp *mlxsw_sp = priv; 4402 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4403 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 4404 4405 if (unlikely(!mlxsw_sp_port)) { 4406 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 4407 local_port); 4408 return; 4409 } 4410 4411 skb->dev = mlxsw_sp_port->dev; 4412 4413 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 4414 u64_stats_update_begin(&pcpu_stats->syncp); 4415 pcpu_stats->rx_packets++; 4416 pcpu_stats->rx_bytes += skb->len; 4417 u64_stats_update_end(&pcpu_stats->syncp); 4418 4419 skb->protocol = eth_type_trans(skb, skb->dev); 4420 netif_receive_skb(skb); 4421 } 4422 4423 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 4424 void *priv) 4425 { 4426 skb->offload_fwd_mark = 1; 4427 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4428 } 4429 4430 static void 
mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 4431 u8 local_port, void *priv) 4432 { 4433 skb->offload_l3_fwd_mark = 1; 4434 skb->offload_fwd_mark = 1; 4435 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4436 } 4437 4438 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 4439 void *priv) 4440 { 4441 struct mlxsw_sp *mlxsw_sp = priv; 4442 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4443 struct psample_group *psample_group; 4444 u32 size; 4445 4446 if (unlikely(!mlxsw_sp_port)) { 4447 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 4448 local_port); 4449 goto out; 4450 } 4451 if (unlikely(!mlxsw_sp_port->sample)) { 4452 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 4453 local_port); 4454 goto out; 4455 } 4456 4457 size = mlxsw_sp_port->sample->truncate ? 4458 mlxsw_sp_port->sample->trunc_size : skb->len; 4459 4460 rcu_read_lock(); 4461 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 4462 if (!psample_group) 4463 goto out_unlock; 4464 psample_sample_packet(psample_group, skb, size, 4465 mlxsw_sp_port->dev->ifindex, 0, 4466 mlxsw_sp_port->sample->rate); 4467 out_unlock: 4468 rcu_read_unlock(); 4469 out: 4470 consume_skb(skb); 4471 } 4472 4473 static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port, 4474 void *priv) 4475 { 4476 struct mlxsw_sp *mlxsw_sp = priv; 4477 4478 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 4479 } 4480 4481 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4482 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 4483 _is_ctrl, SP_##_trap_group, DISCARD) 4484 4485 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4486 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 4487 _is_ctrl, SP_##_trap_group, DISCARD) 4488 4489 #define 
MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

/* Trap/event listeners common to all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
		  false, SP_LLDP, DISCARD),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
	/* PTP traps */
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU,
		  false, SP_PTP0, DISCARD),
	MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false),
};

/* Extra listeners registered only on Spectrum-1 (PTP FIFO events). */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

/* Program a rate/burst policer (QPCR register) for each CPU trap group,
 * so traffic trapped to the CPU is rate-limited per group.
 * (Continues past this view boundary.)
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case
		MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			rate = 24 * 1024;
			burst_size = 12;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			rate = 19 * 1024;
			burst_size = 12;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
			rate = 360;
			burst_size = 7;
			break;
		default:
			/* Groups without an explicit policy keep their
			 * current policer configuration.
			 */
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Bind each CPU trap group (HTGT register) to a priority, traffic class
 * and the policer programmed by mlxsw_sp_cpu_policers_set(); the policer
 * id equals the group id except for the EVENT group, which is unpoliced.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* The chosen policer id must actually exist. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Register an array of trap listeners; on failure, unregister the ones
 * already registered and return the error.
 */
static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_listener listeners[],
				   size_t listeners_count)
{
	int i;
	int err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &listeners[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
	return err;
}

/* Unregister all listeners in the given array. */
static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_listener listeners[],
				      size_t listeners_count)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
}

/* Set up CPU policers and trap groups, then register the common and the
 * per-ASIC (mlxsw_sp->listeners) trap listeners.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	return err;
}

/* Unregister listeners in reverse registration order. */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure the LAG hash (SLCR register) with a seed derived from the
 * switch base MAC, and allocate the per-LAG upper-device table.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Minimal trap group setup (EMAD only) used before full initialization. */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

/* Spectrum-1 PTP operations. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
};

/* Spectrum-2/3 PTP operations. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
};

/* Forward declaration; defined later in this file. */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

/* Common driver init: validates FW, then brings up every subsystem in
 * dependency order (KVDL, FIDs, traps, buffers, LAG, SPAN, switchdev,
 * counters, ACL actions, NVE, ACL, router, PTP, notifier, dpipe, ports).
 * The error path unwinds in exact reverse order.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 init: select SP1-specific ops/tables, then common init. */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-2 init: select SP2-specific ops/tables, then common init. */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-3 init: reuses SP2 ops; note no req_rev/fw_filename is set,
 * so no firmware requirement is enforced for this ASIC here.
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Tear down everything mlxsw_sp_init() set up, in reverse order.
 * (Continues past this view boundary.)
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_devlink_traps_fini(mlxsw_sp); 5186 mlxsw_sp_traps_fini(mlxsw_sp); 5187 mlxsw_sp_fids_fini(mlxsw_sp); 5188 mlxsw_sp_kvdl_fini(mlxsw_sp); 5189 } 5190 5191 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated 5192 * 802.1Q FIDs 5193 */ 5194 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 5195 VLAN_VID_MASK - 1) 5196 5197 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 5198 .used_max_mid = 1, 5199 .max_mid = MLXSW_SP_MID_MAX, 5200 .used_flood_tables = 1, 5201 .used_flood_mode = 1, 5202 .flood_mode = 3, 5203 .max_fid_flood_tables = 3, 5204 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 5205 .used_max_ib_mc = 1, 5206 .max_ib_mc = 0, 5207 .used_max_pkey = 1, 5208 .max_pkey = 0, 5209 .used_kvd_sizes = 1, 5210 .kvd_hash_single_parts = 59, 5211 .kvd_hash_double_parts = 41, 5212 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 5213 .swid_config = { 5214 { 5215 .used_type = 1, 5216 .type = MLXSW_PORT_SWID_TYPE_ETH, 5217 } 5218 }, 5219 }; 5220 5221 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 5222 .used_max_mid = 1, 5223 .max_mid = MLXSW_SP_MID_MAX, 5224 .used_flood_tables = 1, 5225 .used_flood_mode = 1, 5226 .flood_mode = 3, 5227 .max_fid_flood_tables = 3, 5228 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 5229 .used_max_ib_mc = 1, 5230 .max_ib_mc = 0, 5231 .used_max_pkey = 1, 5232 .max_pkey = 0, 5233 .swid_config = { 5234 { 5235 .used_type = 1, 5236 .type = MLXSW_PORT_SWID_TYPE_ETH, 5237 } 5238 }, 5239 }; 5240 5241 static void 5242 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 5243 struct devlink_resource_size_params *kvd_size_params, 5244 struct devlink_resource_size_params *linear_size_params, 5245 struct devlink_resource_size_params *hash_double_size_params, 5246 struct devlink_resource_size_params *hash_single_size_params) 5247 { 5248 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 5249 KVD_SINGLE_MIN_SIZE); 5250 u32 double_size_min = 
MLXSW_CORE_RES_GET(mlxsw_core, 5251 KVD_DOUBLE_MIN_SIZE); 5252 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 5253 u32 linear_size_min = 0; 5254 5255 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 5256 MLXSW_SP_KVD_GRANULARITY, 5257 DEVLINK_RESOURCE_UNIT_ENTRY); 5258 devlink_resource_size_params_init(linear_size_params, linear_size_min, 5259 kvd_size - single_size_min - 5260 double_size_min, 5261 MLXSW_SP_KVD_GRANULARITY, 5262 DEVLINK_RESOURCE_UNIT_ENTRY); 5263 devlink_resource_size_params_init(hash_double_size_params, 5264 double_size_min, 5265 kvd_size - single_size_min - 5266 linear_size_min, 5267 MLXSW_SP_KVD_GRANULARITY, 5268 DEVLINK_RESOURCE_UNIT_ENTRY); 5269 devlink_resource_size_params_init(hash_single_size_params, 5270 single_size_min, 5271 kvd_size - double_size_min - 5272 linear_size_min, 5273 MLXSW_SP_KVD_GRANULARITY, 5274 DEVLINK_RESOURCE_UNIT_ENTRY); 5275 } 5276 5277 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 5278 { 5279 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5280 struct devlink_resource_size_params hash_single_size_params; 5281 struct devlink_resource_size_params hash_double_size_params; 5282 struct devlink_resource_size_params linear_size_params; 5283 struct devlink_resource_size_params kvd_size_params; 5284 u32 kvd_size, single_size, double_size, linear_size; 5285 const struct mlxsw_config_profile *profile; 5286 int err; 5287 5288 profile = &mlxsw_sp1_config_profile; 5289 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 5290 return -EIO; 5291 5292 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 5293 &linear_size_params, 5294 &hash_double_size_params, 5295 &hash_single_size_params); 5296 5297 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 5298 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 5299 kvd_size, MLXSW_SP_RESOURCE_KVD, 5300 DEVLINK_RESOURCE_ID_PARENT_TOP, 5301 &kvd_size_params); 5302 if (err) 5303 return err; 5304 
5305 linear_size = profile->kvd_linear_size; 5306 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 5307 linear_size, 5308 MLXSW_SP_RESOURCE_KVD_LINEAR, 5309 MLXSW_SP_RESOURCE_KVD, 5310 &linear_size_params); 5311 if (err) 5312 return err; 5313 5314 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 5315 if (err) 5316 return err; 5317 5318 double_size = kvd_size - linear_size; 5319 double_size *= profile->kvd_hash_double_parts; 5320 double_size /= profile->kvd_hash_double_parts + 5321 profile->kvd_hash_single_parts; 5322 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 5323 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 5324 double_size, 5325 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 5326 MLXSW_SP_RESOURCE_KVD, 5327 &hash_double_size_params); 5328 if (err) 5329 return err; 5330 5331 single_size = kvd_size - double_size - linear_size; 5332 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 5333 single_size, 5334 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 5335 MLXSW_SP_RESOURCE_KVD, 5336 &hash_single_size_params); 5337 if (err) 5338 return err; 5339 5340 return 0; 5341 } 5342 5343 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) 5344 { 5345 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5346 struct devlink_resource_size_params kvd_size_params; 5347 u32 kvd_size; 5348 5349 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 5350 return -EIO; 5351 5352 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 5353 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, 5354 MLXSW_SP_KVD_GRANULARITY, 5355 DEVLINK_RESOURCE_UNIT_ENTRY); 5356 5357 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 5358 kvd_size, MLXSW_SP_RESOURCE_KVD, 5359 DEVLINK_RESOURCE_ID_PARENT_TOP, 5360 &kvd_size_params); 5361 } 5362 5363 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core) 5364 { 5365 struct devlink *devlink = 
priv_to_devlink(mlxsw_core); 5366 struct devlink_resource_size_params span_size_params; 5367 u32 max_span; 5368 5369 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN)) 5370 return -EIO; 5371 5372 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN); 5373 devlink_resource_size_params_init(&span_size_params, max_span, max_span, 5374 1, DEVLINK_RESOURCE_UNIT_ENTRY); 5375 5376 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN, 5377 max_span, MLXSW_SP_RESOURCE_SPAN, 5378 DEVLINK_RESOURCE_ID_PARENT_TOP, 5379 &span_size_params); 5380 } 5381 5382 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 5383 { 5384 int err; 5385 5386 err = mlxsw_sp1_resources_kvd_register(mlxsw_core); 5387 if (err) 5388 return err; 5389 5390 err = mlxsw_sp_resources_span_register(mlxsw_core); 5391 if (err) 5392 goto err_resources_span_register; 5393 5394 return 0; 5395 5396 err_resources_span_register: 5397 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); 5398 return err; 5399 } 5400 5401 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 5402 { 5403 int err; 5404 5405 err = mlxsw_sp2_resources_kvd_register(mlxsw_core); 5406 if (err) 5407 return err; 5408 5409 err = mlxsw_sp_resources_span_register(mlxsw_core); 5410 if (err) 5411 goto err_resources_span_register; 5412 5413 return 0; 5414 5415 err_resources_span_register: 5416 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); 5417 return err; 5418 } 5419 5420 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 5421 const struct mlxsw_config_profile *profile, 5422 u64 *p_single_size, u64 *p_double_size, 5423 u64 *p_linear_size) 5424 { 5425 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5426 u32 double_size; 5427 int err; 5428 5429 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 5430 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 5431 return -EIO; 5432 5433 /* The hash part is what left of the kvd without the 5434 * 
linear part. It is split to the single size and 5435 * double size by the parts ratio from the profile. 5436 * Both sizes must be a multiplications of the 5437 * granularity from the profile. In case the user 5438 * provided the sizes they are obtained via devlink. 5439 */ 5440 err = devlink_resource_size_get(devlink, 5441 MLXSW_SP_RESOURCE_KVD_LINEAR, 5442 p_linear_size); 5443 if (err) 5444 *p_linear_size = profile->kvd_linear_size; 5445 5446 err = devlink_resource_size_get(devlink, 5447 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 5448 p_double_size); 5449 if (err) { 5450 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 5451 *p_linear_size; 5452 double_size *= profile->kvd_hash_double_parts; 5453 double_size /= profile->kvd_hash_double_parts + 5454 profile->kvd_hash_single_parts; 5455 *p_double_size = rounddown(double_size, 5456 MLXSW_SP_KVD_GRANULARITY); 5457 } 5458 5459 err = devlink_resource_size_get(devlink, 5460 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 5461 p_single_size); 5462 if (err) 5463 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 5464 *p_double_size - *p_linear_size; 5465 5466 /* Check results are legal. 
*/ 5467 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 5468 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 5469 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 5470 return -EIO; 5471 5472 return 0; 5473 } 5474 5475 static int 5476 mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id, 5477 union devlink_param_value val, 5478 struct netlink_ext_ack *extack) 5479 { 5480 if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) && 5481 (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) { 5482 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'"); 5483 return -EINVAL; 5484 } 5485 5486 return 0; 5487 } 5488 5489 static const struct devlink_param mlxsw_sp_devlink_params[] = { 5490 DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, 5491 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), 5492 NULL, NULL, 5493 mlxsw_sp_devlink_param_fw_load_policy_validate), 5494 }; 5495 5496 static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core) 5497 { 5498 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5499 union devlink_param_value value; 5500 int err; 5501 5502 err = devlink_params_register(devlink, mlxsw_sp_devlink_params, 5503 ARRAY_SIZE(mlxsw_sp_devlink_params)); 5504 if (err) 5505 return err; 5506 5507 value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER; 5508 devlink_param_driverinit_value_set(devlink, 5509 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 5510 value); 5511 return 0; 5512 } 5513 5514 static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core) 5515 { 5516 devlink_params_unregister(priv_to_devlink(mlxsw_core), 5517 mlxsw_sp_devlink_params, 5518 ARRAY_SIZE(mlxsw_sp_devlink_params)); 5519 } 5520 5521 static int 5522 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 5523 struct devlink_param_gset_ctx *ctx) 5524 { 5525 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 5526 struct mlxsw_sp *mlxsw_sp = 
mlxsw_core_driver_priv(mlxsw_core); 5527 5528 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 5529 return 0; 5530 } 5531 5532 static int 5533 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 5534 struct devlink_param_gset_ctx *ctx) 5535 { 5536 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 5537 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5538 5539 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 5540 } 5541 5542 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 5543 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 5544 "acl_region_rehash_interval", 5545 DEVLINK_PARAM_TYPE_U32, 5546 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 5547 mlxsw_sp_params_acl_region_rehash_intrvl_get, 5548 mlxsw_sp_params_acl_region_rehash_intrvl_set, 5549 NULL), 5550 }; 5551 5552 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 5553 { 5554 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5555 union devlink_param_value value; 5556 int err; 5557 5558 err = mlxsw_sp_params_register(mlxsw_core); 5559 if (err) 5560 return err; 5561 5562 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 5563 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 5564 if (err) 5565 goto err_devlink_params_register; 5566 5567 value.vu32 = 0; 5568 devlink_param_driverinit_value_set(devlink, 5569 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 5570 value); 5571 return 0; 5572 5573 err_devlink_params_register: 5574 mlxsw_sp_params_unregister(mlxsw_core); 5575 return err; 5576 } 5577 5578 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 5579 { 5580 devlink_params_unregister(priv_to_devlink(mlxsw_core), 5581 mlxsw_sp2_devlink_params, 5582 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 5583 mlxsw_sp_params_unregister(mlxsw_core); 5584 } 5585 5586 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 5587 struct sk_buff *skb, u8 local_port) 5588 
{ 5589 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5590 5591 skb_pull(skb, MLXSW_TXHDR_LEN); 5592 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 5593 } 5594 5595 static struct mlxsw_driver mlxsw_sp1_driver = { 5596 .kind = mlxsw_sp1_driver_name, 5597 .priv_size = sizeof(struct mlxsw_sp), 5598 .init = mlxsw_sp1_init, 5599 .fini = mlxsw_sp_fini, 5600 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5601 .port_split = mlxsw_sp_port_split, 5602 .port_unsplit = mlxsw_sp_port_unsplit, 5603 .sb_pool_get = mlxsw_sp_sb_pool_get, 5604 .sb_pool_set = mlxsw_sp_sb_pool_set, 5605 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5606 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5607 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5608 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5609 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5610 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5611 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5612 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5613 .flash_update = mlxsw_sp_flash_update, 5614 .trap_init = mlxsw_sp_trap_init, 5615 .trap_fini = mlxsw_sp_trap_fini, 5616 .trap_action_set = mlxsw_sp_trap_action_set, 5617 .trap_group_init = mlxsw_sp_trap_group_init, 5618 .txhdr_construct = mlxsw_sp_txhdr_construct, 5619 .resources_register = mlxsw_sp1_resources_register, 5620 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 5621 .params_register = mlxsw_sp_params_register, 5622 .params_unregister = mlxsw_sp_params_unregister, 5623 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5624 .txhdr_len = MLXSW_TXHDR_LEN, 5625 .profile = &mlxsw_sp1_config_profile, 5626 .res_query_enabled = true, 5627 }; 5628 5629 static struct mlxsw_driver mlxsw_sp2_driver = { 5630 .kind = mlxsw_sp2_driver_name, 5631 .priv_size = sizeof(struct mlxsw_sp), 5632 .init = mlxsw_sp2_init, 5633 .fini = mlxsw_sp_fini, 5634 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5635 .port_split = mlxsw_sp_port_split, 
5636 .port_unsplit = mlxsw_sp_port_unsplit, 5637 .sb_pool_get = mlxsw_sp_sb_pool_get, 5638 .sb_pool_set = mlxsw_sp_sb_pool_set, 5639 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5640 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5641 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5642 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5643 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5644 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5645 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5646 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5647 .flash_update = mlxsw_sp_flash_update, 5648 .trap_init = mlxsw_sp_trap_init, 5649 .trap_fini = mlxsw_sp_trap_fini, 5650 .trap_action_set = mlxsw_sp_trap_action_set, 5651 .trap_group_init = mlxsw_sp_trap_group_init, 5652 .txhdr_construct = mlxsw_sp_txhdr_construct, 5653 .resources_register = mlxsw_sp2_resources_register, 5654 .params_register = mlxsw_sp2_params_register, 5655 .params_unregister = mlxsw_sp2_params_unregister, 5656 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5657 .txhdr_len = MLXSW_TXHDR_LEN, 5658 .profile = &mlxsw_sp2_config_profile, 5659 .res_query_enabled = true, 5660 }; 5661 5662 static struct mlxsw_driver mlxsw_sp3_driver = { 5663 .kind = mlxsw_sp3_driver_name, 5664 .priv_size = sizeof(struct mlxsw_sp), 5665 .init = mlxsw_sp3_init, 5666 .fini = mlxsw_sp_fini, 5667 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5668 .port_split = mlxsw_sp_port_split, 5669 .port_unsplit = mlxsw_sp_port_unsplit, 5670 .sb_pool_get = mlxsw_sp_sb_pool_get, 5671 .sb_pool_set = mlxsw_sp_sb_pool_set, 5672 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5673 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5674 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5675 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5676 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5677 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5678 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5679 
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5680 .flash_update = mlxsw_sp_flash_update, 5681 .trap_init = mlxsw_sp_trap_init, 5682 .trap_fini = mlxsw_sp_trap_fini, 5683 .trap_action_set = mlxsw_sp_trap_action_set, 5684 .trap_group_init = mlxsw_sp_trap_group_init, 5685 .txhdr_construct = mlxsw_sp_txhdr_construct, 5686 .resources_register = mlxsw_sp2_resources_register, 5687 .params_register = mlxsw_sp2_params_register, 5688 .params_unregister = mlxsw_sp2_params_unregister, 5689 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5690 .txhdr_len = MLXSW_TXHDR_LEN, 5691 .profile = &mlxsw_sp2_config_profile, 5692 .res_query_enabled = true, 5693 }; 5694 5695 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 5696 { 5697 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 5698 } 5699 5700 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) 5701 { 5702 struct mlxsw_sp_port **p_mlxsw_sp_port = data; 5703 int ret = 0; 5704 5705 if (mlxsw_sp_port_dev_check(lower_dev)) { 5706 *p_mlxsw_sp_port = netdev_priv(lower_dev); 5707 ret = 1; 5708 } 5709 5710 return ret; 5711 } 5712 5713 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 5714 { 5715 struct mlxsw_sp_port *mlxsw_sp_port; 5716 5717 if (mlxsw_sp_port_dev_check(dev)) 5718 return netdev_priv(dev); 5719 5720 mlxsw_sp_port = NULL; 5721 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); 5722 5723 return mlxsw_sp_port; 5724 } 5725 5726 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 5727 { 5728 struct mlxsw_sp_port *mlxsw_sp_port; 5729 5730 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 5731 return mlxsw_sp_port ? 
mlxsw_sp_port->mlxsw_sp : NULL; 5732 } 5733 5734 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 5735 { 5736 struct mlxsw_sp_port *mlxsw_sp_port; 5737 5738 if (mlxsw_sp_port_dev_check(dev)) 5739 return netdev_priv(dev); 5740 5741 mlxsw_sp_port = NULL; 5742 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 5743 &mlxsw_sp_port); 5744 5745 return mlxsw_sp_port; 5746 } 5747 5748 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 5749 { 5750 struct mlxsw_sp_port *mlxsw_sp_port; 5751 5752 rcu_read_lock(); 5753 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 5754 if (mlxsw_sp_port) 5755 dev_hold(mlxsw_sp_port->dev); 5756 rcu_read_unlock(); 5757 return mlxsw_sp_port; 5758 } 5759 5760 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 5761 { 5762 dev_put(mlxsw_sp_port->dev); 5763 } 5764 5765 static void 5766 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 5767 struct net_device *lag_dev) 5768 { 5769 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 5770 struct net_device *upper_dev; 5771 struct list_head *iter; 5772 5773 if (netif_is_bridge_port(lag_dev)) 5774 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 5775 5776 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 5777 if (!netif_is_bridge_port(upper_dev)) 5778 continue; 5779 br_dev = netdev_master_upper_dev_get(upper_dev); 5780 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 5781 } 5782 } 5783 5784 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5785 { 5786 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5787 5788 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 5789 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5790 } 5791 5792 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5793 { 5794 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5795 5796 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 5797 return 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5798 } 5799 5800 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 5801 u16 lag_id, u8 port_index) 5802 { 5803 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5804 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5805 5806 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 5807 lag_id, port_index); 5808 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5809 } 5810 5811 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 5812 u16 lag_id) 5813 { 5814 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5815 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5816 5817 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 5818 lag_id); 5819 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5820 } 5821 5822 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 5823 u16 lag_id) 5824 { 5825 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5826 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5827 5828 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 5829 lag_id); 5830 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5831 } 5832 5833 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 5834 u16 lag_id) 5835 { 5836 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5837 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5838 5839 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 5840 lag_id); 5841 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5842 } 5843 5844 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 5845 struct net_device *lag_dev, 5846 u16 *p_lag_id) 5847 { 5848 struct mlxsw_sp_upper *lag; 5849 int free_lag_id = -1; 5850 u64 max_lag; 5851 int i; 5852 5853 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); 5854 for (i = 0; i < max_lag; i++) { 5855 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 5856 if 
(lag->ref_count) { 5857 if (lag->dev == lag_dev) { 5858 *p_lag_id = i; 5859 return 0; 5860 } 5861 } else if (free_lag_id < 0) { 5862 free_lag_id = i; 5863 } 5864 } 5865 if (free_lag_id < 0) 5866 return -EBUSY; 5867 *p_lag_id = free_lag_id; 5868 return 0; 5869 } 5870 5871 static bool 5872 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 5873 struct net_device *lag_dev, 5874 struct netdev_lag_upper_info *lag_upper_info, 5875 struct netlink_ext_ack *extack) 5876 { 5877 u16 lag_id; 5878 5879 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 5880 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 5881 return false; 5882 } 5883 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 5884 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 5885 return false; 5886 } 5887 return true; 5888 } 5889 5890 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 5891 u16 lag_id, u8 *p_port_index) 5892 { 5893 u64 max_lag_members; 5894 int i; 5895 5896 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 5897 MAX_LAG_MEMBERS); 5898 for (i = 0; i < max_lag_members; i++) { 5899 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 5900 *p_port_index = i; 5901 return 0; 5902 } 5903 } 5904 return -EBUSY; 5905 } 5906 5907 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 5908 struct net_device *lag_dev) 5909 { 5910 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5911 struct mlxsw_sp_upper *lag; 5912 u16 lag_id; 5913 u8 port_index; 5914 int err; 5915 5916 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 5917 if (err) 5918 return err; 5919 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5920 if (!lag->ref_count) { 5921 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 5922 if (err) 5923 return err; 5924 lag->dev = lag_dev; 5925 } 5926 5927 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 5928 if (err) 5929 return err; 5930 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, 
port_index); 5931 if (err) 5932 goto err_col_port_add; 5933 5934 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 5935 mlxsw_sp_port->local_port); 5936 mlxsw_sp_port->lag_id = lag_id; 5937 mlxsw_sp_port->lagged = 1; 5938 lag->ref_count++; 5939 5940 /* Port is no longer usable as a router interface */ 5941 if (mlxsw_sp_port->default_vlan->fid) 5942 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 5943 5944 return 0; 5945 5946 err_col_port_add: 5947 if (!lag->ref_count) 5948 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 5949 return err; 5950 } 5951 5952 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 5953 struct net_device *lag_dev) 5954 { 5955 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5956 u16 lag_id = mlxsw_sp_port->lag_id; 5957 struct mlxsw_sp_upper *lag; 5958 5959 if (!mlxsw_sp_port->lagged) 5960 return; 5961 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5962 WARN_ON(lag->ref_count == 0); 5963 5964 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 5965 5966 /* Any VLANs configured on the port are no longer valid */ 5967 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 5968 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 5969 /* Make the LAG and its directly linked uppers leave bridges they 5970 * are memeber in 5971 */ 5972 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 5973 5974 if (lag->ref_count == 1) 5975 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 5976 5977 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 5978 mlxsw_sp_port->local_port); 5979 mlxsw_sp_port->lagged = 0; 5980 lag->ref_count--; 5981 5982 /* Make sure untagged frames are allowed to ingress */ 5983 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 5984 } 5985 5986 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 5987 u16 lag_id) 5988 { 5989 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5990 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5991 5992 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, 
lag_id, 5993 mlxsw_sp_port->local_port); 5994 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5995 } 5996 5997 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 5998 u16 lag_id) 5999 { 6000 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6001 char sldr_pl[MLXSW_REG_SLDR_LEN]; 6002 6003 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 6004 mlxsw_sp_port->local_port); 6005 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 6006 } 6007 6008 static int 6009 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 6010 { 6011 int err; 6012 6013 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 6014 mlxsw_sp_port->lag_id); 6015 if (err) 6016 return err; 6017 6018 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 6019 if (err) 6020 goto err_dist_port_add; 6021 6022 return 0; 6023 6024 err_dist_port_add: 6025 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 6026 return err; 6027 } 6028 6029 static int 6030 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 6031 { 6032 int err; 6033 6034 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 6035 mlxsw_sp_port->lag_id); 6036 if (err) 6037 return err; 6038 6039 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 6040 mlxsw_sp_port->lag_id); 6041 if (err) 6042 goto err_col_port_disable; 6043 6044 return 0; 6045 6046 err_col_port_disable: 6047 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 6048 return err; 6049 } 6050 6051 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 6052 struct netdev_lag_lower_state_info *info) 6053 { 6054 if (info->tx_enabled) 6055 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 6056 else 6057 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 6058 } 6059 6060 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 6061 bool enable) 6062 { 6063 struct mlxsw_sp *mlxsw_sp = 
mlxsw_sp_port->mlxsw_sp; 6064 enum mlxsw_reg_spms_state spms_state; 6065 char *spms_pl; 6066 u16 vid; 6067 int err; 6068 6069 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : 6070 MLXSW_REG_SPMS_STATE_DISCARDING; 6071 6072 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 6073 if (!spms_pl) 6074 return -ENOMEM; 6075 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 6076 6077 for (vid = 0; vid < VLAN_N_VID; vid++) 6078 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 6079 6080 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 6081 kfree(spms_pl); 6082 return err; 6083 } 6084 6085 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 6086 { 6087 u16 vid = 1; 6088 int err; 6089 6090 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 6091 if (err) 6092 return err; 6093 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 6094 if (err) 6095 goto err_port_stp_set; 6096 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 6097 true, false); 6098 if (err) 6099 goto err_port_vlan_set; 6100 6101 for (; vid <= VLAN_N_VID - 1; vid++) { 6102 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 6103 vid, false); 6104 if (err) 6105 goto err_vid_learning_set; 6106 } 6107 6108 return 0; 6109 6110 err_vid_learning_set: 6111 for (vid--; vid >= 1; vid--) 6112 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 6113 err_port_vlan_set: 6114 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 6115 err_port_stp_set: 6116 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 6117 return err; 6118 } 6119 6120 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 6121 { 6122 u16 vid; 6123 6124 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 6125 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 6126 vid, true); 6127 6128 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 6129 false, false); 6130 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 6131 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 6132 } 6133 6134 static bool 
mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	/* Count the VxLAN lower devices of the bridge. */
	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

/* Check that no two VxLAN devices of a VLAN-aware bridge are mapped to
 * the same VLAN. Devices whose mapped VID cannot be determined (or is
 * zero) are skipped.
 */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		/* Second VxLAN device mapped to the same VLAN. */
		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

/* Validate that a bridge with VxLAN lower devices is in a configuration
 * the driver can offload; sets an extack message and returns false
 * otherwise.
 */
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

/* Handle PRECHANGEUPPER / CHANGEUPPER notifications for a front-panel
 * port device. PRECHANGEUPPER vetoes unsupported topologies via extack;
 * CHANGEUPPER commits join/leave of bridge, LAG, OVS and macvlan uppers.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking is always allowed; the checks below only
		 * constrain new enslavements.
		 */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				/* Stop distributing traffic to the port
				 * before removing it from the LAG.
				 */
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* Only act when a VLAN upper of this port leaves
			 * a bridge.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

/* Reflect a LAG lower-state change of a member port to the device. */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	/* The error is only logged; lower-state changes are not vetoed. */
	return 0;
}

/* Dispatch a netdev notification targeting a front-panel port device. */
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

/* Replicate a LAG device event to every mlxsw port member of the LAG. */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle PRECHANGEUPPER / CHANGEUPPER for a VLAN device whose real
 * device is an mlxsw port. Only bridge and macvlan uppers are
 * supported on top of such VLAN devices.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Only new enslavements are subject to the checks below. */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* PRECHANGEUPPER should have vetoed anything else. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

/* Replicate a VLAN-on-LAG event to every mlxsw port member of the LAG. */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle PRECHANGEUPPER / CHANGEUPPER for a VLAN device on top of a
 * bridge. Only macvlan uppers are supported there, and only on top of
 * router interfaces.
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Nothing to do if no mlxsw port is below this device. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Route a VLAN device event according to what its real device is:
 * an mlxsw port, a LAG or a bridge.
 */
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

/* Handle PRECHANGEUPPER / CHANGEUPPER for a bridge device itself. Only
 * VLAN and macvlan uppers are supported on top of an offloaded bridge.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Nothing to do if no mlxsw port is below this bridge. */
	if (!mlxsw_sp)
		return 0;

extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* On unlinking, tear down the router constructs that were
		 * instantiated for the upper.
		 */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* macvlan devices cannot have uppers of their own; veto any attempt.
 * VRF enslavement is the exception and is handled elsewhere.
 */
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

/* Return true if the notification is a [PRE]CHANGEUPPER whose upper is
 * an L3 master (VRF) device.
 */
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

/* Handle netdev events for a VxLAN device: validate and join / leave
 * its bridge's hardware domain as the device is linked, brought up or
 * taken down.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

/* Main netdev notifier callback: invalidate SPAN entries on unregister,
 * re-resolve SPAN, then dispatch the event by device type.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

/* Register the address-validator notifiers, then the core drivers and
 * PCI drivers for all three Spectrum generations; unwind in reverse
 * order on any failure.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

/* Unregister everything mlxsw_sp_module_init() set up, in reverse. */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);