1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/jhash.h> 25 #include <linux/log2.h> 26 #include <net/switchdev.h> 27 #include <net/pkt_cls.h> 28 #include <net/netevent.h> 29 #include <net/addrconf.h> 30 31 #include "spectrum.h" 32 #include "pci.h" 33 #include "core.h" 34 #include "core_env.h" 35 #include "reg.h" 36 #include "port.h" 37 #include "trap.h" 38 #include "txheader.h" 39 #include "spectrum_cnt.h" 40 #include "spectrum_dpipe.h" 41 #include "spectrum_acl_flex_actions.h" 42 #include "spectrum_span.h" 43 #include "spectrum_ptp.h" 44 #include "spectrum_trap.h" 45 #include "../mlxfw/mlxfw.h" 46 47 #define MLXSW_SP1_FWREV_MAJOR 13 48 #define MLXSW_SP1_FWREV_MINOR 2007 49 #define MLXSW_SP1_FWREV_SUBMINOR 1168 50 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 51 52 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 53 .major = MLXSW_SP1_FWREV_MAJOR, 54 .minor = MLXSW_SP1_FWREV_MINOR, 55 .subminor = MLXSW_SP1_FWREV_SUBMINOR, 56 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 57 }; 58 59 #define MLXSW_SP1_FW_FILENAME \ 60 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 61 "." __stringify(MLXSW_SP1_FWREV_MINOR) \ 62 "." 
__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" 63 64 #define MLXSW_SP2_FWREV_MAJOR 29 65 #define MLXSW_SP2_FWREV_MINOR 2007 66 #define MLXSW_SP2_FWREV_SUBMINOR 1168 67 68 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { 69 .major = MLXSW_SP2_FWREV_MAJOR, 70 .minor = MLXSW_SP2_FWREV_MINOR, 71 .subminor = MLXSW_SP2_FWREV_SUBMINOR, 72 }; 73 74 #define MLXSW_SP2_FW_FILENAME \ 75 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \ 76 "." __stringify(MLXSW_SP2_FWREV_MINOR) \ 77 "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2" 78 79 #define MLXSW_SP3_FWREV_MAJOR 30 80 #define MLXSW_SP3_FWREV_MINOR 2007 81 #define MLXSW_SP3_FWREV_SUBMINOR 1168 82 83 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { 84 .major = MLXSW_SP3_FWREV_MAJOR, 85 .minor = MLXSW_SP3_FWREV_MINOR, 86 .subminor = MLXSW_SP3_FWREV_SUBMINOR, 87 }; 88 89 #define MLXSW_SP3_FW_FILENAME \ 90 "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \ 91 "." __stringify(MLXSW_SP3_FWREV_MINOR) \ 92 "." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2" 93 94 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 95 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 96 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; 97 static const char mlxsw_sp_driver_version[] = "1.0"; 98 99 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 100 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 101 }; 102 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 103 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 104 }; 105 106 /* tx_hdr_version 107 * Tx header version. 108 * Must be set to 1. 109 */ 110 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 111 112 /* tx_hdr_ctl 113 * Packet control type. 114 * 0 - Ethernet control (e.g. EMADs, LACP) 115 * 1 - Ethernet data 116 */ 117 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 118 119 /* tx_hdr_proto 120 * Packet protocol type. Must be set to 1 (Ethernet). 
121 */ 122 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 123 124 /* tx_hdr_rx_is_router 125 * Packet is sent from the router. Valid for data packets only. 126 */ 127 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 128 129 /* tx_hdr_fid_valid 130 * Indicates if the 'fid' field is valid and should be used for 131 * forwarding lookup. Valid for data packets only. 132 */ 133 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 134 135 /* tx_hdr_swid 136 * Switch partition ID. Must be set to 0. 137 */ 138 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 139 140 /* tx_hdr_control_tclass 141 * Indicates if the packet should use the control TClass and not one 142 * of the data TClasses. 143 */ 144 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 145 146 /* tx_hdr_etclass 147 * Egress TClass to be used on the egress device on the egress port. 148 */ 149 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 150 151 /* tx_hdr_port_mid 152 * Destination local port for unicast packets. 153 * Destination multicast ID for multicast packets. 154 * 155 * Control packets are directed to a specific egress port, while data 156 * packets are transmitted through the CPU port (0) into the switch partition, 157 * where forwarding rules are applied. 158 */ 159 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 160 161 /* tx_hdr_fid 162 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 163 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 164 * Valid for data packets only. 
165 */ 166 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 167 168 /* tx_hdr_type 169 * 0 - Data packets 170 * 6 - Control packets 171 */ 172 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 173 174 struct mlxsw_sp_mlxfw_dev { 175 struct mlxfw_dev mlxfw_dev; 176 struct mlxsw_sp *mlxsw_sp; 177 }; 178 179 struct mlxsw_sp_ptp_ops { 180 struct mlxsw_sp_ptp_clock * 181 (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev); 182 void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock); 183 184 struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp); 185 void (*fini)(struct mlxsw_sp_ptp_state *ptp_state); 186 187 /* Notify a driver that a packet that might be PTP was received. Driver 188 * is responsible for freeing the passed-in SKB. 189 */ 190 void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 191 u8 local_port); 192 193 /* Notify a driver that a timestamped packet was transmitted. Driver 194 * is responsible for freeing the passed-in SKB. 195 */ 196 void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 197 u8 local_port); 198 199 int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, 200 struct hwtstamp_config *config); 201 int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port, 202 struct hwtstamp_config *config); 203 void (*shaper_work)(struct work_struct *work); 204 int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, 205 struct ethtool_ts_info *info); 206 int (*get_stats_count)(void); 207 void (*get_stats_strings)(u8 **p); 208 void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, 209 u64 *data, int data_index); 210 }; 211 212 struct mlxsw_sp_span_ops { 213 u32 (*buffsize_get)(int mtu, u32 speed); 214 }; 215 216 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 217 u16 component_index, u32 *p_max_size, 218 u8 *p_align_bits, u16 *p_max_write_size) 219 { 220 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 221 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 222 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 223 
char mcqi_pl[MLXSW_REG_MCQI_LEN]; 224 int err; 225 226 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 227 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 228 if (err) 229 return err; 230 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 231 p_max_write_size); 232 233 *p_align_bits = max_t(u8, *p_align_bits, 2); 234 *p_max_write_size = min_t(u16, *p_max_write_size, 235 MLXSW_REG_MCDA_MAX_DATA_LEN); 236 return 0; 237 } 238 239 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 240 { 241 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 242 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 243 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 244 char mcc_pl[MLXSW_REG_MCC_LEN]; 245 u8 control_state; 246 int err; 247 248 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 249 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 250 if (err) 251 return err; 252 253 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 254 if (control_state != MLXFW_FSM_STATE_IDLE) 255 return -EBUSY; 256 257 mlxsw_reg_mcc_pack(mcc_pl, 258 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 259 0, *fwhandle, 0); 260 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 261 } 262 263 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 264 u32 fwhandle, u16 component_index, 265 u32 component_size) 266 { 267 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 268 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 269 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 270 char mcc_pl[MLXSW_REG_MCC_LEN]; 271 272 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 273 component_index, fwhandle, component_size); 274 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 275 } 276 277 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 278 u32 fwhandle, u8 *data, u16 size, 279 u32 offset) 280 { 281 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 
282 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 283 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 284 char mcda_pl[MLXSW_REG_MCDA_LEN]; 285 286 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 287 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 288 } 289 290 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 291 u32 fwhandle, u16 component_index) 292 { 293 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 294 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 295 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 296 char mcc_pl[MLXSW_REG_MCC_LEN]; 297 298 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 299 component_index, fwhandle, 0); 300 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 301 } 302 303 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 304 { 305 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 306 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 307 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 308 char mcc_pl[MLXSW_REG_MCC_LEN]; 309 310 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 311 fwhandle, 0); 312 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 313 } 314 315 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 316 enum mlxfw_fsm_state *fsm_state, 317 enum mlxfw_fsm_state_err *fsm_state_err) 318 { 319 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 320 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 321 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 322 char mcc_pl[MLXSW_REG_MCC_LEN]; 323 u8 control_state; 324 u8 error_code; 325 int err; 326 327 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 328 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 329 if (err) 330 return err; 331 332 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 333 *fsm_state = 
control_state; 334 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 335 MLXFW_FSM_STATE_ERR_MAX); 336 return 0; 337 } 338 339 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 340 { 341 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 342 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 343 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 344 char mcc_pl[MLXSW_REG_MCC_LEN]; 345 346 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 347 fwhandle, 0); 348 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 349 } 350 351 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 352 { 353 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 354 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 355 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 356 char mcc_pl[MLXSW_REG_MCC_LEN]; 357 358 mlxsw_reg_mcc_pack(mcc_pl, 359 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 360 fwhandle, 0); 361 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 362 } 363 364 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 365 .component_query = mlxsw_sp_component_query, 366 .fsm_lock = mlxsw_sp_fsm_lock, 367 .fsm_component_update = mlxsw_sp_fsm_component_update, 368 .fsm_block_download = mlxsw_sp_fsm_block_download, 369 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 370 .fsm_activate = mlxsw_sp_fsm_activate, 371 .fsm_query_state = mlxsw_sp_fsm_query_state, 372 .fsm_cancel = mlxsw_sp_fsm_cancel, 373 .fsm_release = mlxsw_sp_fsm_release, 374 }; 375 376 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 377 const struct firmware *firmware, 378 struct netlink_ext_ack *extack) 379 { 380 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 381 .mlxfw_dev = { 382 .ops = &mlxsw_sp_mlxfw_dev_ops, 383 .psid = mlxsw_sp->bus_info->psid, 384 .psid_size = strlen(mlxsw_sp->bus_info->psid), 385 .devlink = priv_to_devlink(mlxsw_sp->core), 386 }, 387 .mlxsw_sp = 
mlxsw_sp 388 }; 389 int err; 390 391 mlxsw_core_fw_flash_start(mlxsw_sp->core); 392 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, 393 firmware, extack); 394 mlxsw_core_fw_flash_end(mlxsw_sp->core); 395 396 return err; 397 } 398 399 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 400 { 401 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 402 const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 403 const char *fw_filename = mlxsw_sp->fw_filename; 404 union devlink_param_value value; 405 const struct firmware *firmware; 406 int err; 407 408 /* Don't check if driver does not require it */ 409 if (!req_rev || !fw_filename) 410 return 0; 411 412 /* Don't check if devlink 'fw_load_policy' param is 'flash' */ 413 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), 414 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 415 &value); 416 if (err) 417 return err; 418 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) 419 return 0; 420 421 /* Validate driver & FW are compatible */ 422 if (rev->major != req_rev->major) { 423 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 424 rev->major, req_rev->major); 425 return -EINVAL; 426 } 427 if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev)) 428 return 0; 429 430 dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n", 431 rev->major, rev->minor, rev->subminor, req_rev->major, 432 req_rev->minor, req_rev->subminor); 433 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 434 fw_filename); 435 436 err = request_firmware_direct(&firmware, fw_filename, 437 mlxsw_sp->bus_info->dev); 438 if (err) { 439 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 440 fw_filename); 441 return err; 442 } 443 444 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL); 445 release_firmware(firmware); 446 if (err) 447 
dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 448 449 /* On FW flash success, tell the caller FW reset is needed 450 * if current FW supports it. 451 */ 452 if (rev->minor >= req_rev->can_reset_minor) 453 return err ? err : -EAGAIN; 454 else 455 return 0; 456 } 457 458 static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core, 459 const char *file_name, const char *component, 460 struct netlink_ext_ack *extack) 461 { 462 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 463 const struct firmware *firmware; 464 int err; 465 466 if (component) 467 return -EOPNOTSUPP; 468 469 err = request_firmware_direct(&firmware, file_name, 470 mlxsw_sp->bus_info->dev); 471 if (err) 472 return err; 473 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack); 474 release_firmware(firmware); 475 476 return err; 477 } 478 479 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 480 unsigned int counter_index, u64 *packets, 481 u64 *bytes) 482 { 483 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 484 int err; 485 486 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 487 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 488 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 489 if (err) 490 return err; 491 if (packets) 492 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 493 if (bytes) 494 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 495 return 0; 496 } 497 498 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 499 unsigned int counter_index) 500 { 501 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 502 503 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 504 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 505 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 506 } 507 508 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 509 unsigned int *p_counter_index) 510 { 511 int err; 512 513 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 514 
p_counter_index); 515 if (err) 516 return err; 517 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 518 if (err) 519 goto err_counter_clear; 520 return 0; 521 522 err_counter_clear: 523 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 524 *p_counter_index); 525 return err; 526 } 527 528 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 529 unsigned int counter_index) 530 { 531 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 532 counter_index); 533 } 534 535 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 536 const struct mlxsw_tx_info *tx_info) 537 { 538 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 539 540 memset(txhdr, 0, MLXSW_TXHDR_LEN); 541 542 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 543 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 544 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 545 mlxsw_tx_hdr_swid_set(txhdr, 0); 546 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 547 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 548 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 549 } 550 551 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 552 { 553 switch (state) { 554 case BR_STATE_FORWARDING: 555 return MLXSW_REG_SPMS_STATE_FORWARDING; 556 case BR_STATE_LEARNING: 557 return MLXSW_REG_SPMS_STATE_LEARNING; 558 case BR_STATE_LISTENING: /* fall-through */ 559 case BR_STATE_DISABLED: /* fall-through */ 560 case BR_STATE_BLOCKING: 561 return MLXSW_REG_SPMS_STATE_DISCARDING; 562 default: 563 BUG(); 564 } 565 } 566 567 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 568 u8 state) 569 { 570 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 571 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 572 char *spms_pl; 573 int err; 574 575 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 576 if (!spms_pl) 577 return -ENOMEM; 578 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 579 
mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 580 581 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 582 kfree(spms_pl); 583 return err; 584 } 585 586 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 587 { 588 char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 589 int err; 590 591 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 592 if (err) 593 return err; 594 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 595 return 0; 596 } 597 598 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 599 bool is_up) 600 { 601 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 602 char paos_pl[MLXSW_REG_PAOS_LEN]; 603 604 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 605 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 606 MLXSW_PORT_ADMIN_STATUS_DOWN); 607 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 608 } 609 610 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 611 unsigned char *addr) 612 { 613 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 614 char ppad_pl[MLXSW_REG_PPAD_LEN]; 615 616 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 617 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 618 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 619 } 620 621 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 622 { 623 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 624 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 625 626 ether_addr_copy(addr, mlxsw_sp->base_mac); 627 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 628 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 629 } 630 631 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 632 { 633 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 634 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 635 int max_mtu; 636 int err; 637 638 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 639 mlxsw_reg_pmtu_pack(pmtu_pl, 
mlxsw_sp_port->local_port, 0); 640 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 641 if (err) 642 return err; 643 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 644 645 if (mtu > max_mtu) 646 return -EINVAL; 647 648 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 649 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 650 } 651 652 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 653 { 654 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 655 char pspa_pl[MLXSW_REG_PSPA_LEN]; 656 657 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 658 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 659 } 660 661 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 662 { 663 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 664 char svpe_pl[MLXSW_REG_SVPE_LEN]; 665 666 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 667 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 668 } 669 670 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 671 bool learn_enable) 672 { 673 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 674 char *spvmlr_pl; 675 int err; 676 677 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 678 if (!spvmlr_pl) 679 return -ENOMEM; 680 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 681 learn_enable); 682 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 683 kfree(spvmlr_pl); 684 return err; 685 } 686 687 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 688 u16 vid) 689 { 690 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 691 char spvid_pl[MLXSW_REG_SPVID_LEN]; 692 693 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 694 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 695 } 696 697 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port 
*mlxsw_sp_port, 698 bool allow) 699 { 700 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 701 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 702 703 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 704 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 705 } 706 707 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 708 { 709 int err; 710 711 if (!vid) { 712 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 713 if (err) 714 return err; 715 } else { 716 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 717 if (err) 718 return err; 719 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 720 if (err) 721 goto err_port_allow_untagged_set; 722 } 723 724 mlxsw_sp_port->pvid = vid; 725 return 0; 726 727 err_port_allow_untagged_set: 728 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 729 return err; 730 } 731 732 static int 733 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 734 { 735 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 736 char sspr_pl[MLXSW_REG_SSPR_LEN]; 737 738 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 739 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 740 } 741 742 static int 743 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, 744 struct mlxsw_sp_port_mapping *port_mapping) 745 { 746 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 747 bool separate_rxtx; 748 u8 module; 749 u8 width; 750 int err; 751 int i; 752 753 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 754 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 755 if (err) 756 return err; 757 module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 758 width = mlxsw_reg_pmlp_width_get(pmlp_pl); 759 separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl); 760 761 if (width && !is_power_of_2(width)) { 762 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n", 763 local_port); 764 return -EINVAL; 765 } 
766 767 for (i = 0; i < width; i++) { 768 if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) { 769 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n", 770 local_port); 771 return -EINVAL; 772 } 773 if (separate_rxtx && 774 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != 775 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) { 776 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n", 777 local_port); 778 return -EINVAL; 779 } 780 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) { 781 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n", 782 local_port); 783 return -EINVAL; 784 } 785 } 786 787 port_mapping->module = module; 788 port_mapping->width = width; 789 port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 790 return 0; 791 } 792 793 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port) 794 { 795 struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping; 796 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 797 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 798 int i; 799 800 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 801 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); 802 for (i = 0; i < port_mapping->width; i++) { 803 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); 804 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ 805 } 806 807 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 808 } 809 810 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 811 { 812 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 813 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 814 815 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 816 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 817 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 818 } 819 820 static int mlxsw_sp_port_open(struct net_device 
*dev) 821 { 822 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 823 int err; 824 825 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 826 if (err) 827 return err; 828 netif_start_queue(dev); 829 return 0; 830 } 831 832 static int mlxsw_sp_port_stop(struct net_device *dev) 833 { 834 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 835 836 netif_stop_queue(dev); 837 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 838 } 839 840 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 841 struct net_device *dev) 842 { 843 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 844 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 845 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 846 const struct mlxsw_tx_info tx_info = { 847 .local_port = mlxsw_sp_port->local_port, 848 .is_emad = false, 849 }; 850 u64 len; 851 int err; 852 853 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { 854 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 855 dev_kfree_skb_any(skb); 856 return NETDEV_TX_OK; 857 } 858 859 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); 860 861 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 862 return NETDEV_TX_BUSY; 863 864 if (eth_skb_pad(skb)) { 865 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 866 return NETDEV_TX_OK; 867 } 868 869 mlxsw_sp_txhdr_construct(skb, &tx_info); 870 /* TX header is consumed by HW on the way so we shouldn't count its 871 * bytes as being sent. 872 */ 873 len = skb->len - MLXSW_TXHDR_LEN; 874 875 /* Due to a race we might fail here because of a full queue. In that 876 * unlikely case we simply drop the packet. 
877 */ 878 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 879 880 if (!err) { 881 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 882 u64_stats_update_begin(&pcpu_stats->syncp); 883 pcpu_stats->tx_packets++; 884 pcpu_stats->tx_bytes += len; 885 u64_stats_update_end(&pcpu_stats->syncp); 886 } else { 887 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 888 dev_kfree_skb_any(skb); 889 } 890 return NETDEV_TX_OK; 891 } 892 893 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 894 { 895 } 896 897 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 898 { 899 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 900 struct sockaddr *addr = p; 901 int err; 902 903 if (!is_valid_ether_addr(addr->sa_data)) 904 return -EADDRNOTAVAIL; 905 906 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 907 if (err) 908 return err; 909 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 910 return 0; 911 } 912 913 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 914 int mtu) 915 { 916 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 917 } 918 919 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 920 921 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 922 u16 delay) 923 { 924 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 925 BITS_PER_BYTE)); 926 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 927 mtu); 928 } 929 930 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 931 * Assumes 100m cable and maximum MTU. 
932 */ 933 #define MLXSW_SP_PAUSE_DELAY 58752 934 935 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 936 u16 delay, bool pfc, bool pause) 937 { 938 if (pfc) 939 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 940 else if (pause) 941 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 942 else 943 return 0; 944 } 945 946 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 947 bool lossy) 948 { 949 if (lossy) 950 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 951 else 952 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 953 thres); 954 } 955 956 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 957 u8 *prio_tc, bool pause_en, 958 struct ieee_pfc *my_pfc) 959 { 960 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 961 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 962 u16 delay = !!my_pfc ? my_pfc->delay : 0; 963 char pbmc_pl[MLXSW_REG_PBMC_LEN]; 964 u32 taken_headroom_cells = 0; 965 u32 max_headroom_cells; 966 int i, j, err; 967 968 max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp); 969 970 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); 971 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 972 if (err) 973 return err; 974 975 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 976 bool configure = false; 977 bool pfc = false; 978 u16 thres_cells; 979 u16 delay_cells; 980 u16 total_cells; 981 bool lossy; 982 983 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 984 if (prio_tc[j] == i) { 985 pfc = pfc_en & BIT(j); 986 configure = true; 987 break; 988 } 989 } 990 991 if (!configure) 992 continue; 993 994 lossy = !(pfc || pause_en); 995 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 996 thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells); 997 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, 998 pfc, pause_en); 999 delay_cells = 
mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells); 1000 total_cells = thres_cells + delay_cells; 1001 1002 taken_headroom_cells += total_cells; 1003 if (taken_headroom_cells > max_headroom_cells) 1004 return -ENOBUFS; 1005 1006 mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells, 1007 thres_cells, lossy); 1008 } 1009 1010 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 1011 } 1012 1013 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, 1014 int mtu, bool pause_en) 1015 { 1016 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; 1017 bool dcb_en = !!mlxsw_sp_port->dcb.ets; 1018 struct ieee_pfc *my_pfc; 1019 u8 *prio_tc; 1020 1021 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; 1022 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL; 1023 1024 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, 1025 pause_en, my_pfc); 1026 } 1027 1028 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 1029 { 1030 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1031 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1032 int err; 1033 1034 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); 1035 if (err) 1036 return err; 1037 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); 1038 if (err) 1039 goto err_span_port_mtu_update; 1040 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 1041 if (err) 1042 goto err_port_mtu_set; 1043 dev->mtu = mtu; 1044 return 0; 1045 1046 err_port_mtu_set: 1047 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); 1048 err_span_port_mtu_update: 1049 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1050 return err; 1051 } 1052 1053 static int 1054 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 1055 struct rtnl_link_stats64 *stats) 1056 { 1057 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1058 struct mlxsw_sp_port_pcpu_stats *p; 1059 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1060 u32 tx_dropped = 0; 1061 
unsigned int start; 1062 int i; 1063 1064 for_each_possible_cpu(i) { 1065 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 1066 do { 1067 start = u64_stats_fetch_begin_irq(&p->syncp); 1068 rx_packets = p->rx_packets; 1069 rx_bytes = p->rx_bytes; 1070 tx_packets = p->tx_packets; 1071 tx_bytes = p->tx_bytes; 1072 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 1073 1074 stats->rx_packets += rx_packets; 1075 stats->rx_bytes += rx_bytes; 1076 stats->tx_packets += tx_packets; 1077 stats->tx_bytes += tx_bytes; 1078 /* tx_dropped is u32, updated without syncp protection. */ 1079 tx_dropped += p->tx_dropped; 1080 } 1081 stats->tx_dropped = tx_dropped; 1082 return 0; 1083 } 1084 1085 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) 1086 { 1087 switch (attr_id) { 1088 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1089 return true; 1090 } 1091 1092 return false; 1093 } 1094 1095 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev, 1096 void *sp) 1097 { 1098 switch (attr_id) { 1099 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1100 return mlxsw_sp_port_get_sw_stats64(dev, sp); 1101 } 1102 1103 return -EINVAL; 1104 } 1105 1106 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, 1107 int prio, char *ppcnt_pl) 1108 { 1109 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1110 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1111 1112 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); 1113 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 1114 } 1115 1116 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev, 1117 struct rtnl_link_stats64 *stats) 1118 { 1119 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1120 int err; 1121 1122 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 1123 0, ppcnt_pl); 1124 if (err) 1125 goto out; 1126 1127 stats->tx_packets = 1128 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); 1129 stats->rx_packets = 1130 
mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); 1131 stats->tx_bytes = 1132 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); 1133 stats->rx_bytes = 1134 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 1135 stats->multicast = 1136 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 1137 1138 stats->rx_crc_errors = 1139 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 1140 stats->rx_frame_errors = 1141 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 1142 1143 stats->rx_length_errors = ( 1144 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) + 1145 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) + 1146 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl)); 1147 1148 stats->rx_errors = (stats->rx_crc_errors + 1149 stats->rx_frame_errors + stats->rx_length_errors); 1150 1151 out: 1152 return err; 1153 } 1154 1155 static void 1156 mlxsw_sp_port_get_hw_xstats(struct net_device *dev, 1157 struct mlxsw_sp_port_xstats *xstats) 1158 { 1159 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1160 int err, i; 1161 1162 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 1163 ppcnt_pl); 1164 if (!err) 1165 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); 1166 1167 for (i = 0; i < TC_MAX_QUEUE; i++) { 1168 err = mlxsw_sp_port_get_stats_raw(dev, 1169 MLXSW_REG_PPCNT_TC_CONG_TC, 1170 i, ppcnt_pl); 1171 if (!err) 1172 xstats->wred_drop[i] = 1173 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); 1174 1175 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, 1176 i, ppcnt_pl); 1177 if (err) 1178 continue; 1179 1180 xstats->backlog[i] = 1181 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 1182 xstats->tail_drop[i] = 1183 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); 1184 } 1185 1186 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1187 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, 1188 i, ppcnt_pl); 1189 if (err) 1190 continue; 1191 1192 xstats->tx_packets[i] = 
mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); 1193 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); 1194 } 1195 } 1196 1197 static void update_stats_cache(struct work_struct *work) 1198 { 1199 struct mlxsw_sp_port *mlxsw_sp_port = 1200 container_of(work, struct mlxsw_sp_port, 1201 periodic_hw_stats.update_dw.work); 1202 1203 if (!netif_carrier_ok(mlxsw_sp_port->dev)) 1204 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as 1205 * necessary when port goes down. 1206 */ 1207 goto out; 1208 1209 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, 1210 &mlxsw_sp_port->periodic_hw_stats.stats); 1211 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev, 1212 &mlxsw_sp_port->periodic_hw_stats.xstats); 1213 1214 out: 1215 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 1216 MLXSW_HW_STATS_UPDATE_TIME); 1217 } 1218 1219 /* Return the stats from a cache that is updated periodically, 1220 * as this function might get called in an atomic context. 1221 */ 1222 static void 1223 mlxsw_sp_port_get_stats64(struct net_device *dev, 1224 struct rtnl_link_stats64 *stats) 1225 { 1226 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1227 1228 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats)); 1229 } 1230 1231 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, 1232 u16 vid_begin, u16 vid_end, 1233 bool is_member, bool untagged) 1234 { 1235 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1236 char *spvm_pl; 1237 int err; 1238 1239 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); 1240 if (!spvm_pl) 1241 return -ENOMEM; 1242 1243 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, 1244 vid_end, is_member, untagged); 1245 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); 1246 kfree(spvm_pl); 1247 return err; 1248 } 1249 1250 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, 1251 u16 vid_end, bool is_member, bool untagged) 1252 { 1253 u16 vid, vid_e; 
1254 int err; 1255 1256 for (vid = vid_begin; vid <= vid_end; 1257 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 1258 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), 1259 vid_end); 1260 1261 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, 1262 is_member, untagged); 1263 if (err) 1264 return err; 1265 } 1266 1267 return 0; 1268 } 1269 1270 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port, 1271 bool flush_default) 1272 { 1273 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp; 1274 1275 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp, 1276 &mlxsw_sp_port->vlans_list, list) { 1277 if (!flush_default && 1278 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID) 1279 continue; 1280 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1281 } 1282 } 1283 1284 static void 1285 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1286 { 1287 if (mlxsw_sp_port_vlan->bridge_port) 1288 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 1289 else if (mlxsw_sp_port_vlan->fid) 1290 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 1291 } 1292 1293 struct mlxsw_sp_port_vlan * 1294 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 1295 { 1296 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1297 bool untagged = vid == MLXSW_SP_DEFAULT_VID; 1298 int err; 1299 1300 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1301 if (mlxsw_sp_port_vlan) 1302 return ERR_PTR(-EEXIST); 1303 1304 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged); 1305 if (err) 1306 return ERR_PTR(err); 1307 1308 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL); 1309 if (!mlxsw_sp_port_vlan) { 1310 err = -ENOMEM; 1311 goto err_port_vlan_alloc; 1312 } 1313 1314 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; 1315 mlxsw_sp_port_vlan->vid = vid; 1316 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); 1317 1318 return mlxsw_sp_port_vlan; 1319 1320 
err_port_vlan_alloc: 1321 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1322 return ERR_PTR(err); 1323 } 1324 1325 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1326 { 1327 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 1328 u16 vid = mlxsw_sp_port_vlan->vid; 1329 1330 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan); 1331 list_del(&mlxsw_sp_port_vlan->list); 1332 kfree(mlxsw_sp_port_vlan); 1333 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1334 } 1335 1336 static int mlxsw_sp_port_add_vid(struct net_device *dev, 1337 __be16 __always_unused proto, u16 vid) 1338 { 1339 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1340 1341 /* VLAN 0 is added to HW filter when device goes up, but it is 1342 * reserved in our case, so simply return. 1343 */ 1344 if (!vid) 1345 return 0; 1346 1347 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid)); 1348 } 1349 1350 static int mlxsw_sp_port_kill_vid(struct net_device *dev, 1351 __be16 __always_unused proto, u16 vid) 1352 { 1353 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1354 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1355 1356 /* VLAN 0 is removed from HW filter when device goes down, but 1357 * it is reserved in our case, so simply return. 
1358 */ 1359 if (!vid) 1360 return 0; 1361 1362 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1363 if (!mlxsw_sp_port_vlan) 1364 return 0; 1365 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1366 1367 return 0; 1368 } 1369 1370 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, 1371 void *type_data) 1372 { 1373 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1374 1375 switch (type) { 1376 case TC_SETUP_BLOCK: 1377 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1378 case TC_SETUP_QDISC_RED: 1379 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1380 case TC_SETUP_QDISC_PRIO: 1381 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data); 1382 case TC_SETUP_QDISC_ETS: 1383 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data); 1384 case TC_SETUP_QDISC_TBF: 1385 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data); 1386 case TC_SETUP_QDISC_FIFO: 1387 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data); 1388 default: 1389 return -EOPNOTSUPP; 1390 } 1391 } 1392 1393 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) 1394 { 1395 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1396 1397 if (!enable) { 1398 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) || 1399 mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) { 1400 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); 1401 return -EINVAL; 1402 } 1403 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block); 1404 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block); 1405 } else { 1406 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block); 1407 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block); 1408 } 1409 return 0; 1410 } 1411 1412 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable) 1413 { 1414 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1415 char pplr_pl[MLXSW_REG_PPLR_LEN]; 1416 int 
err; 1417 1418 if (netif_running(dev)) 1419 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1420 1421 mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable); 1422 err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr), 1423 pplr_pl); 1424 1425 if (netif_running(dev)) 1426 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 1427 1428 return err; 1429 } 1430 1431 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable); 1432 1433 static int mlxsw_sp_handle_feature(struct net_device *dev, 1434 netdev_features_t wanted_features, 1435 netdev_features_t feature, 1436 mlxsw_sp_feature_handler feature_handler) 1437 { 1438 netdev_features_t changes = wanted_features ^ dev->features; 1439 bool enable = !!(wanted_features & feature); 1440 int err; 1441 1442 if (!(changes & feature)) 1443 return 0; 1444 1445 err = feature_handler(dev, enable); 1446 if (err) { 1447 netdev_err(dev, "%s feature %pNF failed, err %d\n", 1448 enable ? "Enable" : "Disable", &feature, err); 1449 return err; 1450 } 1451 1452 if (enable) 1453 dev->features |= feature; 1454 else 1455 dev->features &= ~feature; 1456 1457 return 0; 1458 } 1459 static int mlxsw_sp_set_features(struct net_device *dev, 1460 netdev_features_t features) 1461 { 1462 netdev_features_t oper_features = dev->features; 1463 int err = 0; 1464 1465 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC, 1466 mlxsw_sp_feature_hw_tc); 1467 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK, 1468 mlxsw_sp_feature_loopback); 1469 1470 if (err) { 1471 dev->features = oper_features; 1472 return -EINVAL; 1473 } 1474 1475 return 0; 1476 } 1477 1478 static struct devlink_port * 1479 mlxsw_sp_port_get_devlink_port(struct net_device *dev) 1480 { 1481 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1482 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1483 1484 return mlxsw_core_port_devlink_port_get(mlxsw_sp->core, 1485 mlxsw_sp_port->local_port); 1486 } 1487 1488 
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, 1489 struct ifreq *ifr) 1490 { 1491 struct hwtstamp_config config; 1492 int err; 1493 1494 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 1495 return -EFAULT; 1496 1497 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, 1498 &config); 1499 if (err) 1500 return err; 1501 1502 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1503 return -EFAULT; 1504 1505 return 0; 1506 } 1507 1508 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, 1509 struct ifreq *ifr) 1510 { 1511 struct hwtstamp_config config; 1512 int err; 1513 1514 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port, 1515 &config); 1516 if (err) 1517 return err; 1518 1519 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1520 return -EFAULT; 1521 1522 return 0; 1523 } 1524 1525 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port) 1526 { 1527 struct hwtstamp_config config = {0}; 1528 1529 mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config); 1530 } 1531 1532 static int 1533 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1534 { 1535 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1536 1537 switch (cmd) { 1538 case SIOCSHWTSTAMP: 1539 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr); 1540 case SIOCGHWTSTAMP: 1541 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr); 1542 default: 1543 return -EOPNOTSUPP; 1544 } 1545 } 1546 1547 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1548 .ndo_open = mlxsw_sp_port_open, 1549 .ndo_stop = mlxsw_sp_port_stop, 1550 .ndo_start_xmit = mlxsw_sp_port_xmit, 1551 .ndo_setup_tc = mlxsw_sp_setup_tc, 1552 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1553 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1554 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1555 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1556 .ndo_has_offload_stats 
= mlxsw_sp_port_has_offload_stats, 1557 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1558 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1559 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1560 .ndo_set_features = mlxsw_sp_set_features, 1561 .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port, 1562 .ndo_do_ioctl = mlxsw_sp_port_ioctl, 1563 }; 1564 1565 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 1566 struct ethtool_drvinfo *drvinfo) 1567 { 1568 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1569 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1570 1571 strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, 1572 sizeof(drvinfo->driver)); 1573 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 1574 sizeof(drvinfo->version)); 1575 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 1576 "%d.%d.%d", 1577 mlxsw_sp->bus_info->fw_rev.major, 1578 mlxsw_sp->bus_info->fw_rev.minor, 1579 mlxsw_sp->bus_info->fw_rev.subminor); 1580 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 1581 sizeof(drvinfo->bus_info)); 1582 } 1583 1584 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, 1585 struct ethtool_pauseparam *pause) 1586 { 1587 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1588 1589 pause->rx_pause = mlxsw_sp_port->link.rx_pause; 1590 pause->tx_pause = mlxsw_sp_port->link.tx_pause; 1591 } 1592 1593 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, 1594 struct ethtool_pauseparam *pause) 1595 { 1596 char pfcc_pl[MLXSW_REG_PFCC_LEN]; 1597 1598 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); 1599 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); 1600 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); 1601 1602 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), 1603 pfcc_pl); 1604 } 1605 1606 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, 1607 struct ethtool_pauseparam *pause) 1608 { 1609 struct mlxsw_sp_port 
*mlxsw_sp_port = netdev_priv(dev); 1610 bool pause_en = pause->tx_pause || pause->rx_pause; 1611 int err; 1612 1613 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { 1614 netdev_err(dev, "PFC already enabled on port\n"); 1615 return -EINVAL; 1616 } 1617 1618 if (pause->autoneg) { 1619 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); 1620 return -EINVAL; 1621 } 1622 1623 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1624 if (err) { 1625 netdev_err(dev, "Failed to configure port's headroom\n"); 1626 return err; 1627 } 1628 1629 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); 1630 if (err) { 1631 netdev_err(dev, "Failed to set PAUSE parameters\n"); 1632 goto err_port_pause_configure; 1633 } 1634 1635 mlxsw_sp_port->link.rx_pause = pause->rx_pause; 1636 mlxsw_sp_port->link.tx_pause = pause->tx_pause; 1637 1638 return 0; 1639 1640 err_port_pause_configure: 1641 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1642 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1643 return err; 1644 } 1645 1646 struct mlxsw_sp_port_hw_stats { 1647 char str[ETH_GSTRING_LEN]; 1648 u64 (*getter)(const char *payload); 1649 bool cells_bytes; 1650 }; 1651 1652 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 1653 { 1654 .str = "a_frames_transmitted_ok", 1655 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 1656 }, 1657 { 1658 .str = "a_frames_received_ok", 1659 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 1660 }, 1661 { 1662 .str = "a_frame_check_sequence_errors", 1663 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 1664 }, 1665 { 1666 .str = "a_alignment_errors", 1667 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 1668 }, 1669 { 1670 .str = "a_octets_transmitted_ok", 1671 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 1672 }, 1673 { 1674 .str = "a_octets_received_ok", 1675 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 1676 }, 1677 { 1678 .str = 
"a_multicast_frames_xmitted_ok", 1679 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 1680 }, 1681 { 1682 .str = "a_broadcast_frames_xmitted_ok", 1683 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 1684 }, 1685 { 1686 .str = "a_multicast_frames_received_ok", 1687 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 1688 }, 1689 { 1690 .str = "a_broadcast_frames_received_ok", 1691 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 1692 }, 1693 { 1694 .str = "a_in_range_length_errors", 1695 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 1696 }, 1697 { 1698 .str = "a_out_of_range_length_field", 1699 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 1700 }, 1701 { 1702 .str = "a_frame_too_long_errors", 1703 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 1704 }, 1705 { 1706 .str = "a_symbol_error_during_carrier", 1707 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 1708 }, 1709 { 1710 .str = "a_mac_control_frames_transmitted", 1711 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 1712 }, 1713 { 1714 .str = "a_mac_control_frames_received", 1715 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 1716 }, 1717 { 1718 .str = "a_unsupported_opcodes_received", 1719 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 1720 }, 1721 { 1722 .str = "a_pause_mac_ctrl_frames_received", 1723 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 1724 }, 1725 { 1726 .str = "a_pause_mac_ctrl_frames_xmitted", 1727 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 1728 }, 1729 }; 1730 1731 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 1732 1733 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = { 1734 { 1735 .str = "if_in_discards", 1736 .getter = mlxsw_reg_ppcnt_if_in_discards_get, 1737 }, 1738 { 1739 .str = "if_out_discards", 1740 .getter = mlxsw_reg_ppcnt_if_out_discards_get, 1741 }, 1742 { 1743 
.str = "if_out_errors", 1744 .getter = mlxsw_reg_ppcnt_if_out_errors_get, 1745 }, 1746 }; 1747 1748 #define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \ 1749 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats) 1750 1751 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { 1752 { 1753 .str = "ether_stats_undersize_pkts", 1754 .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get, 1755 }, 1756 { 1757 .str = "ether_stats_oversize_pkts", 1758 .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get, 1759 }, 1760 { 1761 .str = "ether_stats_fragments", 1762 .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get, 1763 }, 1764 { 1765 .str = "ether_pkts64octets", 1766 .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, 1767 }, 1768 { 1769 .str = "ether_pkts65to127octets", 1770 .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get, 1771 }, 1772 { 1773 .str = "ether_pkts128to255octets", 1774 .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, 1775 }, 1776 { 1777 .str = "ether_pkts256to511octets", 1778 .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, 1779 }, 1780 { 1781 .str = "ether_pkts512to1023octets", 1782 .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, 1783 }, 1784 { 1785 .str = "ether_pkts1024to1518octets", 1786 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, 1787 }, 1788 { 1789 .str = "ether_pkts1519to2047octets", 1790 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, 1791 }, 1792 { 1793 .str = "ether_pkts2048to4095octets", 1794 .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, 1795 }, 1796 { 1797 .str = "ether_pkts4096to8191octets", 1798 .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get, 1799 }, 1800 { 1801 .str = "ether_pkts8192to10239octets", 1802 .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get, 1803 }, 1804 }; 1805 1806 #define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ 1807 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) 1808 1809 static struct 
mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = { 1810 { 1811 .str = "dot3stats_fcs_errors", 1812 .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get, 1813 }, 1814 { 1815 .str = "dot3stats_symbol_errors", 1816 .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get, 1817 }, 1818 { 1819 .str = "dot3control_in_unknown_opcodes", 1820 .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get, 1821 }, 1822 { 1823 .str = "dot3in_pause_frames", 1824 .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get, 1825 }, 1826 }; 1827 1828 #define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \ 1829 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats) 1830 1831 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_ext_stats[] = { 1832 { 1833 .str = "ecn_marked", 1834 .getter = mlxsw_reg_ppcnt_ecn_marked_get, 1835 }, 1836 }; 1837 1838 #define MLXSW_SP_PORT_HW_EXT_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_ext_stats) 1839 1840 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = { 1841 { 1842 .str = "discard_ingress_general", 1843 .getter = mlxsw_reg_ppcnt_ingress_general_get, 1844 }, 1845 { 1846 .str = "discard_ingress_policy_engine", 1847 .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get, 1848 }, 1849 { 1850 .str = "discard_ingress_vlan_membership", 1851 .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get, 1852 }, 1853 { 1854 .str = "discard_ingress_tag_frame_type", 1855 .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get, 1856 }, 1857 { 1858 .str = "discard_egress_vlan_membership", 1859 .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get, 1860 }, 1861 { 1862 .str = "discard_loopback_filter", 1863 .getter = mlxsw_reg_ppcnt_loopback_filter_get, 1864 }, 1865 { 1866 .str = "discard_egress_general", 1867 .getter = mlxsw_reg_ppcnt_egress_general_get, 1868 }, 1869 { 1870 .str = "discard_egress_hoq", 1871 .getter = mlxsw_reg_ppcnt_egress_hoq_get, 1872 }, 1873 { 1874 .str = "discard_egress_policy_engine", 1875 .getter = mlxsw_reg_ppcnt_egress_policy_engine_get, 1876 }, 1877 { 1878 
.str = "discard_ingress_tx_link_down", 1879 .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get, 1880 }, 1881 { 1882 .str = "discard_egress_stp_filter", 1883 .getter = mlxsw_reg_ppcnt_egress_stp_filter_get, 1884 }, 1885 { 1886 .str = "discard_egress_sll", 1887 .getter = mlxsw_reg_ppcnt_egress_sll_get, 1888 }, 1889 }; 1890 1891 #define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \ 1892 ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats) 1893 1894 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 1895 { 1896 .str = "rx_octets_prio", 1897 .getter = mlxsw_reg_ppcnt_rx_octets_get, 1898 }, 1899 { 1900 .str = "rx_frames_prio", 1901 .getter = mlxsw_reg_ppcnt_rx_frames_get, 1902 }, 1903 { 1904 .str = "tx_octets_prio", 1905 .getter = mlxsw_reg_ppcnt_tx_octets_get, 1906 }, 1907 { 1908 .str = "tx_frames_prio", 1909 .getter = mlxsw_reg_ppcnt_tx_frames_get, 1910 }, 1911 { 1912 .str = "rx_pause_prio", 1913 .getter = mlxsw_reg_ppcnt_rx_pause_get, 1914 }, 1915 { 1916 .str = "rx_pause_duration_prio", 1917 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 1918 }, 1919 { 1920 .str = "tx_pause_prio", 1921 .getter = mlxsw_reg_ppcnt_tx_pause_get, 1922 }, 1923 { 1924 .str = "tx_pause_duration_prio", 1925 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 1926 }, 1927 }; 1928 1929 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 1930 1931 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 1932 { 1933 .str = "tc_transmit_queue_tc", 1934 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 1935 .cells_bytes = true, 1936 }, 1937 { 1938 .str = "tc_no_buffer_discard_uc_tc", 1939 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 1940 }, 1941 }; 1942 1943 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 1944 1945 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 1946 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \ 1947 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ 1948 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \ 1949 
MLXSW_SP_PORT_HW_EXT_STATS_LEN + \ 1950 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \ 1951 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ 1952 IEEE_8021QAZ_MAX_TCS) + \ 1953 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ 1954 TC_MAX_QUEUE)) 1955 1956 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 1957 { 1958 int i; 1959 1960 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 1961 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 1962 mlxsw_sp_port_hw_prio_stats[i].str, prio); 1963 *p += ETH_GSTRING_LEN; 1964 } 1965 } 1966 1967 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 1968 { 1969 int i; 1970 1971 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 1972 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 1973 mlxsw_sp_port_hw_tc_stats[i].str, tc); 1974 *p += ETH_GSTRING_LEN; 1975 } 1976 } 1977 1978 static void mlxsw_sp_port_get_strings(struct net_device *dev, 1979 u32 stringset, u8 *data) 1980 { 1981 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1982 u8 *p = data; 1983 int i; 1984 1985 switch (stringset) { 1986 case ETH_SS_STATS: 1987 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 1988 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 1989 ETH_GSTRING_LEN); 1990 p += ETH_GSTRING_LEN; 1991 } 1992 1993 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) { 1994 memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str, 1995 ETH_GSTRING_LEN); 1996 p += ETH_GSTRING_LEN; 1997 } 1998 1999 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { 2000 memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, 2001 ETH_GSTRING_LEN); 2002 p += ETH_GSTRING_LEN; 2003 } 2004 2005 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) { 2006 memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str, 2007 ETH_GSTRING_LEN); 2008 p += ETH_GSTRING_LEN; 2009 } 2010 2011 for (i = 0; i < MLXSW_SP_PORT_HW_EXT_STATS_LEN; i++) { 2012 memcpy(p, mlxsw_sp_port_hw_ext_stats[i].str, 2013 ETH_GSTRING_LEN); 2014 p += ETH_GSTRING_LEN; 2015 } 2016 2017 for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) { 2018 
memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str, 2019 ETH_GSTRING_LEN); 2020 p += ETH_GSTRING_LEN; 2021 } 2022 2023 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2024 mlxsw_sp_port_get_prio_strings(&p, i); 2025 2026 for (i = 0; i < TC_MAX_QUEUE; i++) 2027 mlxsw_sp_port_get_tc_strings(&p, i); 2028 2029 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p); 2030 break; 2031 } 2032 } 2033 2034 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2035 enum ethtool_phys_id_state state) 2036 { 2037 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2038 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2039 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2040 bool active; 2041 2042 switch (state) { 2043 case ETHTOOL_ID_ACTIVE: 2044 active = true; 2045 break; 2046 case ETHTOOL_ID_INACTIVE: 2047 active = false; 2048 break; 2049 default: 2050 return -EOPNOTSUPP; 2051 } 2052 2053 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2054 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2055 } 2056 2057 static int 2058 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2059 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2060 { 2061 switch (grp) { 2062 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2063 *p_hw_stats = mlxsw_sp_port_hw_stats; 2064 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2065 break; 2066 case MLXSW_REG_PPCNT_RFC_2863_CNT: 2067 *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats; 2068 *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2069 break; 2070 case MLXSW_REG_PPCNT_RFC_2819_CNT: 2071 *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; 2072 *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2073 break; 2074 case MLXSW_REG_PPCNT_RFC_3635_CNT: 2075 *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats; 2076 *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2077 break; 2078 case MLXSW_REG_PPCNT_EXT_CNT: 2079 *p_hw_stats = mlxsw_sp_port_hw_ext_stats; 2080 *p_len = MLXSW_SP_PORT_HW_EXT_STATS_LEN; 2081 break; 2082 case MLXSW_REG_PPCNT_DISCARD_CNT: 2083 
*p_hw_stats = mlxsw_sp_port_hw_discard_stats; 2084 *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2085 break; 2086 case MLXSW_REG_PPCNT_PRIO_CNT: 2087 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2088 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2089 break; 2090 case MLXSW_REG_PPCNT_TC_CNT: 2091 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2092 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2093 break; 2094 default: 2095 WARN_ON(1); 2096 return -EOPNOTSUPP; 2097 } 2098 return 0; 2099 } 2100 2101 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2102 enum mlxsw_reg_ppcnt_grp grp, int prio, 2103 u64 *data, int data_index) 2104 { 2105 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2106 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2107 struct mlxsw_sp_port_hw_stats *hw_stats; 2108 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2109 int i, len; 2110 int err; 2111 2112 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2113 if (err) 2114 return; 2115 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2116 for (i = 0; i < len; i++) { 2117 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2118 if (!hw_stats[i].cells_bytes) 2119 continue; 2120 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2121 data[data_index + i]); 2122 } 2123 } 2124 2125 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2126 struct ethtool_stats *stats, u64 *data) 2127 { 2128 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2129 int i, data_index = 0; 2130 2131 /* IEEE 802.3 Counters */ 2132 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2133 data, data_index); 2134 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2135 2136 /* RFC 2863 Counters */ 2137 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0, 2138 data, data_index); 2139 data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2140 2141 /* RFC 2819 Counters */ 2142 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, 2143 data, data_index); 2144 data_index += 
MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2145 2146 /* RFC 3635 Counters */ 2147 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0, 2148 data, data_index); 2149 data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2150 2151 /* Extended Counters */ 2152 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 2153 data, data_index); 2154 data_index += MLXSW_SP_PORT_HW_EXT_STATS_LEN; 2155 2156 /* Discard Counters */ 2157 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0, 2158 data, data_index); 2159 data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2160 2161 /* Per-Priority Counters */ 2162 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2163 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2164 data, data_index); 2165 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2166 } 2167 2168 /* Per-TC Counters */ 2169 for (i = 0; i < TC_MAX_QUEUE; i++) { 2170 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2171 data, data_index); 2172 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2173 } 2174 2175 /* PTP counters */ 2176 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port, 2177 data, data_index); 2178 data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); 2179 } 2180 2181 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2182 { 2183 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2184 2185 switch (sset) { 2186 case ETH_SS_STATS: 2187 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN + 2188 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); 2189 default: 2190 return -EOPNOTSUPP; 2191 } 2192 } 2193 2194 struct mlxsw_sp1_port_link_mode { 2195 enum ethtool_link_mode_bit_indices mask_ethtool; 2196 u32 mask; 2197 u32 speed; 2198 }; 2199 2200 static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = { 2201 { 2202 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2203 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2204 .speed = SPEED_100, 2205 }, 2206 { 2207 .mask = 
MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2208 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2209 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2210 .speed = SPEED_1000, 2211 }, 2212 { 2213 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2214 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2215 .speed = SPEED_10000, 2216 }, 2217 { 2218 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 2219 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 2220 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2221 .speed = SPEED_10000, 2222 }, 2223 { 2224 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2225 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2226 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2227 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 2228 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2229 .speed = SPEED_10000, 2230 }, 2231 { 2232 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 2233 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 2234 .speed = SPEED_20000, 2235 }, 2236 { 2237 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 2238 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2239 .speed = SPEED_40000, 2240 }, 2241 { 2242 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 2243 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2244 .speed = SPEED_40000, 2245 }, 2246 { 2247 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 2248 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2249 .speed = SPEED_40000, 2250 }, 2251 { 2252 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 2253 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2254 .speed = SPEED_40000, 2255 }, 2256 { 2257 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, 2258 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2259 .speed = SPEED_25000, 2260 }, 2261 { 2262 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, 2263 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2264 .speed = SPEED_25000, 2265 }, 2266 { 2267 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2268 .mask_ethtool = 
ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2269 .speed = SPEED_25000, 2270 }, 2271 { 2272 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, 2273 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2274 .speed = SPEED_50000, 2275 }, 2276 { 2277 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 2278 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2279 .speed = SPEED_50000, 2280 }, 2281 { 2282 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2, 2283 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2284 .speed = SPEED_50000, 2285 }, 2286 { 2287 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, 2288 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2289 .speed = SPEED_100000, 2290 }, 2291 { 2292 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, 2293 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2294 .speed = SPEED_100000, 2295 }, 2296 { 2297 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, 2298 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2299 .speed = SPEED_100000, 2300 }, 2301 { 2302 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 2303 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2304 .speed = SPEED_100000, 2305 }, 2306 }; 2307 2308 #define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode) 2309 2310 static void 2311 mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 2312 u32 ptys_eth_proto, 2313 struct ethtool_link_ksettings *cmd) 2314 { 2315 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2316 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2317 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2318 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2319 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2320 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2321 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2322 2323 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2324 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2325 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2326 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2327 
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2328 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2329 } 2330 2331 static void 2332 mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 2333 u8 width, unsigned long *mode) 2334 { 2335 int i; 2336 2337 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2338 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) 2339 __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2340 mode); 2341 } 2342 } 2343 2344 static u32 2345 mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 2346 { 2347 int i; 2348 2349 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2350 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) 2351 return mlxsw_sp1_port_link_mode[i].speed; 2352 } 2353 2354 return SPEED_UNKNOWN; 2355 } 2356 2357 static void 2358 mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 2359 u32 ptys_eth_proto, 2360 struct ethtool_link_ksettings *cmd) 2361 { 2362 cmd->base.speed = SPEED_UNKNOWN; 2363 cmd->base.duplex = DUPLEX_UNKNOWN; 2364 2365 if (!carrier_ok) 2366 return; 2367 2368 cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 2369 if (cmd->base.speed != SPEED_UNKNOWN) 2370 cmd->base.duplex = DUPLEX_FULL; 2371 } 2372 2373 static u32 2374 mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, 2375 const struct ethtool_link_ksettings *cmd) 2376 { 2377 u32 ptys_proto = 0; 2378 int i; 2379 2380 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2381 if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2382 cmd->link_modes.advertising)) 2383 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2384 } 2385 return ptys_proto; 2386 } 2387 2388 static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width, 2389 u32 speed) 2390 { 2391 u32 ptys_proto = 0; 2392 int i; 2393 2394 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2395 if (speed == mlxsw_sp1_port_link_mode[i].speed) 2396 ptys_proto |= 
/* Spectrum-1 PTYS pack: thin wrapper so both ASIC generations share one
 * ops interface (Spectrum-2 uses the extended PTYS fields instead).
 */
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin, bool autoneg)
{
	mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}

/* Spectrum-1 PTYS unpack counterpart of the wrapper above. */
static void
mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
				  p_eth_proto_oper);
}

/* Spectrum-1 implementation of the per-ASIC link-speed ops. */
static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp1_port_type_speed_ops = {
	.from_ptys_supported_port	= mlxsw_sp1_from_ptys_supported_port,
	.from_ptys_link			= mlxsw_sp1_from_ptys_link,
	.from_ptys_speed		= mlxsw_sp1_from_ptys_speed,
	.from_ptys_speed_duplex		= mlxsw_sp1_from_ptys_speed_duplex,
	.to_ptys_advert_link		= mlxsw_sp1_to_ptys_advert_link,
	.to_ptys_speed			= mlxsw_sp1_to_ptys_speed,
	.reg_ptys_eth_pack		= mlxsw_sp1_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack		= mlxsw_sp1_reg_ptys_eth_unpack,
};

/* On Spectrum-2+ a single extended-PTYS proto bit covers a whole family
 * of ethtool link modes, so each proto bit maps to an array of bits.
 */
static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_400gaui_8[] = {
	ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
	ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)

/* Bit flags describing which port widths (number of lanes) a link mode
 * can be used with.
 */
#define MLXSW_SP_PORT_MASK_WIDTH_1X	BIT(0)
#define MLXSW_SP_PORT_MASK_WIDTH_2X	BIT(1)
#define MLXSW_SP_PORT_MASK_WIDTH_4X	BIT(2)
#define MLXSW_SP_PORT_MASK_WIDTH_8X	BIT(3)

/* Map a lane count (1/2/4/8) to its width flag; 0 (with a WARN) for any
 * other value.
 */
static u8 mlxsw_sp_port_mask_width_get(u8 width)
{
	switch (width) {
	case 1:
		return MLXSW_SP_PORT_MASK_WIDTH_1X;
	case 2:
		return MLXSW_SP_PORT_MASK_WIDTH_2X;
	case 4:
		return MLXSW_SP_PORT_MASK_WIDTH_4X;
	case 8:
		return MLXSW_SP_PORT_MASK_WIDTH_8X;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

/* Spectrum-2+: one extended-PTYS proto bit expands to several ethtool
 * link-mode bits, constrained by the port width.
 */
struct mlxsw_sp2_port_link_mode {
	const enum ethtool_link_mode_bit_indices *mask_ethtool;
	int m_ethtool_len;	/* number of entries in mask_ethtool */
	u32 mask;		/* extended PTYS proto bit */
	u32 speed;		/* ethtool SPEED_* value */
	u8 mask_width;		/* MLXSW_SP_PORT_MASK_WIDTH_* flags */
};

static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_2500,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_5000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
			      MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_200000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X,
		.speed = SPEED_400000,
	},
};
2713 #define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode) 2714 2715 static void 2716 mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 2717 u32 ptys_eth_proto, 2718 struct ethtool_link_ksettings *cmd) 2719 { 2720 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2721 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2722 } 2723 2724 static void 2725 mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 2726 unsigned long *mode) 2727 { 2728 int i; 2729 2730 for (i = 0; i < link_mode->m_ethtool_len; i++) 2731 __set_bit(link_mode->mask_ethtool[i], mode); 2732 } 2733 2734 static void 2735 mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 2736 u8 width, unsigned long *mode) 2737 { 2738 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 2739 int i; 2740 2741 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 2742 if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) && 2743 (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) 2744 mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 2745 mode); 2746 } 2747 } 2748 2749 static u32 2750 mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 2751 { 2752 int i; 2753 2754 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 2755 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) 2756 return mlxsw_sp2_port_link_mode[i].speed; 2757 } 2758 2759 return SPEED_UNKNOWN; 2760 } 2761 2762 static void 2763 mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 2764 u32 ptys_eth_proto, 2765 struct ethtool_link_ksettings *cmd) 2766 { 2767 cmd->base.speed = SPEED_UNKNOWN; 2768 cmd->base.duplex = DUPLEX_UNKNOWN; 2769 2770 if (!carrier_ok) 2771 return; 2772 2773 cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 2774 if (cmd->base.speed != SPEED_UNKNOWN) 2775 cmd->base.duplex = DUPLEX_FULL; 2776 } 2777 2778 static bool 2779 mlxsw_sp2_test_bit_ethtool(const struct 
mlxsw_sp2_port_link_mode *link_mode, 2780 const unsigned long *mode) 2781 { 2782 int cnt = 0; 2783 int i; 2784 2785 for (i = 0; i < link_mode->m_ethtool_len; i++) { 2786 if (test_bit(link_mode->mask_ethtool[i], mode)) 2787 cnt++; 2788 } 2789 2790 return cnt == link_mode->m_ethtool_len; 2791 } 2792 2793 static u32 2794 mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, 2795 const struct ethtool_link_ksettings *cmd) 2796 { 2797 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 2798 u32 ptys_proto = 0; 2799 int i; 2800 2801 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 2802 if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) && 2803 mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 2804 cmd->link_modes.advertising)) 2805 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 2806 } 2807 return ptys_proto; 2808 } 2809 2810 static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, 2811 u8 width, u32 speed) 2812 { 2813 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 2814 u32 ptys_proto = 0; 2815 int i; 2816 2817 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 2818 if ((speed == mlxsw_sp2_port_link_mode[i].speed) && 2819 (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) 2820 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 2821 } 2822 return ptys_proto; 2823 } 2824 2825 static void 2826 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 2827 u8 local_port, u32 proto_admin, 2828 bool autoneg) 2829 { 2830 mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); 2831 } 2832 2833 static void 2834 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 2835 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 2836 u32 *p_eth_proto_oper) 2837 { 2838 mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, 2839 p_eth_proto_admin, p_eth_proto_oper); 2840 } 2841 2842 static const struct mlxsw_sp_port_type_speed_ops 2843 mlxsw_sp2_port_type_speed_ops = { 2844 .from_ptys_supported_port = 
mlxsw_sp2_from_ptys_supported_port, 2845 .from_ptys_link = mlxsw_sp2_from_ptys_link, 2846 .from_ptys_speed = mlxsw_sp2_from_ptys_speed, 2847 .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, 2848 .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, 2849 .to_ptys_speed = mlxsw_sp2_to_ptys_speed, 2850 .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, 2851 .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, 2852 }; 2853 2854 static void 2855 mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, 2856 u8 width, struct ethtool_link_ksettings *cmd) 2857 { 2858 const struct mlxsw_sp_port_type_speed_ops *ops; 2859 2860 ops = mlxsw_sp->port_type_speed_ops; 2861 2862 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 2863 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 2864 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 2865 2866 ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); 2867 ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width, 2868 cmd->link_modes.supported); 2869 } 2870 2871 static void 2872 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, 2873 u32 eth_proto_admin, bool autoneg, u8 width, 2874 struct ethtool_link_ksettings *cmd) 2875 { 2876 const struct mlxsw_sp_port_type_speed_ops *ops; 2877 2878 ops = mlxsw_sp->port_type_speed_ops; 2879 2880 if (!autoneg) 2881 return; 2882 2883 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 2884 ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width, 2885 cmd->link_modes.advertising); 2886 } 2887 2888 static u8 2889 mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type) 2890 { 2891 switch (connector_type) { 2892 case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: 2893 return PORT_OTHER; 2894 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: 2895 return PORT_NONE; 2896 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: 2897 return PORT_TP; 2898 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: 2899 
return PORT_AUI; 2900 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: 2901 return PORT_BNC; 2902 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: 2903 return PORT_MII; 2904 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: 2905 return PORT_FIBRE; 2906 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: 2907 return PORT_DA; 2908 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: 2909 return PORT_OTHER; 2910 default: 2911 WARN_ON_ONCE(1); 2912 return PORT_OTHER; 2913 } 2914 } 2915 2916 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 2917 struct ethtool_link_ksettings *cmd) 2918 { 2919 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 2920 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2921 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2922 const struct mlxsw_sp_port_type_speed_ops *ops; 2923 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2924 u8 connector_type; 2925 bool autoneg; 2926 int err; 2927 2928 ops = mlxsw_sp->port_type_speed_ops; 2929 2930 autoneg = mlxsw_sp_port->link.autoneg; 2931 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 2932 0, false); 2933 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2934 if (err) 2935 return err; 2936 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 2937 ð_proto_admin, ð_proto_oper); 2938 2939 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, 2940 mlxsw_sp_port->mapping.width, cmd); 2941 2942 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 2943 mlxsw_sp_port->mapping.width, cmd); 2944 2945 cmd->base.autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; 2946 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 2947 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 2948 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 2949 eth_proto_oper, cmd); 2950 2951 return 0; 2952 } 2953 2954 static int 2955 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 2956 const struct ethtool_link_ksettings *cmd) 2957 { 2958 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2959 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2960 const struct mlxsw_sp_port_type_speed_ops *ops; 2961 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2962 u32 eth_proto_cap, eth_proto_new; 2963 bool autoneg; 2964 int err; 2965 2966 ops = mlxsw_sp->port_type_speed_ops; 2967 2968 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 2969 0, false); 2970 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2971 if (err) 2972 return err; 2973 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 2974 2975 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 2976 eth_proto_new = autoneg ? 
2977 ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width, 2978 cmd) : 2979 ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width, 2980 cmd->base.speed); 2981 2982 eth_proto_new = eth_proto_new & eth_proto_cap; 2983 if (!eth_proto_new) { 2984 netdev_err(dev, "No supported speed requested\n"); 2985 return -EINVAL; 2986 } 2987 2988 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 2989 eth_proto_new, autoneg); 2990 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2991 if (err) 2992 return err; 2993 2994 mlxsw_sp_port->link.autoneg = autoneg; 2995 2996 if (!netif_running(dev)) 2997 return 0; 2998 2999 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3000 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3001 3002 return 0; 3003 } 3004 3005 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3006 struct ethtool_modinfo *modinfo) 3007 { 3008 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3009 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3010 int err; 3011 3012 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3013 mlxsw_sp_port->mapping.module, 3014 modinfo); 3015 3016 return err; 3017 } 3018 3019 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3020 struct ethtool_eeprom *ee, 3021 u8 *data) 3022 { 3023 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3024 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3025 int err; 3026 3027 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3028 mlxsw_sp_port->mapping.module, ee, 3029 data); 3030 3031 return err; 3032 } 3033 3034 static int 3035 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) 3036 { 3037 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3038 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3039 3040 return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); 3041 } 3042 3043 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 3044 .get_drvinfo = 
/* Ethtool operations for Spectrum ports (shared by all generations). */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
	.get_module_info	= mlxsw_sp_get_module_info,
	.get_module_eeprom	= mlxsw_sp_get_module_eeprom,
	.get_ts_info		= mlxsw_sp_get_ts_info,
};

/* Advertise everything the port is capable of: read the capability mask
 * via PTYS and write it back as the admin (advertised) mask, preserving
 * the current autoneg setting.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to supported speeds. */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap, mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

/* Read the port's operational speed via PTYS; *speed is SPEED_UNKNOWN if
 * no link mode is operational. Returns 0 or a register-access error.
 */
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

/* Configure one QEEC scheduling element: link it under @next_index at
 * hierarchy level @hr and set its DWRR mode/weight.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the max shaper (rate + burst size) on one QEEC element. */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the min shaper (guaranteed rate) on one QEEC element. */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Map one switch priority to a traffic class via the QTCT register. */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
3167 */ 3168 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3169 MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0); 3170 if (err) 3171 return err; 3172 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3173 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3174 MLXSW_REG_QEEC_HR_SUBGROUP, i, 3175 0, false, 0); 3176 if (err) 3177 return err; 3178 } 3179 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3180 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3181 MLXSW_REG_QEEC_HR_TC, i, i, 3182 false, 0); 3183 if (err) 3184 return err; 3185 3186 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3187 MLXSW_REG_QEEC_HR_TC, 3188 i + 8, i, 3189 true, 100); 3190 if (err) 3191 return err; 3192 } 3193 3194 /* Make sure the max shaper is disabled in all hierarchies that support 3195 * it. Note that this disables ptps (PTP shaper), but that is intended 3196 * for the initial configuration. 3197 */ 3198 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3199 MLXSW_REG_QEEC_HR_PORT, 0, 0, 3200 MLXSW_REG_QEEC_MAS_DIS, 0); 3201 if (err) 3202 return err; 3203 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3204 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3205 MLXSW_REG_QEEC_HR_SUBGROUP, 3206 i, 0, 3207 MLXSW_REG_QEEC_MAS_DIS, 0); 3208 if (err) 3209 return err; 3210 } 3211 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3212 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3213 MLXSW_REG_QEEC_HR_TC, 3214 i, i, 3215 MLXSW_REG_QEEC_MAS_DIS, 0); 3216 if (err) 3217 return err; 3218 3219 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3220 MLXSW_REG_QEEC_HR_TC, 3221 i + 8, i, 3222 MLXSW_REG_QEEC_MAS_DIS, 0); 3223 if (err) 3224 return err; 3225 } 3226 3227 /* Configure the min shaper for multicast TCs. */ 3228 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3229 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 3230 MLXSW_REG_QEEC_HR_TC, 3231 i + 8, i, 3232 MLXSW_REG_QEEC_MIS_MIN); 3233 if (err) 3234 return err; 3235 } 3236 3237 /* Map all priorities to traffic class 0. 
*/ 3238 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3239 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 3240 if (err) 3241 return err; 3242 } 3243 3244 return 0; 3245 } 3246 3247 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 3248 bool enable) 3249 { 3250 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3251 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 3252 3253 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 3254 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 3255 } 3256 3257 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3258 u8 split_base_local_port, 3259 struct mlxsw_sp_port_mapping *port_mapping) 3260 { 3261 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 3262 bool split = !!split_base_local_port; 3263 struct mlxsw_sp_port *mlxsw_sp_port; 3264 struct net_device *dev; 3265 int err; 3266 3267 err = mlxsw_core_port_init(mlxsw_sp->core, local_port, 3268 port_mapping->module + 1, split, 3269 port_mapping->lane / port_mapping->width, 3270 mlxsw_sp->base_mac, 3271 sizeof(mlxsw_sp->base_mac)); 3272 if (err) { 3273 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 3274 local_port); 3275 return err; 3276 } 3277 3278 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 3279 if (!dev) { 3280 err = -ENOMEM; 3281 goto err_alloc_etherdev; 3282 } 3283 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 3284 dev_net_set(dev, mlxsw_sp_net(mlxsw_sp)); 3285 mlxsw_sp_port = netdev_priv(dev); 3286 mlxsw_sp_port->dev = dev; 3287 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3288 mlxsw_sp_port->local_port = local_port; 3289 mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; 3290 mlxsw_sp_port->split = split; 3291 mlxsw_sp_port->split_base_local_port = split_base_local_port; 3292 mlxsw_sp_port->mapping = *port_mapping; 3293 mlxsw_sp_port->link.autoneg = 1; 3294 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 3295 3296 mlxsw_sp_port->pcpu_stats = 3297 netdev_alloc_pcpu_stats(struct 
mlxsw_sp_port_pcpu_stats); 3298 if (!mlxsw_sp_port->pcpu_stats) { 3299 err = -ENOMEM; 3300 goto err_alloc_stats; 3301 } 3302 3303 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 3304 &update_stats_cache); 3305 3306 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 3307 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 3308 3309 err = mlxsw_sp_port_module_map(mlxsw_sp_port); 3310 if (err) { 3311 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 3312 mlxsw_sp_port->local_port); 3313 goto err_port_module_map; 3314 } 3315 3316 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 3317 if (err) { 3318 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 3319 mlxsw_sp_port->local_port); 3320 goto err_port_swid_set; 3321 } 3322 3323 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 3324 if (err) { 3325 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 3326 mlxsw_sp_port->local_port); 3327 goto err_dev_addr_init; 3328 } 3329 3330 netif_carrier_off(dev); 3331 3332 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 3333 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 3334 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; 3335 3336 dev->min_mtu = 0; 3337 dev->max_mtu = ETH_MAX_MTU; 3338 3339 /* Each packet needs to have a Tx header (metadata) on top all other 3340 * headers. 
3341 */ 3342 dev->needed_headroom = MLXSW_TXHDR_LEN; 3343 3344 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 3345 if (err) { 3346 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 3347 mlxsw_sp_port->local_port); 3348 goto err_port_system_port_mapping_set; 3349 } 3350 3351 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port); 3352 if (err) { 3353 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 3354 mlxsw_sp_port->local_port); 3355 goto err_port_speed_by_width_set; 3356 } 3357 3358 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 3359 if (err) { 3360 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 3361 mlxsw_sp_port->local_port); 3362 goto err_port_mtu_set; 3363 } 3364 3365 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3366 if (err) 3367 goto err_port_admin_status_set; 3368 3369 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 3370 if (err) { 3371 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 3372 mlxsw_sp_port->local_port); 3373 goto err_port_buffers_init; 3374 } 3375 3376 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 3377 if (err) { 3378 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 3379 mlxsw_sp_port->local_port); 3380 goto err_port_ets_init; 3381 } 3382 3383 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 3384 if (err) { 3385 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 3386 mlxsw_sp_port->local_port); 3387 goto err_port_tc_mc_mode; 3388 } 3389 3390 /* ETS and buffers must be initialized before DCB. 
*/ 3391 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 3392 if (err) { 3393 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 3394 mlxsw_sp_port->local_port); 3395 goto err_port_dcb_init; 3396 } 3397 3398 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 3399 if (err) { 3400 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 3401 mlxsw_sp_port->local_port); 3402 goto err_port_fids_init; 3403 } 3404 3405 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 3406 if (err) { 3407 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 3408 mlxsw_sp_port->local_port); 3409 goto err_port_qdiscs_init; 3410 } 3411 3412 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false, 3413 false); 3414 if (err) { 3415 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n", 3416 mlxsw_sp_port->local_port); 3417 goto err_port_vlan_clear; 3418 } 3419 3420 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 3421 if (err) { 3422 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 3423 mlxsw_sp_port->local_port); 3424 goto err_port_nve_init; 3425 } 3426 3427 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 3428 if (err) { 3429 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", 3430 mlxsw_sp_port->local_port); 3431 goto err_port_pvid_set; 3432 } 3433 3434 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 3435 MLXSW_SP_DEFAULT_VID); 3436 if (IS_ERR(mlxsw_sp_port_vlan)) { 3437 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 3438 mlxsw_sp_port->local_port); 3439 err = PTR_ERR(mlxsw_sp_port_vlan); 3440 goto err_port_vlan_create; 3441 } 3442 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; 3443 3444 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, 3445 mlxsw_sp->ptp_ops->shaper_work); 3446 INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw, 3447 mlxsw_sp_span_speed_update_work); 3448 3449 mlxsw_sp->ports[local_port] = 
mlxsw_sp_port; 3450 err = register_netdev(dev); 3451 if (err) { 3452 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 3453 mlxsw_sp_port->local_port); 3454 goto err_register_netdev; 3455 } 3456 3457 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 3458 mlxsw_sp_port, dev); 3459 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 3460 return 0; 3461 3462 err_register_netdev: 3463 mlxsw_sp->ports[local_port] = NULL; 3464 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 3465 err_port_vlan_create: 3466 err_port_pvid_set: 3467 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3468 err_port_nve_init: 3469 err_port_vlan_clear: 3470 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3471 err_port_qdiscs_init: 3472 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3473 err_port_fids_init: 3474 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3475 err_port_dcb_init: 3476 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3477 err_port_tc_mc_mode: 3478 err_port_ets_init: 3479 err_port_buffers_init: 3480 err_port_admin_status_set: 3481 err_port_mtu_set: 3482 err_port_speed_by_width_set: 3483 err_port_system_port_mapping_set: 3484 err_dev_addr_init: 3485 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3486 err_port_swid_set: 3487 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3488 err_port_module_map: 3489 free_percpu(mlxsw_sp_port->pcpu_stats); 3490 err_alloc_stats: 3491 free_netdev(dev); 3492 err_alloc_etherdev: 3493 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3494 return err; 3495 } 3496 3497 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3498 { 3499 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3500 3501 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 3502 cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw); 3503 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); 3504 mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 3505 
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3506 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3507 mlxsw_sp->ports[local_port] = NULL; 3508 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 3509 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3510 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3511 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3512 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3513 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3514 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3515 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3516 free_percpu(mlxsw_sp_port->pcpu_stats); 3517 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3518 free_netdev(mlxsw_sp_port->dev); 3519 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3520 } 3521 3522 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) 3523 { 3524 struct mlxsw_sp_port *mlxsw_sp_port; 3525 int err; 3526 3527 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL); 3528 if (!mlxsw_sp_port) 3529 return -ENOMEM; 3530 3531 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3532 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT; 3533 3534 err = mlxsw_core_cpu_port_init(mlxsw_sp->core, 3535 mlxsw_sp_port, 3536 mlxsw_sp->base_mac, 3537 sizeof(mlxsw_sp->base_mac)); 3538 if (err) { 3539 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n"); 3540 goto err_core_cpu_port_init; 3541 } 3542 3543 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port; 3544 return 0; 3545 3546 err_core_cpu_port_init: 3547 kfree(mlxsw_sp_port); 3548 return err; 3549 } 3550 3551 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) 3552 { 3553 struct mlxsw_sp_port *mlxsw_sp_port = 3554 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; 3555 3556 mlxsw_core_cpu_port_fini(mlxsw_sp->core); 3557 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL; 3558 kfree(mlxsw_sp_port); 3559 } 3560 3561 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3562 { 3563 return 
mlxsw_sp->ports[local_port] != NULL; 3564 } 3565 3566 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 3567 { 3568 int i; 3569 3570 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 3571 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3572 mlxsw_sp_port_remove(mlxsw_sp, i); 3573 mlxsw_sp_cpu_port_remove(mlxsw_sp); 3574 kfree(mlxsw_sp->ports); 3575 mlxsw_sp->ports = NULL; 3576 } 3577 3578 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3579 { 3580 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3581 struct mlxsw_sp_port_mapping *port_mapping; 3582 size_t alloc_size; 3583 int i; 3584 int err; 3585 3586 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 3587 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 3588 if (!mlxsw_sp->ports) 3589 return -ENOMEM; 3590 3591 err = mlxsw_sp_cpu_port_create(mlxsw_sp); 3592 if (err) 3593 goto err_cpu_port_create; 3594 3595 for (i = 1; i < max_ports; i++) { 3596 port_mapping = mlxsw_sp->port_mapping[i]; 3597 if (!port_mapping) 3598 continue; 3599 err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping); 3600 if (err) 3601 goto err_port_create; 3602 } 3603 return 0; 3604 3605 err_port_create: 3606 for (i--; i >= 1; i--) 3607 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3608 mlxsw_sp_port_remove(mlxsw_sp, i); 3609 mlxsw_sp_cpu_port_remove(mlxsw_sp); 3610 err_cpu_port_create: 3611 kfree(mlxsw_sp->ports); 3612 mlxsw_sp->ports = NULL; 3613 return err; 3614 } 3615 3616 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) 3617 { 3618 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3619 struct mlxsw_sp_port_mapping port_mapping; 3620 int i; 3621 int err; 3622 3623 mlxsw_sp->port_mapping = kcalloc(max_ports, 3624 sizeof(struct mlxsw_sp_port_mapping *), 3625 GFP_KERNEL); 3626 if (!mlxsw_sp->port_mapping) 3627 return -ENOMEM; 3628 3629 for (i = 1; i < max_ports; i++) { 3630 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping); 3631 if (err) 3632 goto 
err_port_module_info_get; 3633 if (!port_mapping.width) 3634 continue; 3635 3636 mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping, 3637 sizeof(port_mapping), 3638 GFP_KERNEL); 3639 if (!mlxsw_sp->port_mapping[i]) { 3640 err = -ENOMEM; 3641 goto err_port_module_info_dup; 3642 } 3643 } 3644 return 0; 3645 3646 err_port_module_info_get: 3647 err_port_module_info_dup: 3648 for (i--; i >= 1; i--) 3649 kfree(mlxsw_sp->port_mapping[i]); 3650 kfree(mlxsw_sp->port_mapping); 3651 return err; 3652 } 3653 3654 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) 3655 { 3656 int i; 3657 3658 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 3659 kfree(mlxsw_sp->port_mapping[i]); 3660 kfree(mlxsw_sp->port_mapping); 3661 } 3662 3663 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width) 3664 { 3665 u8 offset = (local_port - 1) % max_width; 3666 3667 return local_port - offset; 3668 } 3669 3670 static int 3671 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 3672 struct mlxsw_sp_port_mapping *port_mapping, 3673 unsigned int count, u8 offset) 3674 { 3675 struct mlxsw_sp_port_mapping split_port_mapping; 3676 int err, i; 3677 3678 split_port_mapping = *port_mapping; 3679 split_port_mapping.width /= count; 3680 for (i = 0; i < count; i++) { 3681 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset, 3682 base_port, &split_port_mapping); 3683 if (err) 3684 goto err_port_create; 3685 split_port_mapping.lane += split_port_mapping.width; 3686 } 3687 3688 return 0; 3689 3690 err_port_create: 3691 for (i--; i >= 0; i--) 3692 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 3693 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 3694 return err; 3695 } 3696 3697 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 3698 u8 base_port, 3699 unsigned int count, u8 offset) 3700 { 3701 struct mlxsw_sp_port_mapping *port_mapping; 3702 int i; 3703 3704 /* Go over original unsplit ports in the 
gap and recreate them. */ 3705 for (i = 0; i < count * offset; i++) { 3706 port_mapping = mlxsw_sp->port_mapping[base_port + i]; 3707 if (!port_mapping) 3708 continue; 3709 mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping); 3710 } 3711 } 3712 3713 static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core, 3714 unsigned int count, 3715 unsigned int max_width) 3716 { 3717 enum mlxsw_res_id local_ports_in_x_res_id; 3718 int split_width = max_width / count; 3719 3720 if (split_width == 1) 3721 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X; 3722 else if (split_width == 2) 3723 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X; 3724 else if (split_width == 4) 3725 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X; 3726 else 3727 return -EINVAL; 3728 3729 if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id)) 3730 return -EINVAL; 3731 return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id); 3732 } 3733 3734 static struct mlxsw_sp_port * 3735 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3736 { 3737 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) 3738 return mlxsw_sp->ports[local_port]; 3739 return NULL; 3740 } 3741 3742 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 3743 unsigned int count, 3744 struct netlink_ext_ack *extack) 3745 { 3746 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3747 struct mlxsw_sp_port_mapping port_mapping; 3748 struct mlxsw_sp_port *mlxsw_sp_port; 3749 int max_width; 3750 u8 base_port; 3751 int offset; 3752 int i; 3753 int err; 3754 3755 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 3756 if (!mlxsw_sp_port) { 3757 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3758 local_port); 3759 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 3760 return -EINVAL; 3761 } 3762 3763 /* Split ports cannot be split. 
*/ 3764 if (mlxsw_sp_port->split) { 3765 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 3766 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further"); 3767 return -EINVAL; 3768 } 3769 3770 max_width = mlxsw_core_module_max_width(mlxsw_core, 3771 mlxsw_sp_port->mapping.module); 3772 if (max_width < 0) { 3773 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); 3774 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); 3775 return max_width; 3776 } 3777 3778 /* Split port with non-max and 1 module width cannot be split. */ 3779 if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) { 3780 netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n"); 3781 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split"); 3782 return -EINVAL; 3783 } 3784 3785 if (count == 1 || !is_power_of_2(count) || count > max_width) { 3786 netdev_err(mlxsw_sp_port->dev, "Invalid split count\n"); 3787 NL_SET_ERR_MSG_MOD(extack, "Invalid split count"); 3788 return -EINVAL; 3789 } 3790 3791 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 3792 if (offset < 0) { 3793 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 3794 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 3795 return -EINVAL; 3796 } 3797 3798 /* Only in case max split is being done, the local port and 3799 * base port may differ. 3800 */ 3801 base_port = count == max_width ? 3802 mlxsw_sp_cluster_base_port_get(local_port, max_width) : 3803 local_port; 3804 3805 for (i = 0; i < count * offset; i++) { 3806 /* Expect base port to exist and also the one in the middle in 3807 * case of maximal split count. 
3808 */ 3809 if (i == 0 || (count == max_width && i == count / 2)) 3810 continue; 3811 3812 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) { 3813 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3814 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 3815 return -EINVAL; 3816 } 3817 } 3818 3819 port_mapping = mlxsw_sp_port->mapping; 3820 3821 for (i = 0; i < count; i++) 3822 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 3823 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 3824 3825 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping, 3826 count, offset); 3827 if (err) { 3828 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 3829 goto err_port_split_create; 3830 } 3831 3832 return 0; 3833 3834 err_port_split_create: 3835 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 3836 return err; 3837 } 3838 3839 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 3840 struct netlink_ext_ack *extack) 3841 { 3842 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3843 struct mlxsw_sp_port *mlxsw_sp_port; 3844 unsigned int count; 3845 int max_width; 3846 u8 base_port; 3847 int offset; 3848 int i; 3849 3850 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 3851 if (!mlxsw_sp_port) { 3852 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3853 local_port); 3854 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 3855 return -EINVAL; 3856 } 3857 3858 if (!mlxsw_sp_port->split) { 3859 netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); 3860 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 3861 return -EINVAL; 3862 } 3863 3864 max_width = mlxsw_core_module_max_width(mlxsw_core, 3865 mlxsw_sp_port->mapping.module); 3866 if (max_width < 0) { 3867 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); 3868 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); 3869 
return max_width; 3870 } 3871 3872 count = max_width / mlxsw_sp_port->mapping.width; 3873 3874 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 3875 if (WARN_ON(offset < 0)) { 3876 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 3877 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 3878 return -EINVAL; 3879 } 3880 3881 base_port = mlxsw_sp_port->split_base_local_port; 3882 3883 for (i = 0; i < count; i++) 3884 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 3885 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 3886 3887 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 3888 3889 return 0; 3890 } 3891 3892 static void 3893 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) 3894 { 3895 int i; 3896 3897 for (i = 0; i < TC_MAX_QUEUE; i++) 3898 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; 3899 } 3900 3901 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 3902 char *pude_pl, void *priv) 3903 { 3904 struct mlxsw_sp *mlxsw_sp = priv; 3905 struct mlxsw_sp_port *mlxsw_sp_port; 3906 enum mlxsw_reg_pude_oper_status status; 3907 u8 local_port; 3908 3909 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 3910 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3911 if (!mlxsw_sp_port) 3912 return; 3913 3914 status = mlxsw_reg_pude_oper_status_get(pude_pl); 3915 if (status == MLXSW_PORT_OPER_STATUS_UP) { 3916 netdev_info(mlxsw_sp_port->dev, "link up\n"); 3917 netif_carrier_on(mlxsw_sp_port->dev); 3918 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 3919 mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0); 3920 } else { 3921 netdev_info(mlxsw_sp_port->dev, "link down\n"); 3922 netif_carrier_off(mlxsw_sp_port->dev); 3923 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); 3924 } 3925 } 3926 3927 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 3928 char *mtpptr_pl, bool ingress) 3929 { 3930 u8 local_port; 3931 
u8 num_rec; 3932 int i; 3933 3934 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 3935 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 3936 for (i = 0; i < num_rec; i++) { 3937 u8 domain_number; 3938 u8 message_type; 3939 u16 sequence_id; 3940 u64 timestamp; 3941 3942 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 3943 &domain_number, &sequence_id, 3944 ×tamp); 3945 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 3946 message_type, domain_number, 3947 sequence_id, timestamp); 3948 } 3949 } 3950 3951 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 3952 char *mtpptr_pl, void *priv) 3953 { 3954 struct mlxsw_sp *mlxsw_sp = priv; 3955 3956 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 3957 } 3958 3959 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 3960 char *mtpptr_pl, void *priv) 3961 { 3962 struct mlxsw_sp *mlxsw_sp = priv; 3963 3964 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 3965 } 3966 3967 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 3968 u8 local_port, void *priv) 3969 { 3970 struct mlxsw_sp *mlxsw_sp = priv; 3971 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3972 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 3973 3974 if (unlikely(!mlxsw_sp_port)) { 3975 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 3976 local_port); 3977 return; 3978 } 3979 3980 skb->dev = mlxsw_sp_port->dev; 3981 3982 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 3983 u64_stats_update_begin(&pcpu_stats->syncp); 3984 pcpu_stats->rx_packets++; 3985 pcpu_stats->rx_bytes += skb->len; 3986 u64_stats_update_end(&pcpu_stats->syncp); 3987 3988 skb->protocol = eth_type_trans(skb, skb->dev); 3989 netif_receive_skb(skb); 3990 } 3991 3992 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 3993 void *priv) 3994 { 3995 skb->offload_fwd_mark = 1; 3996 return 
mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3997 } 3998 3999 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 4000 u8 local_port, void *priv) 4001 { 4002 skb->offload_l3_fwd_mark = 1; 4003 skb->offload_fwd_mark = 1; 4004 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4005 } 4006 4007 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 4008 u8 local_port) 4009 { 4010 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 4011 } 4012 4013 void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 4014 u8 local_port) 4015 { 4016 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4017 struct mlxsw_sp_port_sample *sample; 4018 u32 size; 4019 4020 if (unlikely(!mlxsw_sp_port)) { 4021 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 4022 local_port); 4023 goto out; 4024 } 4025 4026 rcu_read_lock(); 4027 sample = rcu_dereference(mlxsw_sp_port->sample); 4028 if (!sample) 4029 goto out_unlock; 4030 size = sample->truncate ? 
sample->trunc_size : skb->len; 4031 psample_sample_packet(sample->psample_group, skb, size, 4032 mlxsw_sp_port->dev->ifindex, 0, sample->rate); 4033 out_unlock: 4034 rcu_read_unlock(); 4035 out: 4036 consume_skb(skb); 4037 } 4038 4039 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4040 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 4041 _is_ctrl, SP_##_trap_group, DISCARD) 4042 4043 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4044 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 4045 _is_ctrl, SP_##_trap_group, DISCARD) 4046 4047 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4048 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 4049 _is_ctrl, SP_##_trap_group, DISCARD) 4050 4051 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 4052 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 4053 4054 static const struct mlxsw_listener mlxsw_sp_listener[] = { 4055 /* Events */ 4056 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 4057 /* L2 traps */ 4058 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false), 4059 /* L3 traps */ 4060 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 4061 false), 4062 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 4063 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 4064 false), 4065 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 4066 ROUTER_EXP, false), 4067 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 4068 ROUTER_EXP, false), 4069 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 4070 ROUTER_EXP, false), 4071 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 4072 ROUTER_EXP, false), 4073 /* Multicast Router Traps */ 4074 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 4075 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 4076 /* NVE traps */ 4077 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, 
NEIGH_DISCOVERY, false), 4078 }; 4079 4080 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 4081 /* Events */ 4082 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 4083 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 4084 }; 4085 4086 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 4087 { 4088 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4089 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 4090 enum mlxsw_reg_qpcr_ir_units ir_units; 4091 int max_cpu_policers; 4092 bool is_bytes; 4093 u8 burst_size; 4094 u32 rate; 4095 int i, err; 4096 4097 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 4098 return -EIO; 4099 4100 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 4101 4102 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 4103 for (i = 0; i < max_cpu_policers; i++) { 4104 is_bytes = false; 4105 switch (i) { 4106 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 4107 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 4108 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 4109 rate = 1024; 4110 burst_size = 7; 4111 break; 4112 default: 4113 continue; 4114 } 4115 4116 __set_bit(i, mlxsw_sp->trap->policers_usage); 4117 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 4118 burst_size); 4119 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 4120 if (err) 4121 return err; 4122 } 4123 4124 return 0; 4125 } 4126 4127 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 4128 { 4129 char htgt_pl[MLXSW_REG_HTGT_LEN]; 4130 enum mlxsw_reg_htgt_trap_group i; 4131 int max_cpu_policers; 4132 int max_trap_groups; 4133 u8 priority, tc; 4134 u16 policer_id; 4135 int err; 4136 4137 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 4138 return -EIO; 4139 4140 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 4141 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 4142 4143 for (i = 0; i < max_trap_groups; i++) { 4144 policer_id = 
			    i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			/* Events are not rate limited. */
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* A valid policer id must fit within the device's policer
		 * range.
		 */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Register an array of trap listeners with the core. On failure, already
 * registered listeners are unregistered in reverse order.
 * NOTE(review): 'i' is a signed int compared against a size_t count;
 * fine for the small arrays used here, presumably never near INT_MAX.
 */
static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_listener listeners[],
				   size_t listeners_count)
{
	int i;
	int err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &listeners[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	/* Unwind only the listeners registered so far. */
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
	return err;
}

/* Mirror of mlxsw_sp_traps_register() for teardown. */
static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_listener listeners[],
				      size_t listeners_count)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
}

/* Allocate the trap state (including the per-policer usage bitmap sized
 * from the MAX_CPU_POLICERS resource), configure policers and trap
 * groups, then register the common and per-ASIC listener arrays.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	trap = kzalloc(struct_size(trap,
				   policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		goto err_traps_register;

	/* Per-ASIC extra listeners (may be empty for some generations). */
	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}

/* Teardown in reverse order of mlxsw_sp_traps_init(). */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	kfree(mlxsw_sp->trap);
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure the LAG hash fields (seeded from the base MAC) and allocate
 * the per-LAG upper-device tracking array.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Set up the EMAD trap group; needed before any other register access
 * over EMAD can complete.
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

/* PTP implementation for Spectrum-1. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
};

/* PTP implementation for Spectrum-2 and later. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	=
mlxsw_sp2_ptp_get_ts_info, 4347 .get_stats_count = mlxsw_sp2_get_stats_count, 4348 .get_stats_strings = mlxsw_sp2_get_stats_strings, 4349 .get_stats = mlxsw_sp2_get_stats, 4350 }; 4351 4352 static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed) 4353 { 4354 return mtu * 5 / 2; 4355 } 4356 4357 static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = { 4358 .buffsize_get = mlxsw_sp1_span_buffsize_get, 4359 }; 4360 4361 #define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38 4362 #define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50 4363 4364 static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor) 4365 { 4366 return 3 * mtu + buffer_factor * speed / 1000; 4367 } 4368 4369 static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed) 4370 { 4371 int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR; 4372 4373 return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 4374 } 4375 4376 static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = { 4377 .buffsize_get = mlxsw_sp2_span_buffsize_get, 4378 }; 4379 4380 static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) 4381 { 4382 int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR; 4383 4384 return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); 4385 } 4386 4387 static const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = { 4388 .buffsize_get = mlxsw_sp3_span_buffsize_get, 4389 }; 4390 4391 u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed) 4392 { 4393 u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu); 4394 4395 return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1; 4396 } 4397 4398 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 4399 unsigned long event, void *ptr); 4400 4401 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 4402 const struct mlxsw_bus_info *mlxsw_bus_info, 4403 struct netlink_ext_ack *extack) 4404 { 4405 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4406 int err; 4407 4408 mlxsw_sp->core = mlxsw_core; 4409 
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Unwind in exact reverse order of initialization. */
err_ports_create:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 entry point: select SP1 ops then run the common init. */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-2 entry point: select SP2 ops then run the common init. */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops =
				&mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-3 entry point: mostly reuses SP2 ops, with SP3-specific
 * firmware, SPAN buffer sizing and shaper burst size.
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp3_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP3_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Teardown mirroring the error-unwind order of mlxsw_sp_init(). */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

/* Device profile for Spectrum-1 (includes KVD split configuration). */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Device profile for Spectrum-2/3 (no KVD split — linear KVD only). */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Build devlink size-parameter descriptors for the KVD and its three
 * partitions. The min/max bounds of each partition are derived from the
 * device's KVD_* resources so user-requested splits stay feasible.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the KVD devlink resource tree for Spectrum-1: the top-level
 * KVD plus linear / hash-double / hash-single partitions, sized per the
 * SP1 profile's parts ratio.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split the remaining (hash) KVD by the double/single parts ratio,
	 * rounded down to the KVD granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

/* Spectrum-2+: KVD is a single devlink resource with no partitions. */
static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct
	       devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					 kvd_size, MLXSW_SP_RESOURCE_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &kvd_size_params);
}

/* Expose the fixed SPAN agents pool as a devlink resource. */
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
					 max_span, MLXSW_SP_RESOURCE_SPAN,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &span_size_params);
}

/* Register all Spectrum-1 devlink resources (KVD tree, SPAN, counters). */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	return 0;

err_resources_counter_register:
err_resources_span_register:
	/* Unregistering all resources also drops the partially built tree. */
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

/* Register all Spectrum-2/3 devlink resources (flat KVD, SPAN, counters). */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	return 0;

err_resources_counter_register:
err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

/* Resolve the single/double/linear KVD partition sizes, preferring
 * user-provided devlink values and falling back to the profile split.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiple of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* Only 'driver' and 'flash' firmware load policies are supported. */
static int
mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
					       union devlink_param_value val,
					       struct netlink_ext_ack *extack)
{
	if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
	    (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

/* Devlink params common to all Spectrum generations. */
static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};

/* Register common devlink params and set the fw_load_policy default. */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}

static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp_devlink_params,
				  ARRAY_SIZE(mlxsw_sp_devlink_params));
}

/* Devlink get callback for the ACL region rehash interval (runtime). */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

/* Devlink set callback for the ACL region rehash interval (runtime). */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

/* Additional devlink params for Spectrum-2 and later. */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};

/* Register common params plus the SP2-specific ones. */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = mlxsw_sp_params_register(mlxsw_core);
	if (err)
		return err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		goto err_devlink_params_register;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;

err_devlink_params_register:
	mlxsw_sp_params_unregister(mlxsw_core);
	return err;
}

static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
	mlxsw_sp_params_unregister(mlxsw_core);
}

/* Strip the TX header and hand a transmitted PTP skb to the per-ASIC
 * timestamping handler.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}

/* mlxsw core driver description for Spectrum-1. */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update			= mlxsw_sp_flash_update,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.params_register		= mlxsw_sp_params_register,
	.params_unregister		= mlxsw_sp_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.res_query_enabled		= true,
};

/* mlxsw core driver description for Spectrum-2. */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update			= mlxsw_sp_flash_update,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		=
mlxsw_sp_trap_policer_init, 5186 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 5187 .trap_policer_set = mlxsw_sp_trap_policer_set, 5188 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 5189 .txhdr_construct = mlxsw_sp_txhdr_construct, 5190 .resources_register = mlxsw_sp2_resources_register, 5191 .params_register = mlxsw_sp2_params_register, 5192 .params_unregister = mlxsw_sp2_params_unregister, 5193 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5194 .txhdr_len = MLXSW_TXHDR_LEN, 5195 .profile = &mlxsw_sp2_config_profile, 5196 .res_query_enabled = true, 5197 }; 5198 5199 static struct mlxsw_driver mlxsw_sp3_driver = { 5200 .kind = mlxsw_sp3_driver_name, 5201 .priv_size = sizeof(struct mlxsw_sp), 5202 .init = mlxsw_sp3_init, 5203 .fini = mlxsw_sp_fini, 5204 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5205 .port_split = mlxsw_sp_port_split, 5206 .port_unsplit = mlxsw_sp_port_unsplit, 5207 .sb_pool_get = mlxsw_sp_sb_pool_get, 5208 .sb_pool_set = mlxsw_sp_sb_pool_set, 5209 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5210 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5211 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5212 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5213 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5214 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5215 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5216 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5217 .flash_update = mlxsw_sp_flash_update, 5218 .trap_init = mlxsw_sp_trap_init, 5219 .trap_fini = mlxsw_sp_trap_fini, 5220 .trap_action_set = mlxsw_sp_trap_action_set, 5221 .trap_group_init = mlxsw_sp_trap_group_init, 5222 .trap_group_set = mlxsw_sp_trap_group_set, 5223 .trap_policer_init = mlxsw_sp_trap_policer_init, 5224 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 5225 .trap_policer_set = mlxsw_sp_trap_policer_set, 5226 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 5227 .txhdr_construct = 
mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
};

/* True iff 'dev' is an mlxsw front-panel port netdev. */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

/* Lower-device walk callback: record the first mlxsw port found and stop
 * the walk (non-zero return).
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

/* Find the mlxsw port at or below 'dev' (e.g. under bridge/LAG/VLAN
 * uppers), or NULL if there is none.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Resolve 'dev' to its mlxsw_sp instance, or NULL when unrelated. */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ?
mlxsw_sp_port->mlxsw_sp : NULL;
}

/* Like mlxsw_sp_port_dev_lower_find(), but safe to call from an RCU
 * read-side critical section.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Find the mlxsw port under 'dev' and take a reference on its netdev.
 * Release with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

/* Drop the reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

/* Make the port leave any bridge the LAG device itself, or any of the
 * LAG's direct uppers, is a member of.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

/* Create a LAG record in hardware via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Destroy a hardware LAG record via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Map the port into a LAG at the given collector index (SLCOR register). */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Remove the port's collector mapping from the LAG. */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable traffic collection on the port's LAG collector. */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Disable traffic collection on the port's LAG collector. */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Resolve 'lag_dev' to a LAG index: reuse an existing mapping if one
 * exists, otherwise pick the first free index.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if
(lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	/* All LAG indexes are in use. */
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

/* Validate that 'lag_dev' can be offloaded: a LAG index is available and
 * the LAG uses hash Tx. Failures are reported via extack.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

/* Find a free member slot within the given LAG. */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

/* Join the port to the LAG represented by 'lag_dev', creating the hardware
 * LAG on first use.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id,
port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;

err_col_port_add:
	/* ref_count was not yet incremented, so zero means we created the
	 * hardware LAG above and must destroy it again.
	 */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

/* Detach the port from its LAG, destroying the hardware LAG when the last
 * member leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}

/* Add the port to the LAG's hardware distributor (SLDR register). */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl,
lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Remove the port from the LAG's hardware distributor. */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Enable collection and distribution for the port; rolls the collector
 * back if enabling the distributor fails.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Disable distribution and collection for the port; re-adds the
 * distributor if disabling the collector fails.
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Reflect the bonding driver's tx_enabled state into hardware. */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

/* Set the STP state of every VLAN on the port to forwarding (enable) or
 * discarding (disable) via the SPMS register.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp =
mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	/* Heap-allocate the register payload (NOTE(review): presumably
	 * MLXSW_REG_SPMS_LEN is too large for the stack — confirm).
	 */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Prepare the port for Open vSwitch: Virtual Port mode, STP forwarding,
 * all VLANs enabled with learning disabled. Unwinds fully on failure.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning for the VIDs already processed. */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

/* Undo mlxsw_sp_port_ovs_join() in reverse order. */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

static bool
mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

/* In a VLAN-aware bridge, each VxLAN device must map to a distinct VLAN;
 * returns false when two VxLAN lowers share the same mapped VID.
 */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

/* Check whether a bridge with VxLAN lowers can be offloaded; reports the
 * reason via extack when it cannot.
 */
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

/* Validate (PRECHANGEUPPER) and react to (CHANGEUPPER) topology changes
 * above a front-panel port.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
5748 mlxsw_sp_port = netdev_priv(dev); 5749 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5750 info = ptr; 5751 extack = netdev_notifier_info_to_extack(&info->info); 5752 5753 switch (event) { 5754 case NETDEV_PRECHANGEUPPER: 5755 upper_dev = info->upper_dev; 5756 if (!is_vlan_dev(upper_dev) && 5757 !netif_is_lag_master(upper_dev) && 5758 !netif_is_bridge_master(upper_dev) && 5759 !netif_is_ovs_master(upper_dev) && 5760 !netif_is_macvlan(upper_dev)) { 5761 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5762 return -EINVAL; 5763 } 5764 if (!info->linking) 5765 break; 5766 if (netif_is_bridge_master(upper_dev) && 5767 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 5768 mlxsw_sp_bridge_has_vxlan(upper_dev) && 5769 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5770 return -EOPNOTSUPP; 5771 if (netdev_has_any_upper_dev(upper_dev) && 5772 (!netif_is_bridge_master(upper_dev) || 5773 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 5774 upper_dev))) { 5775 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 5776 return -EINVAL; 5777 } 5778 if (netif_is_lag_master(upper_dev) && 5779 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 5780 info->upper_info, extack)) 5781 return -EINVAL; 5782 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 5783 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 5784 return -EINVAL; 5785 } 5786 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 5787 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 5788 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 5789 return -EINVAL; 5790 } 5791 if (netif_is_macvlan(upper_dev) && 5792 !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) { 5793 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 5794 return -EOPNOTSUPP; 5795 } 5796 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 5797 NL_SET_ERR_MSG_MOD(extack, "Master device is 
an OVS master and this device has a VLAN"); 5798 return -EINVAL; 5799 } 5800 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 5801 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 5802 return -EINVAL; 5803 } 5804 break; 5805 case NETDEV_CHANGEUPPER: 5806 upper_dev = info->upper_dev; 5807 if (netif_is_bridge_master(upper_dev)) { 5808 if (info->linking) 5809 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 5810 lower_dev, 5811 upper_dev, 5812 extack); 5813 else 5814 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 5815 lower_dev, 5816 upper_dev); 5817 } else if (netif_is_lag_master(upper_dev)) { 5818 if (info->linking) { 5819 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 5820 upper_dev); 5821 } else { 5822 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 5823 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 5824 upper_dev); 5825 } 5826 } else if (netif_is_ovs_master(upper_dev)) { 5827 if (info->linking) 5828 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 5829 else 5830 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 5831 } else if (netif_is_macvlan(upper_dev)) { 5832 if (!info->linking) 5833 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5834 } else if (is_vlan_dev(upper_dev)) { 5835 struct net_device *br_dev; 5836 5837 if (!netif_is_bridge_port(upper_dev)) 5838 break; 5839 if (info->linking) 5840 break; 5841 br_dev = netdev_master_upper_dev_get(upper_dev); 5842 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 5843 br_dev); 5844 } 5845 break; 5846 } 5847 5848 return err; 5849 } 5850 5851 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 5852 unsigned long event, void *ptr) 5853 { 5854 struct netdev_notifier_changelowerstate_info *info; 5855 struct mlxsw_sp_port *mlxsw_sp_port; 5856 int err; 5857 5858 mlxsw_sp_port = netdev_priv(dev); 5859 info = ptr; 5860 5861 switch (event) { 5862 case NETDEV_CHANGELOWERSTATE: 5863 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 5864 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 5865 
info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

/* Dispatch a notifier event for a front-panel port to the appropriate
 * handler.
 */
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

/* Propagate a LAG device event to each mlxsw port member; stops at the
 * first error.
 */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle topology events for a VLAN device on top of an mlxsw port. */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev) && 5937 mlxsw_sp_bridge_has_vxlan(upper_dev) && 5938 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5939 return -EOPNOTSUPP; 5940 if (netdev_has_any_upper_dev(upper_dev) && 5941 (!netif_is_bridge_master(upper_dev) || 5942 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 5943 upper_dev))) { 5944 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 5945 return -EINVAL; 5946 } 5947 if (netif_is_macvlan(upper_dev) && 5948 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 5949 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 5950 return -EOPNOTSUPP; 5951 } 5952 break; 5953 case NETDEV_CHANGEUPPER: 5954 upper_dev = info->upper_dev; 5955 if (netif_is_bridge_master(upper_dev)) { 5956 if (info->linking) 5957 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 5958 vlan_dev, 5959 upper_dev, 5960 extack); 5961 else 5962 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 5963 vlan_dev, 5964 upper_dev); 5965 } else if (netif_is_macvlan(upper_dev)) { 5966 if (!info->linking) 5967 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5968 } else { 5969 err = -EINVAL; 5970 WARN_ON(1); 5971 } 5972 break; 5973 } 5974 5975 return err; 5976 } 5977 5978 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 5979 struct net_device *lag_dev, 5980 unsigned long event, 5981 void *ptr, u16 vid) 5982 { 5983 struct net_device *dev; 5984 struct list_head *iter; 5985 int ret; 5986 5987 netdev_for_each_lower_dev(lag_dev, dev, iter) { 5988 if (mlxsw_sp_port_dev_check(dev)) { 5989 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 5990 event, ptr, 5991 vid); 5992 if (ret) 5993 return ret; 5994 } 5995 } 5996 5997 return 0; 5998 } 5999 6000 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, 6001 struct net_device *br_dev, 6002 unsigned long event, void *ptr, 6003 u16 vid) 6004 { 6005 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 6006 struct 
netdev_notifier_changeupper_info *info = ptr; 6007 struct netlink_ext_ack *extack; 6008 struct net_device *upper_dev; 6009 6010 if (!mlxsw_sp) 6011 return 0; 6012 6013 extack = netdev_notifier_info_to_extack(&info->info); 6014 6015 switch (event) { 6016 case NETDEV_PRECHANGEUPPER: 6017 upper_dev = info->upper_dev; 6018 if (!netif_is_macvlan(upper_dev)) { 6019 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6020 return -EOPNOTSUPP; 6021 } 6022 if (!info->linking) 6023 break; 6024 if (netif_is_macvlan(upper_dev) && 6025 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 6026 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6027 return -EOPNOTSUPP; 6028 } 6029 break; 6030 case NETDEV_CHANGEUPPER: 6031 upper_dev = info->upper_dev; 6032 if (info->linking) 6033 break; 6034 if (netif_is_macvlan(upper_dev)) 6035 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6036 break; 6037 } 6038 6039 return 0; 6040 } 6041 6042 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 6043 unsigned long event, void *ptr) 6044 { 6045 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 6046 u16 vid = vlan_dev_vlan_id(vlan_dev); 6047 6048 if (mlxsw_sp_port_dev_check(real_dev)) 6049 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 6050 event, ptr, vid); 6051 else if (netif_is_lag_master(real_dev)) 6052 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 6053 real_dev, event, 6054 ptr, vid); 6055 else if (netif_is_bridge_master(real_dev)) 6056 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, 6057 event, ptr, vid); 6058 6059 return 0; 6060 } 6061 6062 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 6063 unsigned long event, void *ptr) 6064 { 6065 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); 6066 struct netdev_notifier_changeupper_info *info = ptr; 6067 struct netlink_ext_ack *extack; 6068 struct net_device *upper_dev; 6069 6070 if (!mlxsw_sp) 6071 return 0; 6072 6073 extack 
= netdev_notifier_info_to_extack(&info->info); 6074 6075 switch (event) { 6076 case NETDEV_PRECHANGEUPPER: 6077 upper_dev = info->upper_dev; 6078 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { 6079 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6080 return -EOPNOTSUPP; 6081 } 6082 if (!info->linking) 6083 break; 6084 if (netif_is_macvlan(upper_dev) && 6085 !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) { 6086 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6087 return -EOPNOTSUPP; 6088 } 6089 break; 6090 case NETDEV_CHANGEUPPER: 6091 upper_dev = info->upper_dev; 6092 if (info->linking) 6093 break; 6094 if (is_vlan_dev(upper_dev)) 6095 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 6096 if (netif_is_macvlan(upper_dev)) 6097 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6098 break; 6099 } 6100 6101 return 0; 6102 } 6103 6104 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 6105 unsigned long event, void *ptr) 6106 { 6107 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 6108 struct netdev_notifier_changeupper_info *info = ptr; 6109 struct netlink_ext_ack *extack; 6110 6111 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 6112 return 0; 6113 6114 extack = netdev_notifier_info_to_extack(&info->info); 6115 6116 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ 6117 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6118 6119 return -EOPNOTSUPP; 6120 } 6121 6122 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) 6123 { 6124 struct netdev_notifier_changeupper_info *info = ptr; 6125 6126 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) 6127 return false; 6128 return netif_is_l3_master(info->upper_dev); 6129 } 6130 6131 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 6132 struct net_device *dev, 6133 unsigned long event, void *ptr) 6134 { 6135 struct netdev_notifier_changeupper_info *cu_info; 6136 
struct netdev_notifier_info *info = ptr; 6137 struct netlink_ext_ack *extack; 6138 struct net_device *upper_dev; 6139 6140 extack = netdev_notifier_info_to_extack(info); 6141 6142 switch (event) { 6143 case NETDEV_CHANGEUPPER: 6144 cu_info = container_of(info, 6145 struct netdev_notifier_changeupper_info, 6146 info); 6147 upper_dev = cu_info->upper_dev; 6148 if (!netif_is_bridge_master(upper_dev)) 6149 return 0; 6150 if (!mlxsw_sp_lower_get(upper_dev)) 6151 return 0; 6152 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 6153 return -EOPNOTSUPP; 6154 if (cu_info->linking) { 6155 if (!netif_running(dev)) 6156 return 0; 6157 /* When the bridge is VLAN-aware, the VNI of the VxLAN 6158 * device needs to be mapped to a VLAN, but at this 6159 * point no VLANs are configured on the VxLAN device 6160 */ 6161 if (br_vlan_enabled(upper_dev)) 6162 return 0; 6163 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 6164 dev, 0, extack); 6165 } else { 6166 /* VLANs were already flushed, which triggered the 6167 * necessary cleanup 6168 */ 6169 if (br_vlan_enabled(upper_dev)) 6170 return 0; 6171 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6172 } 6173 break; 6174 case NETDEV_PRE_UP: 6175 upper_dev = netdev_master_upper_dev_get(dev); 6176 if (!upper_dev) 6177 return 0; 6178 if (!netif_is_bridge_master(upper_dev)) 6179 return 0; 6180 if (!mlxsw_sp_lower_get(upper_dev)) 6181 return 0; 6182 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 6183 extack); 6184 case NETDEV_DOWN: 6185 upper_dev = netdev_master_upper_dev_get(dev); 6186 if (!upper_dev) 6187 return 0; 6188 if (!netif_is_bridge_master(upper_dev)) 6189 return 0; 6190 if (!mlxsw_sp_lower_get(upper_dev)) 6191 return 0; 6192 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6193 break; 6194 } 6195 6196 return 0; 6197 } 6198 6199 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 6200 unsigned long event, void *ptr) 6201 { 6202 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6203 struct 
mlxsw_sp_span_entry *span_entry; 6204 struct mlxsw_sp *mlxsw_sp; 6205 int err = 0; 6206 6207 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 6208 if (event == NETDEV_UNREGISTER) { 6209 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 6210 if (span_entry) 6211 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 6212 } 6213 mlxsw_sp_span_respin(mlxsw_sp); 6214 6215 if (netif_is_vxlan(dev)) 6216 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 6217 if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) 6218 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, 6219 event, ptr); 6220 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) 6221 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, 6222 event, ptr); 6223 else if (event == NETDEV_PRE_CHANGEADDR || 6224 event == NETDEV_CHANGEADDR || 6225 event == NETDEV_CHANGEMTU) 6226 err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); 6227 else if (mlxsw_sp_is_vrf_event(event, ptr)) 6228 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); 6229 else if (mlxsw_sp_port_dev_check(dev)) 6230 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); 6231 else if (netif_is_lag_master(dev)) 6232 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 6233 else if (is_vlan_dev(dev)) 6234 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 6235 else if (netif_is_bridge_master(dev)) 6236 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); 6237 else if (netif_is_macvlan(dev)) 6238 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 6239 6240 return notifier_from_errno(err); 6241 } 6242 6243 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { 6244 .notifier_call = mlxsw_sp_inetaddr_valid_event, 6245 }; 6246 6247 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { 6248 .notifier_call = mlxsw_sp_inet6addr_valid_event, 6249 }; 6250 6251 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 6252 {PCI_VDEVICE(MELLANOX, 
PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 6253 {0, }, 6254 }; 6255 6256 static struct pci_driver mlxsw_sp1_pci_driver = { 6257 .name = mlxsw_sp1_driver_name, 6258 .id_table = mlxsw_sp1_pci_id_table, 6259 }; 6260 6261 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 6262 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 6263 {0, }, 6264 }; 6265 6266 static struct pci_driver mlxsw_sp2_pci_driver = { 6267 .name = mlxsw_sp2_driver_name, 6268 .id_table = mlxsw_sp2_pci_id_table, 6269 }; 6270 6271 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = { 6272 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0}, 6273 {0, }, 6274 }; 6275 6276 static struct pci_driver mlxsw_sp3_pci_driver = { 6277 .name = mlxsw_sp3_driver_name, 6278 .id_table = mlxsw_sp3_pci_id_table, 6279 }; 6280 6281 static int __init mlxsw_sp_module_init(void) 6282 { 6283 int err; 6284 6285 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6286 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6287 6288 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 6289 if (err) 6290 goto err_sp1_core_driver_register; 6291 6292 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 6293 if (err) 6294 goto err_sp2_core_driver_register; 6295 6296 err = mlxsw_core_driver_register(&mlxsw_sp3_driver); 6297 if (err) 6298 goto err_sp3_core_driver_register; 6299 6300 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 6301 if (err) 6302 goto err_sp1_pci_driver_register; 6303 6304 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 6305 if (err) 6306 goto err_sp2_pci_driver_register; 6307 6308 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver); 6309 if (err) 6310 goto err_sp3_pci_driver_register; 6311 6312 return 0; 6313 6314 err_sp3_pci_driver_register: 6315 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6316 err_sp2_pci_driver_register: 6317 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6318 err_sp1_pci_driver_register: 6319 
mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 6320 err_sp3_core_driver_register: 6321 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6322 err_sp2_core_driver_register: 6323 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6324 err_sp1_core_driver_register: 6325 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6326 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6327 return err; 6328 } 6329 6330 static void __exit mlxsw_sp_module_exit(void) 6331 { 6332 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 6333 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6334 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6335 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 6336 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6337 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6338 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6339 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6340 } 6341 6342 module_init(mlxsw_sp_module_init); 6343 module_exit(mlxsw_sp_module_exit); 6344 6345 MODULE_LICENSE("Dual BSD/GPL"); 6346 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 6347 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 6348 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 6349 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 6350 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); 6351 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 6352 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); 6353 MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME); 6354