1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/jhash.h> 25 #include <net/switchdev.h> 26 #include <net/pkt_cls.h> 27 #include <net/tc_act/tc_mirred.h> 28 #include <net/netevent.h> 29 #include <net/tc_act/tc_sample.h> 30 #include <net/addrconf.h> 31 32 #include "spectrum.h" 33 #include "pci.h" 34 #include "core.h" 35 #include "core_env.h" 36 #include "reg.h" 37 #include "port.h" 38 #include "trap.h" 39 #include "txheader.h" 40 #include "spectrum_cnt.h" 41 #include "spectrum_dpipe.h" 42 #include "spectrum_acl_flex_actions.h" 43 #include "spectrum_span.h" 44 #include "spectrum_ptp.h" 45 #include "../mlxfw/mlxfw.h" 46 47 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) 48 49 #define MLXSW_SP1_FWREV_MAJOR 13 50 #define MLXSW_SP1_FWREV_MINOR 2000 51 #define MLXSW_SP1_FWREV_SUBMINOR 1886 52 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 53 54 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 55 .major = MLXSW_SP1_FWREV_MAJOR, 56 .minor = MLXSW_SP1_FWREV_MINOR, 57 .subminor = MLXSW_SP1_FWREV_SUBMINOR, 58 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 59 }; 60 61 #define MLXSW_SP1_FW_FILENAME \ 62 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 63 "." __stringify(MLXSW_SP1_FWREV_MINOR) \ 64 "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" 65 66 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 67 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 68 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; 69 static const char mlxsw_sp_driver_version[] = "1.0"; 70 71 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 72 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 73 }; 74 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 75 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 76 }; 77 78 /* tx_hdr_version 79 * Tx header version. 80 * Must be set to 1. 81 */ 82 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 83 84 /* tx_hdr_ctl 85 * Packet control type. 86 * 0 - Ethernet control (e.g. EMADs, LACP) 87 * 1 - Ethernet data 88 */ 89 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 90 91 /* tx_hdr_proto 92 * Packet protocol type. Must be set to 1 (Ethernet). 93 */ 94 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 95 96 /* tx_hdr_rx_is_router 97 * Packet is sent from the router. Valid for data packets only. 98 */ 99 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 100 101 /* tx_hdr_fid_valid 102 * Indicates if the 'fid' field is valid and should be used for 103 * forwarding lookup. Valid for data packets only. 104 */ 105 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 106 107 /* tx_hdr_swid 108 * Switch partition ID. Must be set to 0. 109 */ 110 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 111 112 /* tx_hdr_control_tclass 113 * Indicates if the packet should use the control TClass and not one 114 * of the data TClasses. 115 */ 116 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 117 118 /* tx_hdr_etclass 119 * Egress TClass to be used on the egress device on the egress port. 120 */ 121 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 122 123 /* tx_hdr_port_mid 124 * Destination local port for unicast packets. 125 * Destination multicast ID for multicast packets. 126 * 127 * Control packets are directed to a specific egress port, while data 128 * packets are transmitted through the CPU port (0) into the switch partition, 129 * where forwarding rules are applied. 130 */ 131 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 132 133 /* tx_hdr_fid 134 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 135 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 136 * Valid for data packets only. 137 */ 138 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 139 140 /* tx_hdr_type 141 * 0 - Data packets 142 * 6 - Control packets 143 */ 144 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 145 146 struct mlxsw_sp_mlxfw_dev { 147 struct mlxfw_dev mlxfw_dev; 148 struct mlxsw_sp *mlxsw_sp; 149 }; 150 151 struct mlxsw_sp_ptp_ops { 152 struct mlxsw_sp_ptp_clock * 153 (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev); 154 void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock); 155 156 struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp); 157 void (*fini)(struct mlxsw_sp_ptp_state *ptp_state); 158 159 /* Notify a driver that a packet that might be PTP was received. Driver 160 * is responsible for freeing the passed-in SKB. 161 */ 162 void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 163 u8 local_port); 164 165 /* Notify a driver that a timestamped packet was transmitted. Driver 166 * is responsible for freeing the passed-in SKB. 167 */ 168 void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 169 u8 local_port); 170 171 int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, 172 struct hwtstamp_config *config); 173 int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port, 174 struct hwtstamp_config *config); 175 void (*shaper_work)(struct work_struct *work); 176 int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, 177 struct ethtool_ts_info *info); 178 int (*get_stats_count)(void); 179 void (*get_stats_strings)(u8 **p); 180 void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, 181 u64 *data, int data_index); 182 }; 183 184 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 185 u16 component_index, u32 *p_max_size, 186 u8 *p_align_bits, u16 *p_max_write_size) 187 { 188 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 189 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 190 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 191 char mcqi_pl[MLXSW_REG_MCQI_LEN]; 192 int err; 193 194 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 195 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 196 if (err) 197 return err; 198 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 199 p_max_write_size); 200 201 *p_align_bits = max_t(u8, *p_align_bits, 2); 202 *p_max_write_size = min_t(u16, *p_max_write_size, 203 MLXSW_REG_MCDA_MAX_DATA_LEN); 204 return 0; 205 } 206 207 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 208 { 209 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 210 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 211 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 212 char mcc_pl[MLXSW_REG_MCC_LEN]; 213 u8 control_state; 214 int err; 215 216 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 217 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 218 if (err) 219 return err; 220 221 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 222 if (control_state != MLXFW_FSM_STATE_IDLE) 223 return -EBUSY; 224 225 mlxsw_reg_mcc_pack(mcc_pl, 226 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 227 0, *fwhandle, 0); 228 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 229 } 230 231 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 232 u32 fwhandle, u16 component_index, 233 u32 component_size) 234 { 235 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 236 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 237 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 238 char mcc_pl[MLXSW_REG_MCC_LEN]; 239 240 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 241 component_index, fwhandle, component_size); 242 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 243 } 244 245 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 246 u32 fwhandle, u8 *data, u16 size, 247 u32 offset) 248 { 249 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 250 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 251 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 252 char mcda_pl[MLXSW_REG_MCDA_LEN]; 253 254 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 255 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 256 } 257 258 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 259 u32 fwhandle, u16 component_index) 260 { 261 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 262 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 263 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 264 char mcc_pl[MLXSW_REG_MCC_LEN]; 265 266 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 267 component_index, fwhandle, 0); 268 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 269 } 270 271 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 272 { 273 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 274 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 275 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 276 char mcc_pl[MLXSW_REG_MCC_LEN]; 277 278 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 279 fwhandle, 0); 280 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 281 } 282 283 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 284 enum mlxfw_fsm_state *fsm_state, 285 enum mlxfw_fsm_state_err *fsm_state_err) 286 { 287 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 288 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 289 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 290 char mcc_pl[MLXSW_REG_MCC_LEN]; 291 u8 control_state; 292 u8 error_code; 293 int err; 294 295 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 296 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 297 if (err) 298 return err; 299 300 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 301 *fsm_state = control_state; 302 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 303 MLXFW_FSM_STATE_ERR_MAX); 304 return 0; 305 } 306 307 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 308 { 309 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 310 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 311 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 312 char mcc_pl[MLXSW_REG_MCC_LEN]; 313 314 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 315 fwhandle, 0); 316 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 317 } 318 319 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 320 { 321 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 322 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 323 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 324 char mcc_pl[MLXSW_REG_MCC_LEN]; 325 326 mlxsw_reg_mcc_pack(mcc_pl, 327 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 328 fwhandle, 0); 329 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 330 } 331 332 static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev, 333 const char *msg, const char *comp_name, 334 u32 done_bytes, u32 total_bytes) 335 { 336 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 337 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 338 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 339 340 devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core), 341 msg, comp_name, 342 done_bytes, total_bytes); 343 } 344 345 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 346 .component_query = mlxsw_sp_component_query, 347 .fsm_lock = mlxsw_sp_fsm_lock, 348 .fsm_component_update = mlxsw_sp_fsm_component_update, 349 .fsm_block_download = mlxsw_sp_fsm_block_download, 350 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 351 .fsm_activate = mlxsw_sp_fsm_activate, 352 .fsm_query_state = mlxsw_sp_fsm_query_state, 353 .fsm_cancel = mlxsw_sp_fsm_cancel, 354 .fsm_release = mlxsw_sp_fsm_release, 355 .status_notify = mlxsw_sp_status_notify, 356 }; 357 358 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 359 const struct firmware *firmware, 360 struct netlink_ext_ack *extack) 361 { 362 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 363 .mlxfw_dev = { 364 .ops = &mlxsw_sp_mlxfw_dev_ops, 365 .psid = mlxsw_sp->bus_info->psid, 366 .psid_size = strlen(mlxsw_sp->bus_info->psid), 367 }, 368 .mlxsw_sp = mlxsw_sp 369 }; 370 int err; 371 372 mlxsw_core_fw_flash_start(mlxsw_sp->core); 373 devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core)); 374 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, 375 firmware, extack); 376 devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core)); 377 mlxsw_core_fw_flash_end(mlxsw_sp->core); 378 379 return err; 380 } 381 382 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 383 { 384 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 385 const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 386 const char *fw_filename = mlxsw_sp->fw_filename; 387 union devlink_param_value value; 388 const struct firmware *firmware; 389 int err; 390 391 /* Don't check if driver does not require it */ 392 if (!req_rev || !fw_filename) 393 return 0; 394 395 /* Don't check if devlink 'fw_load_policy' param is 'flash' */ 396 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), 397 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 398 &value); 399 if (err) 400 return err; 401 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) 402 return 0; 403 404 /* Validate driver & FW are compatible */ 405 if (rev->major != req_rev->major) { 406 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 407 rev->major, req_rev->major); 408 return -EINVAL; 409 } 410 if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == 411 MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && 412 (rev->minor > req_rev->minor || 413 (rev->minor == req_rev->minor && 414 rev->subminor >= req_rev->subminor))) 415 return 0; 416 417 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", 418 rev->major, rev->minor, rev->subminor); 419 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 420 fw_filename); 421 422 err = request_firmware_direct(&firmware, fw_filename, 423 mlxsw_sp->bus_info->dev); 424 if (err) { 425 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 426 fw_filename); 427 return err; 428 } 429 430 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL); 431 release_firmware(firmware); 432 if (err) 433 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 434 435 /* On FW flash success, tell the caller FW reset is needed 436 * if current FW supports it. 437 */ 438 if (rev->minor >= req_rev->can_reset_minor) 439 return err ? err : -EAGAIN; 440 else 441 return 0; 442 } 443 444 static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core, 445 const char *file_name, const char *component, 446 struct netlink_ext_ack *extack) 447 { 448 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 449 const struct firmware *firmware; 450 int err; 451 452 if (component) 453 return -EOPNOTSUPP; 454 455 err = request_firmware_direct(&firmware, file_name, 456 mlxsw_sp->bus_info->dev); 457 if (err) 458 return err; 459 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack); 460 release_firmware(firmware); 461 462 return err; 463 } 464 465 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 466 unsigned int counter_index, u64 *packets, 467 u64 *bytes) 468 { 469 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 470 int err; 471 472 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 473 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 474 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 475 if (err) 476 return err; 477 if (packets) 478 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 479 if (bytes) 480 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 481 return 0; 482 } 483 484 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 485 unsigned int counter_index) 486 { 487 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 488 489 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 490 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 491 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 492 } 493 494 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 495 unsigned int *p_counter_index) 496 { 497 int err; 498 499 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 500 p_counter_index); 501 if (err) 502 return err; 503 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 504 if (err) 505 goto err_counter_clear; 506 return 0; 507 508 err_counter_clear: 509 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 510 *p_counter_index); 511 return err; 512 } 513 514 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 515 unsigned int counter_index) 516 { 517 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 518 counter_index); 519 } 520 521 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 522 const struct mlxsw_tx_info *tx_info) 523 { 524 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 525 526 memset(txhdr, 0, MLXSW_TXHDR_LEN); 527 528 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 529 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 530 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 531 mlxsw_tx_hdr_swid_set(txhdr, 0); 532 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 533 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 534 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 535 } 536 537 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 538 { 539 switch (state) { 540 case BR_STATE_FORWARDING: 541 return MLXSW_REG_SPMS_STATE_FORWARDING; 542 case BR_STATE_LEARNING: 543 return MLXSW_REG_SPMS_STATE_LEARNING; 544 case BR_STATE_LISTENING: /* fall-through */ 545 case BR_STATE_DISABLED: /* fall-through */ 546 case BR_STATE_BLOCKING: 547 return MLXSW_REG_SPMS_STATE_DISCARDING; 548 default: 549 BUG(); 550 } 551 } 552 553 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 554 u8 state) 555 { 556 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 557 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 558 char *spms_pl; 559 int err; 560 561 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 562 if (!spms_pl) 563 return -ENOMEM; 564 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 565 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 566 567 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 568 kfree(spms_pl); 569 return err; 570 } 571 572 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 573 { 574 char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 575 int err; 576 577 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 578 if (err) 579 return err; 580 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 581 return 0; 582 } 583 584 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, 585 bool enable, u32 rate) 586 { 587 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 588 char mpsc_pl[MLXSW_REG_MPSC_LEN]; 589 590 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); 591 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl); 592 } 593 594 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 595 bool is_up) 596 { 597 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 598 char paos_pl[MLXSW_REG_PAOS_LEN]; 599 600 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 601 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 602 MLXSW_PORT_ADMIN_STATUS_DOWN); 603 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 604 } 605 606 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 607 unsigned char *addr) 608 { 609 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 610 char ppad_pl[MLXSW_REG_PPAD_LEN]; 611 612 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 613 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 614 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 615 } 616 617 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 618 { 619 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 620 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 621 622 ether_addr_copy(addr, mlxsw_sp->base_mac); 623 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 624 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 625 } 626 627 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 628 { 629 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 630 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 631 int max_mtu; 632 int err; 633 634 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 635 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 636 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 637 if (err) 638 return err; 639 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 640 641 if (mtu > max_mtu) 642 return -EINVAL; 643 644 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 645 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 646 } 647 648 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 649 { 650 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 651 char pspa_pl[MLXSW_REG_PSPA_LEN]; 652 653 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 654 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 655 } 656 657 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 658 { 659 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 660 char svpe_pl[MLXSW_REG_SVPE_LEN]; 661 662 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 663 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 664 } 665 666 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 667 bool learn_enable) 668 { 669 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 670 char *spvmlr_pl; 671 int err; 672 673 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 674 if (!spvmlr_pl) 675 return -ENOMEM; 676 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 677 learn_enable); 678 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 679 kfree(spvmlr_pl); 680 return err; 681 } 682 683 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 684 u16 vid) 685 { 686 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 687 char spvid_pl[MLXSW_REG_SPVID_LEN]; 688 689 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 690 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 691 } 692 693 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 694 bool allow) 695 { 696 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 697 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 698 699 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 700 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 701 } 702 703 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 704 { 705 int err; 706 707 if (!vid) { 708 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 709 if (err) 710 return err; 711 } else { 712 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 713 if (err) 714 return err; 715 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 716 if (err) 717 goto err_port_allow_untagged_set; 718 } 719 720 mlxsw_sp_port->pvid = vid; 721 return 0; 722 723 err_port_allow_untagged_set: 724 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 725 return err; 726 } 727 728 static int 729 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 730 { 731 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 732 char sspr_pl[MLXSW_REG_SSPR_LEN]; 733 734 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 735 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 736 } 737 738 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, 739 u8 local_port, u8 *p_module, 740 u8 *p_width, u8 *p_lane) 741 { 742 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 743 int err; 744 745 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 746 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 747 if (err) 748 return err; 749 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 750 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); 751 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 752 return 0; 753 } 754 755 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port, 756 u8 module, u8 width, u8 lane) 757 { 758 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 759 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 760 int i; 761 762 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 763 mlxsw_reg_pmlp_width_set(pmlp_pl, width); 764 for (i = 0; i < width; i++) { 765 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module); 766 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */ 767 } 768 769 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 770 } 771 772 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 773 { 774 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 775 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 776 777 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 778 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 779 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 780 } 781 782 static int mlxsw_sp_port_open(struct net_device *dev) 783 { 784 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 785 int err; 786 787 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 788 if (err) 789 return err; 790 netif_start_queue(dev); 791 return 0; 792 } 793 794 static int mlxsw_sp_port_stop(struct net_device *dev) 795 { 796 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 797 798 netif_stop_queue(dev); 799 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 800 } 801 802 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 803 struct net_device *dev) 804 { 805 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 806 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 807 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 808 const struct mlxsw_tx_info tx_info = { 809 .local_port = mlxsw_sp_port->local_port, 810 .is_emad = false, 811 }; 812 u64 len; 813 int err; 814 815 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); 816 817 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 818 return NETDEV_TX_BUSY; 819 820 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { 821 struct sk_buff *skb_orig = skb; 822 823 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); 824 if (!skb) { 825 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 826 dev_kfree_skb_any(skb_orig); 827 return NETDEV_TX_OK; 828 } 829 dev_consume_skb_any(skb_orig); 830 } 831 832 if (eth_skb_pad(skb)) { 833 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 834 return NETDEV_TX_OK; 835 } 836 837 mlxsw_sp_txhdr_construct(skb, &tx_info); 838 /* TX header is consumed by HW on the way so we shouldn't count its 839 * bytes as being sent. 840 */ 841 len = skb->len - MLXSW_TXHDR_LEN; 842 843 /* Due to a race we might fail here because of a full queue. In that 844 * unlikely case we simply drop the packet. 845 */ 846 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 847 848 if (!err) { 849 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 850 u64_stats_update_begin(&pcpu_stats->syncp); 851 pcpu_stats->tx_packets++; 852 pcpu_stats->tx_bytes += len; 853 u64_stats_update_end(&pcpu_stats->syncp); 854 } else { 855 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 856 dev_kfree_skb_any(skb); 857 } 858 return NETDEV_TX_OK; 859 } 860 861 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 862 { 863 } 864 865 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 866 { 867 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 868 struct sockaddr *addr = p; 869 int err; 870 871 if (!is_valid_ether_addr(addr->sa_data)) 872 return -EADDRNOTAVAIL; 873 874 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 875 if (err) 876 return err; 877 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 878 return 0; 879 } 880 881 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 882 int mtu) 883 { 884 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 885 } 886 887 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 888 889 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 890 u16 delay) 891 { 892 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 893 BITS_PER_BYTE)); 894 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 895 mtu); 896 } 897 898 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 899 * Assumes 100m cable and maximum MTU. 900 */ 901 #define MLXSW_SP_PAUSE_DELAY 58752 902 903 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 904 u16 delay, bool pfc, bool pause) 905 { 906 if (pfc) 907 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 908 else if (pause) 909 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 910 else 911 return 0; 912 } 913 914 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 915 bool lossy) 916 { 917 if (lossy) 918 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 919 else 920 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 921 thres); 922 } 923 924 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 925 u8 *prio_tc, bool pause_en, 926 struct ieee_pfc *my_pfc) 927 { 928 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 929 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 930 u16 delay = !!my_pfc ? my_pfc->delay : 0; 931 char pbmc_pl[MLXSW_REG_PBMC_LEN]; 932 u32 taken_headroom_cells = 0; 933 u32 max_headroom_cells; 934 int i, j, err; 935 936 max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp); 937 938 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); 939 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 940 if (err) 941 return err; 942 943 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 944 bool configure = false; 945 bool pfc = false; 946 u16 thres_cells; 947 u16 delay_cells; 948 u16 total_cells; 949 bool lossy; 950 951 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 952 if (prio_tc[j] == i) { 953 pfc = pfc_en & BIT(j); 954 configure = true; 955 break; 956 } 957 } 958 959 if (!configure) 960 continue; 961 962 lossy = !(pfc || pause_en); 963 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 964 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, 965 pfc, pause_en); 966 total_cells = thres_cells + delay_cells; 967 968 taken_headroom_cells += total_cells; 969 if (taken_headroom_cells > max_headroom_cells) 970 return -ENOBUFS; 971 972 mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells, 973 thres_cells, lossy); 974 } 975 976 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 977 } 978 979 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, 980 int mtu, bool pause_en) 981 { 982 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; 983 bool dcb_en = !!mlxsw_sp_port->dcb.ets; 984 struct ieee_pfc *my_pfc; 985 u8 *prio_tc; 986 987 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; 988 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL; 989 990 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, 991 pause_en, my_pfc); 992 } 993 994 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 995 { 996 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 997 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 998 int err; 999 1000 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); 1001 if (err) 1002 return err; 1003 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); 1004 if (err) 1005 goto err_span_port_mtu_update; 1006 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 1007 if (err) 1008 goto err_port_mtu_set; 1009 dev->mtu = mtu; 1010 return 0; 1011 1012 err_port_mtu_set: 1013 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); 1014 err_span_port_mtu_update: 1015 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1016 return err; 1017 } 1018 1019 static int 1020 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 1021 struct rtnl_link_stats64 *stats) 1022 { 1023 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1024 struct mlxsw_sp_port_pcpu_stats *p; 1025 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1026 u32 tx_dropped = 0; 1027 unsigned int start; 1028 int i; 1029 1030 for_each_possible_cpu(i) { 1031 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 1032 do { 1033 start = u64_stats_fetch_begin_irq(&p->syncp); 1034 rx_packets = p->rx_packets; 1035 rx_bytes = p->rx_bytes; 1036 tx_packets = p->tx_packets; 1037 tx_bytes = p->tx_bytes; 1038 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 1039 1040 stats->rx_packets += rx_packets; 1041 stats->rx_bytes += rx_bytes; 1042 stats->tx_packets += tx_packets; 1043 stats->tx_bytes += tx_bytes; 1044 /* tx_dropped is u32, updated without syncp protection. */ 1045 tx_dropped += p->tx_dropped; 1046 } 1047 stats->tx_dropped = tx_dropped; 1048 return 0; 1049 } 1050 1051 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) 1052 { 1053 switch (attr_id) { 1054 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1055 return true; 1056 } 1057 1058 return false; 1059 } 1060 1061 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev, 1062 void *sp) 1063 { 1064 switch (attr_id) { 1065 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1066 return mlxsw_sp_port_get_sw_stats64(dev, sp); 1067 } 1068 1069 return -EINVAL; 1070 } 1071 1072 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, 1073 int prio, char *ppcnt_pl) 1074 { 1075 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1076 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1077 1078 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); 1079 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 1080 } 1081 1082 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev, 1083 struct rtnl_link_stats64 *stats) 1084 { 1085 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1086 int err; 1087 1088 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 1089 0, ppcnt_pl); 1090 if (err) 1091 goto out; 1092 1093 stats->tx_packets = 1094 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); 1095 stats->rx_packets = 1096 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); 1097 stats->tx_bytes = 1098 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); 1099 stats->rx_bytes = 1100 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 1101 stats->multicast = 1102 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 1103 1104 stats->rx_crc_errors = 1105 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 1106 stats->rx_frame_errors = 1107 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 1108 1109 stats->rx_length_errors = ( 1110 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) + 1111 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) + 1112 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl)); 1113 1114 stats->rx_errors = (stats->rx_crc_errors + 1115 stats->rx_frame_errors + stats->rx_length_errors); 1116 1117 out: 1118 return err; 1119 } 1120 1121 static void 1122 mlxsw_sp_port_get_hw_xstats(struct net_device *dev, 1123 struct mlxsw_sp_port_xstats *xstats) 1124 { 1125 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1126 int err, i; 1127 1128 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 1129 ppcnt_pl); 1130 if (!err) 1131 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); 1132 1133 for (i = 0; i < TC_MAX_QUEUE; i++) { 1134 err = mlxsw_sp_port_get_stats_raw(dev, 1135 MLXSW_REG_PPCNT_TC_CONG_TC, 1136 i, ppcnt_pl); 1137 if (!err) 1138 xstats->wred_drop[i] = 1139 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); 1140 1141 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, 1142 i, ppcnt_pl); 1143 if (err) 1144 continue; 1145 1146 xstats->backlog[i] = 1147 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 1148 xstats->tail_drop[i] = 1149 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); 1150 } 1151 1152 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1153 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, 1154 i, ppcnt_pl); 1155 if (err) 1156 continue; 1157 1158 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); 1159 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); 1160 } 1161 } 1162 1163 static void update_stats_cache(struct work_struct *work) 1164 { 1165 struct mlxsw_sp_port *mlxsw_sp_port = 1166 container_of(work, struct mlxsw_sp_port, 1167 periodic_hw_stats.update_dw.work); 1168 1169 if (!netif_carrier_ok(mlxsw_sp_port->dev)) 1170 goto out; 1171 1172 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, 1173 &mlxsw_sp_port->periodic_hw_stats.stats); 1174 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev, 1175 &mlxsw_sp_port->periodic_hw_stats.xstats); 1176 1177 out: 1178 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 1179 MLXSW_HW_STATS_UPDATE_TIME); 1180 } 1181 1182 /* Return the stats from a cache that is updated periodically, 1183 * as this function might get called in an atomic context. 1184 */ 1185 static void 1186 mlxsw_sp_port_get_stats64(struct net_device *dev, 1187 struct rtnl_link_stats64 *stats) 1188 { 1189 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1190 1191 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats)); 1192 } 1193 1194 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, 1195 u16 vid_begin, u16 vid_end, 1196 bool is_member, bool untagged) 1197 { 1198 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1199 char *spvm_pl; 1200 int err; 1201 1202 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); 1203 if (!spvm_pl) 1204 return -ENOMEM; 1205 1206 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, 1207 vid_end, is_member, untagged); 1208 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); 1209 kfree(spvm_pl); 1210 return err; 1211 } 1212 1213 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, 1214 u16 vid_end, bool is_member, bool untagged) 1215 { 1216 u16 vid, vid_e; 1217 int err; 1218 1219 for (vid = vid_begin; vid <= vid_end; 1220 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 1221 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), 1222 vid_end); 1223 1224 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, 1225 is_member, untagged); 1226 if (err) 1227 return err; 1228 } 1229 1230 return 0; 1231 } 1232 1233 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port, 1234 bool flush_default) 1235 { 1236 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp; 1237 1238 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp, 1239 &mlxsw_sp_port->vlans_list, list) { 1240 if (!flush_default && 1241 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID) 1242 continue; 1243 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1244 } 1245 } 1246 1247 static void 1248 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1249 { 1250 if (mlxsw_sp_port_vlan->bridge_port) 1251 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 1252 else if (mlxsw_sp_port_vlan->fid) 1253 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 1254 } 1255 1256 struct mlxsw_sp_port_vlan * 1257 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 1258 { 1259 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1260 bool untagged = vid == MLXSW_SP_DEFAULT_VID; 1261 int err; 1262 1263 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1264 if (mlxsw_sp_port_vlan) 1265 return ERR_PTR(-EEXIST); 1266 1267 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged); 1268 if (err) 1269 return ERR_PTR(err); 1270 1271 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL); 1272 if (!mlxsw_sp_port_vlan) { 1273 err = -ENOMEM; 1274 goto err_port_vlan_alloc; 1275 } 1276 1277 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; 1278 mlxsw_sp_port_vlan->vid = vid; 1279 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); 1280 1281 return mlxsw_sp_port_vlan; 1282 1283 err_port_vlan_alloc: 1284 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1285 return ERR_PTR(err); 1286 } 1287 1288 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1289 { 1290 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 1291 u16 vid = mlxsw_sp_port_vlan->vid; 1292 1293 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan); 1294 list_del(&mlxsw_sp_port_vlan->list); 1295 kfree(mlxsw_sp_port_vlan); 1296 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1297 } 1298 1299 static int mlxsw_sp_port_add_vid(struct net_device *dev, 1300 __be16 __always_unused proto, u16 vid) 1301 { 1302 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1303 1304 /* VLAN 0 is added to HW filter when device goes up, but it is 1305 * reserved in our case, so simply return. 1306 */ 1307 if (!vid) 1308 return 0; 1309 1310 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid)); 1311 } 1312 1313 static int mlxsw_sp_port_kill_vid(struct net_device *dev, 1314 __be16 __always_unused proto, u16 vid) 1315 { 1316 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1317 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1318 1319 /* VLAN 0 is removed from HW filter when device goes down, but 1320 * it is reserved in our case, so simply return. 1321 */ 1322 if (!vid) 1323 return 0; 1324 1325 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1326 if (!mlxsw_sp_port_vlan) 1327 return 0; 1328 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1329 1330 return 0; 1331 } 1332 1333 static struct mlxsw_sp_port_mall_tc_entry * 1334 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port, 1335 unsigned long cookie) { 1336 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1337 1338 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list) 1339 if (mall_tc_entry->cookie == cookie) 1340 return mall_tc_entry; 1341 1342 return NULL; 1343 } 1344 1345 static int 1346 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, 1347 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror, 1348 const struct flow_action_entry *act, 1349 bool ingress) 1350 { 1351 enum mlxsw_sp_span_type span_type; 1352 1353 if (!act->dev) { 1354 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n"); 1355 return -EINVAL; 1356 } 1357 1358 mirror->ingress = ingress; 1359 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; 1360 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type, 1361 true, &mirror->span_id); 1362 } 1363 1364 static void 1365 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, 1366 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror) 1367 { 1368 enum mlxsw_sp_span_type span_type; 1369 1370 span_type = mirror->ingress ? 1371 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; 1372 mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id, 1373 span_type, true); 1374 } 1375 1376 static int 1377 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port, 1378 struct tc_cls_matchall_offload *cls, 1379 const struct flow_action_entry *act, 1380 bool ingress) 1381 { 1382 int err; 1383 1384 if (!mlxsw_sp_port->sample) 1385 return -EOPNOTSUPP; 1386 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) { 1387 netdev_err(mlxsw_sp_port->dev, "sample already active\n"); 1388 return -EEXIST; 1389 } 1390 if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) { 1391 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n"); 1392 return -EOPNOTSUPP; 1393 } 1394 1395 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group, 1396 act->sample.psample_group); 1397 mlxsw_sp_port->sample->truncate = act->sample.truncate; 1398 mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size; 1399 mlxsw_sp_port->sample->rate = act->sample.rate; 1400 1401 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate); 1402 if (err) 1403 goto err_port_sample_set; 1404 return 0; 1405 1406 err_port_sample_set: 1407 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); 1408 return err; 1409 } 1410 1411 static void 1412 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port) 1413 { 1414 if (!mlxsw_sp_port->sample) 1415 return; 1416 1417 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1); 1418 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); 1419 } 1420 1421 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1422 struct tc_cls_matchall_offload *f, 1423 bool ingress) 1424 { 1425 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1426 __be16 protocol = f->common.protocol; 1427 struct flow_action_entry *act; 1428 int err; 1429 1430 if (!flow_offload_has_one_action(&f->rule->action)) { 1431 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n"); 1432 return -EOPNOTSUPP; 1433 } 1434 1435 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); 1436 if (!mall_tc_entry) 1437 return -ENOMEM; 1438 mall_tc_entry->cookie = f->cookie; 1439 1440 act = &f->rule->action.entries[0]; 1441 1442 if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) { 1443 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; 1444 1445 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR; 1446 mirror = &mall_tc_entry->mirror; 1447 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, 1448 mirror, act, 1449 ingress); 1450 } else if (act->id == FLOW_ACTION_SAMPLE && 1451 protocol == htons(ETH_P_ALL)) { 1452 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE; 1453 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f, 1454 act, ingress); 1455 } else { 1456 err = -EOPNOTSUPP; 1457 } 1458 1459 if (err) 1460 goto err_add_action; 1461 1462 list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list); 1463 return 0; 1464 1465 err_add_action: 1466 kfree(mall_tc_entry); 1467 return err; 1468 } 1469 1470 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1471 struct tc_cls_matchall_offload *f) 1472 { 1473 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1474 1475 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port, 1476 f->cookie); 1477 if (!mall_tc_entry) { 1478 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); 1479 return; 1480 } 1481 list_del(&mall_tc_entry->list); 1482 1483 switch (mall_tc_entry->type) { 1484 case MLXSW_SP_PORT_MALL_MIRROR: 1485 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port, 1486 &mall_tc_entry->mirror); 1487 break; 1488 case MLXSW_SP_PORT_MALL_SAMPLE: 1489 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port); 1490 break; 1491 default: 1492 WARN_ON(1); 1493 } 1494 1495 kfree(mall_tc_entry); 1496 } 1497 1498 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1499 struct tc_cls_matchall_offload *f, 1500 bool ingress) 1501 { 1502 switch (f->command) { 1503 case TC_CLSMATCHALL_REPLACE: 1504 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f, 1505 ingress); 1506 case TC_CLSMATCHALL_DESTROY: 1507 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f); 1508 return 0; 1509 default: 1510 return -EOPNOTSUPP; 1511 } 1512 } 1513 1514 static int 1515 mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block, 1516 struct flow_cls_offload *f) 1517 { 1518 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block); 1519 1520 switch (f->command) { 1521 case FLOW_CLS_REPLACE: 1522 return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f); 1523 case FLOW_CLS_DESTROY: 1524 mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f); 1525 return 0; 1526 case FLOW_CLS_STATS: 1527 return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f); 1528 case FLOW_CLS_TMPLT_CREATE: 1529 return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f); 1530 case FLOW_CLS_TMPLT_DESTROY: 1531 mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f); 1532 return 0; 1533 default: 1534 return -EOPNOTSUPP; 1535 } 1536 } 1537 1538 static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type, 1539 void *type_data, 1540 void *cb_priv, bool ingress) 1541 { 1542 struct mlxsw_sp_port *mlxsw_sp_port = cb_priv; 1543 1544 switch (type) { 1545 case TC_SETUP_CLSMATCHALL: 1546 if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev, 1547 type_data)) 1548 return -EOPNOTSUPP; 1549 1550 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data, 1551 ingress); 1552 case TC_SETUP_CLSFLOWER: 1553 return 0; 1554 default: 1555 return -EOPNOTSUPP; 1556 } 1557 } 1558 1559 static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type, 1560 void *type_data, 1561 void *cb_priv) 1562 { 1563 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1564 cb_priv, true); 1565 } 1566 1567 static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type, 1568 void *type_data, 1569 void *cb_priv) 1570 { 1571 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1572 cb_priv, false); 1573 } 1574 1575 static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type, 1576 void *type_data, void *cb_priv) 1577 { 1578 struct mlxsw_sp_acl_block *acl_block = cb_priv; 1579 1580 switch (type) { 1581 case TC_SETUP_CLSMATCHALL: 1582 return 0; 1583 case TC_SETUP_CLSFLOWER: 1584 if (mlxsw_sp_acl_block_disabled(acl_block)) 1585 return -EOPNOTSUPP; 1586 1587 return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data); 1588 default: 1589 return -EOPNOTSUPP; 1590 } 1591 } 1592 1593 static void mlxsw_sp_tc_block_flower_release(void *cb_priv) 1594 { 1595 struct mlxsw_sp_acl_block *acl_block = cb_priv; 1596 1597 mlxsw_sp_acl_block_destroy(acl_block); 1598 } 1599 1600 static LIST_HEAD(mlxsw_sp_block_cb_list); 1601 1602 static int 1603 mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, 1604 struct flow_block_offload *f, bool ingress) 1605 { 1606 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1607 struct mlxsw_sp_acl_block *acl_block; 1608 struct flow_block_cb *block_cb; 1609 bool register_block = false; 1610 int err; 1611 1612 block_cb = flow_block_cb_lookup(f->block, 1613 mlxsw_sp_setup_tc_block_cb_flower, 1614 mlxsw_sp); 1615 if (!block_cb) { 1616 acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net); 1617 if (!acl_block) 1618 return -ENOMEM; 1619 block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower, 1620 mlxsw_sp, acl_block, 1621 mlxsw_sp_tc_block_flower_release); 1622 if (IS_ERR(block_cb)) { 1623 mlxsw_sp_acl_block_destroy(acl_block); 1624 err = PTR_ERR(block_cb); 1625 goto err_cb_register; 1626 } 1627 register_block = true; 1628 } else { 1629 acl_block = flow_block_cb_priv(block_cb); 1630 } 1631 flow_block_cb_incref(block_cb); 1632 err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block, 1633 mlxsw_sp_port, ingress, f->extack); 1634 if (err) 1635 goto err_block_bind; 1636 1637 if (ingress) 1638 mlxsw_sp_port->ing_acl_block = acl_block; 1639 else 1640 mlxsw_sp_port->eg_acl_block = acl_block; 1641 1642 if (register_block) { 1643 flow_block_cb_add(block_cb, f); 1644 list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list); 1645 } 1646 1647 return 0; 1648 1649 err_block_bind: 1650 if (!flow_block_cb_decref(block_cb)) 1651 flow_block_cb_free(block_cb); 1652 err_cb_register: 1653 return err; 1654 } 1655 1656 static void 1657 mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port, 1658 struct flow_block_offload *f, bool ingress) 1659 { 1660 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1661 struct mlxsw_sp_acl_block *acl_block; 1662 struct flow_block_cb *block_cb; 1663 int err; 1664 1665 block_cb = flow_block_cb_lookup(f->block, 1666 mlxsw_sp_setup_tc_block_cb_flower, 1667 mlxsw_sp); 1668 if (!block_cb) 1669 return; 1670 1671 if (ingress) 1672 mlxsw_sp_port->ing_acl_block = NULL; 1673 else 1674 mlxsw_sp_port->eg_acl_block = NULL; 1675 1676 acl_block = flow_block_cb_priv(block_cb); 1677 err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block, 1678 mlxsw_sp_port, ingress); 1679 if (!err && !flow_block_cb_decref(block_cb)) { 1680 flow_block_cb_remove(block_cb, f); 1681 list_del(&block_cb->driver_list); 1682 } 1683 } 1684 1685 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, 1686 struct flow_block_offload *f) 1687 { 1688 struct flow_block_cb *block_cb; 1689 flow_setup_cb_t *cb; 1690 bool ingress; 1691 int err; 1692 1693 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { 1694 cb = mlxsw_sp_setup_tc_block_cb_matchall_ig; 1695 ingress = true; 1696 } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 1697 cb = mlxsw_sp_setup_tc_block_cb_matchall_eg; 1698 ingress = false; 1699 } else { 1700 return -EOPNOTSUPP; 1701 } 1702 1703 f->driver_block_list = &mlxsw_sp_block_cb_list; 1704 1705 switch (f->command) { 1706 case FLOW_BLOCK_BIND: 1707 if (flow_block_cb_is_busy(cb, mlxsw_sp_port, 1708 &mlxsw_sp_block_cb_list)) 1709 return -EBUSY; 1710 1711 block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port, 1712 mlxsw_sp_port, NULL); 1713 if (IS_ERR(block_cb)) 1714 return PTR_ERR(block_cb); 1715 err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f, 1716 ingress); 1717 if (err) { 1718 flow_block_cb_free(block_cb); 1719 return err; 1720 } 1721 flow_block_cb_add(block_cb, f); 1722 list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list); 1723 return 0; 1724 case FLOW_BLOCK_UNBIND: 1725 mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port, 1726 f, ingress); 1727 block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port); 1728 if (!block_cb) 1729 return -ENOENT; 1730 1731 flow_block_cb_remove(block_cb, f); 1732 list_del(&block_cb->driver_list); 1733 return 0; 1734 default: 1735 return -EOPNOTSUPP; 1736 } 1737 } 1738 1739 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, 1740 void *type_data) 1741 { 1742 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1743 1744 switch (type) { 1745 case TC_SETUP_BLOCK: 1746 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1747 case TC_SETUP_QDISC_RED: 1748 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1749 case TC_SETUP_QDISC_PRIO: 1750 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data); 1751 default: 1752 return -EOPNOTSUPP; 1753 } 1754 } 1755 1756 1757 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) 1758 { 1759 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1760 1761 if (!enable) { 1762 if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) || 1763 mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) || 1764 !list_empty(&mlxsw_sp_port->mall_tc_list)) { 1765 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); 1766 return -EINVAL; 1767 } 1768 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block); 1769 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block); 1770 } else { 1771 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block); 1772 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block); 1773 } 1774 return 0; 1775 } 1776 1777 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable) 1778 { 1779 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1780 char pplr_pl[MLXSW_REG_PPLR_LEN]; 1781 int err; 1782 1783 if (netif_running(dev)) 1784 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1785 1786 mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable); 1787 err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr), 1788 pplr_pl); 1789 1790 if (netif_running(dev)) 1791 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 1792 1793 return err; 1794 } 1795 1796 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable); 1797 1798 static int mlxsw_sp_handle_feature(struct net_device *dev, 1799 netdev_features_t wanted_features, 1800 netdev_features_t feature, 1801 mlxsw_sp_feature_handler feature_handler) 1802 { 1803 netdev_features_t changes = wanted_features ^ dev->features; 1804 bool enable = !!(wanted_features & feature); 1805 int err; 1806 1807 if (!(changes & feature)) 1808 return 0; 1809 1810 err = feature_handler(dev, enable); 1811 if (err) { 1812 netdev_err(dev, "%s feature %pNF failed, err %d\n", 1813 enable ? "Enable" : "Disable", &feature, err); 1814 return err; 1815 } 1816 1817 if (enable) 1818 dev->features |= feature; 1819 else 1820 dev->features &= ~feature; 1821 1822 return 0; 1823 } 1824 static int mlxsw_sp_set_features(struct net_device *dev, 1825 netdev_features_t features) 1826 { 1827 netdev_features_t oper_features = dev->features; 1828 int err = 0; 1829 1830 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC, 1831 mlxsw_sp_feature_hw_tc); 1832 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK, 1833 mlxsw_sp_feature_loopback); 1834 1835 if (err) { 1836 dev->features = oper_features; 1837 return -EINVAL; 1838 } 1839 1840 return 0; 1841 } 1842 1843 static struct devlink_port * 1844 mlxsw_sp_port_get_devlink_port(struct net_device *dev) 1845 { 1846 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1847 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1848 1849 return mlxsw_core_port_devlink_port_get(mlxsw_sp->core, 1850 mlxsw_sp_port->local_port); 1851 } 1852 1853 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, 1854 struct ifreq *ifr) 1855 { 1856 struct hwtstamp_config config; 1857 int err; 1858 1859 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 1860 return -EFAULT; 1861 1862 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, 1863 &config); 1864 if (err) 1865 return err; 1866 1867 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1868 return -EFAULT; 1869 1870 return 0; 1871 } 1872 1873 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, 1874 struct ifreq *ifr) 1875 { 1876 struct hwtstamp_config config; 1877 int err; 1878 1879 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port, 1880 &config); 1881 if (err) 1882 return err; 1883 1884 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 1885 return -EFAULT; 1886 1887 return 0; 1888 } 1889 1890 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port) 1891 { 1892 struct hwtstamp_config config = {0}; 1893 1894 mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config); 1895 } 1896 1897 static int 1898 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1899 { 1900 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1901 1902 switch (cmd) { 1903 case SIOCSHWTSTAMP: 1904 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr); 1905 case SIOCGHWTSTAMP: 1906 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr); 1907 default: 1908 return -EOPNOTSUPP; 1909 } 1910 } 1911 1912 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1913 .ndo_open = mlxsw_sp_port_open, 1914 .ndo_stop = mlxsw_sp_port_stop, 1915 .ndo_start_xmit = mlxsw_sp_port_xmit, 1916 .ndo_setup_tc = mlxsw_sp_setup_tc, 1917 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1918 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1919 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1920 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1921 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1922 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1923 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1924 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1925 .ndo_set_features = mlxsw_sp_set_features, 1926 .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port, 1927 .ndo_do_ioctl = mlxsw_sp_port_ioctl, 1928 }; 1929 1930 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 1931 struct ethtool_drvinfo *drvinfo) 1932 { 1933 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1934 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1935 1936 strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, 1937 sizeof(drvinfo->driver)); 1938 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 1939 sizeof(drvinfo->version)); 1940 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 1941 "%d.%d.%d", 1942 mlxsw_sp->bus_info->fw_rev.major, 1943 mlxsw_sp->bus_info->fw_rev.minor, 1944 mlxsw_sp->bus_info->fw_rev.subminor); 1945 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 1946 sizeof(drvinfo->bus_info)); 1947 } 1948 1949 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, 1950 struct ethtool_pauseparam *pause) 1951 { 1952 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1953 1954 pause->rx_pause = mlxsw_sp_port->link.rx_pause; 1955 pause->tx_pause = mlxsw_sp_port->link.tx_pause; 1956 } 1957 1958 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, 1959 struct ethtool_pauseparam *pause) 1960 { 1961 char pfcc_pl[MLXSW_REG_PFCC_LEN]; 1962 1963 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); 1964 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); 1965 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); 1966 1967 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), 1968 pfcc_pl); 1969 } 1970 1971 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, 1972 struct ethtool_pauseparam *pause) 1973 { 1974 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1975 bool pause_en = pause->tx_pause || pause->rx_pause; 1976 int err; 1977 1978 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { 1979 netdev_err(dev, "PFC already enabled on port\n"); 1980 return -EINVAL; 1981 } 1982 1983 if (pause->autoneg) { 1984 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); 1985 return -EINVAL; 1986 } 1987 1988 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1989 if (err) { 1990 netdev_err(dev, "Failed to configure port's headroom\n"); 1991 return err; 1992 } 1993 1994 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); 1995 if (err) { 1996 netdev_err(dev, "Failed to set PAUSE parameters\n"); 1997 goto err_port_pause_configure; 1998 } 1999 2000 mlxsw_sp_port->link.rx_pause = pause->rx_pause; 2001 mlxsw_sp_port->link.tx_pause = pause->tx_pause; 2002 2003 return 0; 2004 2005 err_port_pause_configure: 2006 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 2007 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 2008 return err; 2009 } 2010 2011 struct mlxsw_sp_port_hw_stats { 2012 char str[ETH_GSTRING_LEN]; 2013 u64 (*getter)(const char *payload); 2014 bool cells_bytes; 2015 }; 2016 2017 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 2018 { 2019 .str = "a_frames_transmitted_ok", 2020 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 2021 }, 2022 { 2023 .str = "a_frames_received_ok", 2024 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 2025 }, 2026 { 2027 .str = "a_frame_check_sequence_errors", 2028 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 2029 }, 2030 { 2031 .str = "a_alignment_errors", 2032 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 2033 }, 2034 { 2035 .str = "a_octets_transmitted_ok", 2036 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 2037 }, 2038 { 2039 .str = "a_octets_received_ok", 2040 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 2041 }, 2042 { 2043 .str = "a_multicast_frames_xmitted_ok", 2044 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 2045 }, 2046 { 2047 .str = "a_broadcast_frames_xmitted_ok", 2048 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 2049 }, 2050 { 2051 .str = "a_multicast_frames_received_ok", 2052 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 2053 }, 2054 { 2055 .str = "a_broadcast_frames_received_ok", 2056 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 2057 }, 2058 { 2059 .str = "a_in_range_length_errors", 2060 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 2061 }, 2062 { 2063 .str = "a_out_of_range_length_field", 2064 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 2065 }, 2066 { 2067 .str = "a_frame_too_long_errors", 2068 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 2069 }, 2070 { 2071 .str = "a_symbol_error_during_carrier", 2072 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 2073 }, 2074 { 2075 .str = "a_mac_control_frames_transmitted", 2076 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 2077 }, 2078 { 2079 .str = "a_mac_control_frames_received", 2080 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 2081 }, 2082 { 2083 .str = "a_unsupported_opcodes_received", 2084 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 2085 }, 2086 { 2087 .str = "a_pause_mac_ctrl_frames_received", 2088 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 2089 }, 2090 { 2091 .str = "a_pause_mac_ctrl_frames_xmitted", 2092 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 2093 }, 2094 }; 2095 2096 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 2097 2098 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = { 2099 { 2100 .str = "if_in_discards", 2101 .getter = mlxsw_reg_ppcnt_if_in_discards_get, 2102 }, 2103 { 2104 .str = "if_out_discards", 2105 .getter = mlxsw_reg_ppcnt_if_out_discards_get, 2106 }, 2107 { 2108 .str = "if_out_errors", 2109 .getter = mlxsw_reg_ppcnt_if_out_errors_get, 2110 }, 2111 }; 2112 2113 #define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \ 2114 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats) 2115 2116 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { 2117 { 2118 .str = "ether_stats_undersize_pkts", 2119 .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get, 2120 }, 2121 { 2122 .str = "ether_stats_oversize_pkts", 2123 .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get, 2124 }, 2125 { 2126 .str = "ether_stats_fragments", 2127 .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get, 2128 }, 2129 { 2130 .str = "ether_pkts64octets", 2131 .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, 2132 }, 2133 { 2134 .str = "ether_pkts65to127octets", 2135 .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get, 2136 }, 2137 { 2138 .str = "ether_pkts128to255octets", 2139 .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, 2140 }, 2141 { 2142 .str = "ether_pkts256to511octets", 2143 .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, 2144 }, 2145 { 2146 .str = "ether_pkts512to1023octets", 2147 .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, 2148 }, 2149 { 2150 .str = "ether_pkts1024to1518octets", 2151 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, 2152 }, 2153 { 2154 .str = "ether_pkts1519to2047octets", 2155 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, 2156 }, 2157 { 2158 .str = "ether_pkts2048to4095octets", 2159 .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, 2160 }, 2161 { 2162 .str = "ether_pkts4096to8191octets", 2163 .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get, 2164 }, 2165 { 2166 .str = "ether_pkts8192to10239octets", 2167 .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get, 2168 }, 2169 }; 2170 2171 #define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ 2172 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) 2173 2174 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = { 2175 { 2176 .str = "dot3stats_fcs_errors", 2177 .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get, 2178 }, 2179 { 2180 .str = "dot3stats_symbol_errors", 2181 .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get, 2182 }, 2183 { 2184 .str = "dot3control_in_unknown_opcodes", 2185 .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get, 2186 }, 2187 { 2188 .str = "dot3in_pause_frames", 2189 .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get, 2190 }, 2191 }; 2192 2193 #define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \ 2194 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats) 2195 2196 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = { 2197 { 2198 .str = "discard_ingress_general", 2199 .getter = mlxsw_reg_ppcnt_ingress_general_get, 2200 }, 2201 { 2202 .str = "discard_ingress_policy_engine", 2203 .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get, 2204 }, 2205 { 2206 .str = "discard_ingress_vlan_membership", 2207 .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get, 2208 }, 2209 { 2210 .str = "discard_ingress_tag_frame_type", 2211 .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get, 2212 }, 2213 { 2214 .str = "discard_egress_vlan_membership", 2215 .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get, 2216 }, 2217 { 2218 .str = "discard_loopback_filter", 2219 .getter = mlxsw_reg_ppcnt_loopback_filter_get, 2220 }, 2221 { 2222 .str = "discard_egress_general", 2223 .getter = mlxsw_reg_ppcnt_egress_general_get, 2224 }, 2225 { 2226 .str = "discard_egress_hoq", 2227 .getter = mlxsw_reg_ppcnt_egress_hoq_get, 2228 }, 2229 { 2230 .str = "discard_egress_policy_engine", 2231 .getter = mlxsw_reg_ppcnt_egress_policy_engine_get, 2232 }, 2233 { 2234 .str = "discard_ingress_tx_link_down", 2235 .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get, 2236 }, 2237 { 2238 .str = "discard_egress_stp_filter", 2239 .getter = mlxsw_reg_ppcnt_egress_stp_filter_get, 2240 }, 2241 { 2242 .str = "discard_egress_sll", 2243 .getter = mlxsw_reg_ppcnt_egress_sll_get, 2244 }, 2245 }; 2246 2247 #define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \ 2248 ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats) 2249 2250 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 2251 { 2252 .str = "rx_octets_prio", 2253 .getter = mlxsw_reg_ppcnt_rx_octets_get, 2254 }, 2255 { 2256 .str = "rx_frames_prio", 2257 .getter = mlxsw_reg_ppcnt_rx_frames_get, 2258 }, 2259 { 2260 .str = "tx_octets_prio", 2261 .getter = mlxsw_reg_ppcnt_tx_octets_get, 2262 }, 2263 { 2264 .str = "tx_frames_prio", 2265 .getter = mlxsw_reg_ppcnt_tx_frames_get, 2266 }, 2267 { 2268 .str = "rx_pause_prio", 2269 .getter = mlxsw_reg_ppcnt_rx_pause_get, 2270 }, 2271 { 2272 .str = "rx_pause_duration_prio", 2273 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 2274 }, 2275 { 2276 .str = "tx_pause_prio", 2277 .getter = mlxsw_reg_ppcnt_tx_pause_get, 2278 }, 2279 { 2280 .str = "tx_pause_duration_prio", 2281 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 2282 }, 2283 }; 2284 2285 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 2286 2287 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 2288 { 2289 .str = "tc_transmit_queue_tc", 2290 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 2291 .cells_bytes = true, 2292 }, 2293 { 2294 .str = "tc_no_buffer_discard_uc_tc", 2295 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 2296 }, 2297 }; 2298 2299 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 2300 2301 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 2302 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \ 2303 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ 2304 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \ 2305 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \ 2306 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ 2307 IEEE_8021QAZ_MAX_TCS) + \ 2308 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ 2309 TC_MAX_QUEUE)) 2310 2311 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 2312 { 2313 int i; 2314 2315 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 2316 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 2317 mlxsw_sp_port_hw_prio_stats[i].str, prio); 2318 *p += ETH_GSTRING_LEN; 2319 } 2320 } 2321 2322 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 2323 { 2324 int i; 2325 2326 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 2327 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 2328 mlxsw_sp_port_hw_tc_stats[i].str, tc); 2329 *p += ETH_GSTRING_LEN; 2330 } 2331 } 2332 2333 static void mlxsw_sp_port_get_strings(struct net_device *dev, 2334 u32 stringset, u8 *data) 2335 { 2336 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2337 u8 *p = data; 2338 int i; 2339 2340 switch (stringset) { 2341 case ETH_SS_STATS: 2342 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2343 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2344 ETH_GSTRING_LEN); 2345 p += ETH_GSTRING_LEN; 2346 } 2347 2348 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) { 2349 memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str, 2350 ETH_GSTRING_LEN); 2351 p += ETH_GSTRING_LEN; 2352 } 2353 2354 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { 2355 memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, 2356 ETH_GSTRING_LEN); 2357 p += ETH_GSTRING_LEN; 2358 } 2359 2360 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) { 2361 memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str, 2362 ETH_GSTRING_LEN); 2363 p += ETH_GSTRING_LEN; 2364 } 2365 2366 for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) { 2367 memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str, 2368 ETH_GSTRING_LEN); 2369 p += ETH_GSTRING_LEN; 2370 } 2371 2372 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2373 mlxsw_sp_port_get_prio_strings(&p, i); 2374 2375 for (i = 0; i < TC_MAX_QUEUE; i++) 2376 mlxsw_sp_port_get_tc_strings(&p, i); 2377 2378 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p); 2379 break; 2380 } 2381 } 2382 2383 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2384 enum ethtool_phys_id_state state) 2385 { 2386 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2387 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2388 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2389 bool active; 2390 2391 switch (state) { 2392 case ETHTOOL_ID_ACTIVE: 2393 active = true; 2394 break; 2395 case ETHTOOL_ID_INACTIVE: 2396 active = false; 2397 break; 2398 default: 2399 return -EOPNOTSUPP; 2400 } 2401 2402 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2403 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2404 } 2405 2406 static int 2407 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2408 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2409 { 2410 switch (grp) { 2411 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2412 *p_hw_stats = mlxsw_sp_port_hw_stats; 2413 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2414 break; 2415 case MLXSW_REG_PPCNT_RFC_2863_CNT: 2416 *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats; 2417 *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2418 break; 2419 case MLXSW_REG_PPCNT_RFC_2819_CNT: 2420 *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; 2421 *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2422 break; 2423 case MLXSW_REG_PPCNT_RFC_3635_CNT: 2424 *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats; 2425 *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2426 break; 2427 case MLXSW_REG_PPCNT_DISCARD_CNT: 2428 *p_hw_stats = mlxsw_sp_port_hw_discard_stats; 2429 *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2430 break; 2431 case MLXSW_REG_PPCNT_PRIO_CNT: 2432 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2433 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2434 break; 2435 case MLXSW_REG_PPCNT_TC_CNT: 2436 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2437 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2438 break; 2439 default: 2440 WARN_ON(1); 2441 return -EOPNOTSUPP; 2442 } 2443 return 0; 2444 } 2445 2446 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2447 enum mlxsw_reg_ppcnt_grp grp, int prio, 2448 u64 *data, int data_index) 2449 { 2450 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2451 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2452 struct mlxsw_sp_port_hw_stats *hw_stats; 2453 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2454 int i, len; 2455 int err; 2456 2457 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2458 if (err) 2459 return; 2460 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2461 for (i = 0; i < len; i++) { 2462 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2463 if (!hw_stats[i].cells_bytes) 2464 continue; 2465 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2466 data[data_index + i]); 2467 } 2468 } 2469 2470 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2471 struct ethtool_stats *stats, u64 *data) 2472 { 2473 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2474 int i, data_index = 0; 2475 2476 /* IEEE 802.3 Counters */ 2477 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2478 data, data_index); 2479 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2480 2481 /* RFC 2863 Counters */ 2482 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0, 2483 data, data_index); 2484 data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2485 2486 /* RFC 2819 Counters */ 2487 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, 2488 data, data_index); 2489 data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2490 2491 /* RFC 3635 Counters */ 2492 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0, 2493 data, data_index); 2494 data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2495 2496 /* Discard Counters */ 2497 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0, 2498 data, data_index); 2499 data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2500 2501 /* Per-Priority Counters */ 2502 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2503 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2504 data, data_index); 2505 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2506 } 2507 2508 /* Per-TC Counters */ 2509 for (i = 0; i < TC_MAX_QUEUE; i++) { 2510 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2511 data, data_index); 2512 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2513 } 2514 2515 /* PTP counters */ 2516 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port, 2517 data, data_index); 2518 data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); 2519 } 2520 2521 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2522 { 2523 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2524 2525 switch (sset) { 2526 case ETH_SS_STATS: 2527 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN + 2528 mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); 2529 default: 2530 return -EOPNOTSUPP; 2531 } 2532 } 2533 2534 struct mlxsw_sp1_port_link_mode { 2535 enum ethtool_link_mode_bit_indices mask_ethtool; 2536 u32 mask; 2537 u32 speed; 2538 }; 2539 2540 static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = { 2541 { 2542 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2543 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2544 .speed = SPEED_100, 2545 }, 2546 { 2547 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2548 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2549 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2550 .speed = SPEED_1000, 2551 }, 2552 { 2553 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2554 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2555 .speed = SPEED_10000, 2556 }, 2557 { 2558 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 2559 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 2560 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2561 .speed = SPEED_10000, 2562 }, 2563 { 2564 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2565 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2566 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2567 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 2568 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2569 .speed = SPEED_10000, 2570 }, 2571 { 2572 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 2573 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 2574 .speed = SPEED_20000, 2575 }, 2576 { 2577 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 2578 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2579 .speed = SPEED_40000, 2580 }, 2581 { 2582 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 2583 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2584 .speed = SPEED_40000, 2585 }, 2586 { 2587 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 2588 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2589 .speed = SPEED_40000, 2590 }, 2591 { 2592 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 2593 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2594 .speed = SPEED_40000, 2595 }, 2596 { 2597 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, 2598 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2599 .speed = SPEED_25000, 2600 }, 2601 { 2602 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, 2603 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2604 .speed = SPEED_25000, 2605 }, 2606 { 2607 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2608 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2609 .speed = SPEED_25000, 2610 }, 2611 { 2612 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, 2613 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2614 .speed = SPEED_50000, 2615 }, 2616 { 2617 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 2618 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2619 .speed = SPEED_50000, 2620 }, 2621 { 2622 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2, 2623 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2624 .speed = SPEED_50000, 2625 }, 2626 { 2627 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, 2628 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2629 .speed = SPEED_100000, 2630 }, 2631 { 2632 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, 2633 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2634 .speed = SPEED_100000, 2635 }, 2636 { 2637 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, 2638 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2639 .speed = SPEED_100000, 2640 }, 2641 { 2642 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 2643 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2644 .speed = SPEED_100000, 2645 }, 2646 }; 2647 2648 #define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode) 2649 2650 static void 2651 mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 2652 u32 ptys_eth_proto, 2653 struct ethtool_link_ksettings *cmd) 2654 { 2655 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2656 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2657 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2658 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2659 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2660 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2661 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2662 2663 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2664 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2665 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2666 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2667 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2668 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2669 } 2670 2671 static void 2672 mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 2673 u8 width, unsigned long *mode) 2674 { 2675 int i; 2676 2677 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2678 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) 2679 __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2680 mode); 2681 } 2682 } 2683 2684 static u32 2685 mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 2686 { 2687 int i; 2688 2689 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2690 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) 2691 return mlxsw_sp1_port_link_mode[i].speed; 2692 } 2693 2694 return SPEED_UNKNOWN; 2695 } 2696 2697 static void 2698 mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 2699 u32 ptys_eth_proto, 2700 struct ethtool_link_ksettings *cmd) 2701 { 2702 cmd->base.speed = SPEED_UNKNOWN; 2703 cmd->base.duplex = DUPLEX_UNKNOWN; 2704 2705 if (!carrier_ok) 2706 return; 2707 2708 cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 2709 if (cmd->base.speed != SPEED_UNKNOWN) 2710 cmd->base.duplex = DUPLEX_FULL; 2711 } 2712 2713 static u32 2714 mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, 2715 const struct ethtool_link_ksettings *cmd) 2716 { 2717 u32 ptys_proto = 0; 2718 int i; 2719 2720 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2721 if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2722 cmd->link_modes.advertising)) 2723 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2724 } 2725 return ptys_proto; 2726 } 2727 2728 static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width, 2729 u32 speed) 2730 { 2731 u32 ptys_proto = 0; 2732 int i; 2733 2734 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2735 if (speed == mlxsw_sp1_port_link_mode[i].speed) 2736 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2737 } 2738 return ptys_proto; 2739 } 2740 2741 static u32 2742 mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 2743 { 2744 u32 ptys_proto = 0; 2745 int i; 2746 2747 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2748 if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed) 2749 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2750 } 2751 return ptys_proto; 2752 } 2753 2754 static int 2755 mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2756 u32 *base_speed) 2757 { 2758 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 2759 return 0; 2760 } 2761 2762 static void 2763 mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 2764 u8 local_port, u32 proto_admin, bool autoneg) 2765 { 2766 mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg); 2767 } 2768 2769 static void 2770 mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 2771 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 2772 u32 *p_eth_proto_oper) 2773 { 2774 mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin, 2775 p_eth_proto_oper); 2776 } 2777 2778 static const struct mlxsw_sp_port_type_speed_ops 2779 mlxsw_sp1_port_type_speed_ops = { 2780 .from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port, 2781 .from_ptys_link = mlxsw_sp1_from_ptys_link, 2782 .from_ptys_speed = mlxsw_sp1_from_ptys_speed, 2783 .from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex, 2784 .to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link, 2785 .to_ptys_speed = mlxsw_sp1_to_ptys_speed, 2786 .to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed, 2787 .port_speed_base = mlxsw_sp1_port_speed_base, 2788 .reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack, 2789 .reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack, 2790 }; 2791 2792 static const enum ethtool_link_mode_bit_indices 2793 mlxsw_sp2_mask_ethtool_sgmii_100m[] = { 2794 ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2795 }; 2796 2797 #define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \ 2798 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m) 2799 2800 static const enum ethtool_link_mode_bit_indices 2801 mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = { 2802 ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 2803 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2804 }; 2805 2806 #define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \ 2807 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii) 2808 2809 static const enum ethtool_link_mode_bit_indices 2810 mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = { 2811 ETHTOOL_LINK_MODE_2500baseX_Full_BIT, 2812 }; 2813 2814 #define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \ 2815 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii) 2816 2817 static const enum ethtool_link_mode_bit_indices 2818 mlxsw_sp2_mask_ethtool_5gbase_r[] = { 2819 ETHTOOL_LINK_MODE_5000baseT_Full_BIT, 2820 }; 2821 2822 #define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \ 2823 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r) 2824 2825 static const enum ethtool_link_mode_bit_indices 2826 mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = { 2827 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2828 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2829 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 2830 ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 2831 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 2832 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 2833 ETHTOOL_LINK_MODE_10000baseER_Full_BIT, 2834 }; 2835 2836 #define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \ 2837 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g) 2838 2839 static const enum ethtool_link_mode_bit_indices 2840 mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = { 2841 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2842 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2843 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2844 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2845 }; 2846 2847 #define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \ 2848 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g) 2849 2850 static const enum ethtool_link_mode_bit_indices 2851 mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = { 2852 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2853 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2854 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2855 }; 2856 2857 #define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \ 2858 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr) 2859 2860 static const enum ethtool_link_mode_bit_indices 2861 mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = { 2862 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2863 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2864 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2865 }; 2866 2867 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \ 2868 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2) 2869 2870 static const enum ethtool_link_mode_bit_indices 2871 mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = { 2872 ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, 2873 ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, 2874 ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, 2875 ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, 2876 ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, 2877 }; 2878 2879 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \ 2880 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr) 2881 2882 static const enum ethtool_link_mode_bit_indices 2883 mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = { 2884 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2885 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2886 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2887 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2888 }; 2889 2890 #define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \ 2891 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4) 2892 2893 static const enum ethtool_link_mode_bit_indices 2894 mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = { 2895 ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, 2896 ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, 2897 ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, 2898 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, 2899 ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, 2900 }; 2901 2902 #define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \ 2903 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2) 2904 2905 static const enum ethtool_link_mode_bit_indices 2906 mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { 2907 ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, 2908 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, 2909 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, 2910 ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, 2911 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, 2912 }; 2913 2914 #define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \ 2915 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4) 2916 2917 #define MLXSW_SP_PORT_MASK_WIDTH_1X BIT(0) 2918 #define MLXSW_SP_PORT_MASK_WIDTH_2X BIT(1) 2919 #define MLXSW_SP_PORT_MASK_WIDTH_4X BIT(2) 2920 2921 static u8 mlxsw_sp_port_mask_width_get(u8 width) 2922 { 2923 switch (width) { 2924 case 1: 2925 return MLXSW_SP_PORT_MASK_WIDTH_1X; 2926 case 2: 2927 return MLXSW_SP_PORT_MASK_WIDTH_2X; 2928 case 4: 2929 return MLXSW_SP_PORT_MASK_WIDTH_4X; 2930 default: 2931 WARN_ON_ONCE(1); 2932 return 0; 2933 } 2934 } 2935 2936 struct mlxsw_sp2_port_link_mode { 2937 const enum ethtool_link_mode_bit_indices *mask_ethtool; 2938 int m_ethtool_len; 2939 u32 mask; 2940 u32 speed; 2941 u8 mask_width; 2942 }; 2943 2944 static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { 2945 { 2946 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M, 2947 .mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m, 2948 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN, 2949 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 2950 MLXSW_SP_PORT_MASK_WIDTH_2X | 2951 MLXSW_SP_PORT_MASK_WIDTH_4X, 2952 .speed = SPEED_100, 2953 }, 2954 { 2955 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII, 2956 .mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii, 2957 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN, 2958 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 2959 MLXSW_SP_PORT_MASK_WIDTH_2X | 2960 MLXSW_SP_PORT_MASK_WIDTH_4X, 2961 .speed = SPEED_1000, 2962 }, 2963 { 2964 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII, 2965 .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii, 2966 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN, 2967 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 2968 MLXSW_SP_PORT_MASK_WIDTH_2X | 2969 MLXSW_SP_PORT_MASK_WIDTH_4X, 2970 .speed = SPEED_2500, 2971 }, 2972 { 2973 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R, 2974 .mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r, 2975 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN, 2976 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 2977 MLXSW_SP_PORT_MASK_WIDTH_2X | 2978 MLXSW_SP_PORT_MASK_WIDTH_4X, 2979 .speed = SPEED_5000, 2980 }, 2981 { 2982 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G, 2983 .mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g, 2984 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN, 2985 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 2986 MLXSW_SP_PORT_MASK_WIDTH_2X | 2987 MLXSW_SP_PORT_MASK_WIDTH_4X, 2988 .speed = SPEED_10000, 2989 }, 2990 { 2991 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G, 2992 .mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g, 2993 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN, 2994 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X, 2995 .speed = SPEED_40000, 2996 }, 2997 { 2998 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR, 2999 .mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr, 3000 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN, 3001 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | 3002 MLXSW_SP_PORT_MASK_WIDTH_2X | 3003 MLXSW_SP_PORT_MASK_WIDTH_4X, 3004 .speed = SPEED_25000, 3005 }, 3006 { 3007 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2, 3008 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2, 3009 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN, 3010 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X | 3011 MLXSW_SP_PORT_MASK_WIDTH_4X, 3012 .speed = SPEED_50000, 3013 }, 3014 { 3015 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR, 3016 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr, 3017 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN, 3018 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X, 3019 .speed = SPEED_50000, 3020 }, 3021 { 3022 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4, 3023 .mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4, 3024 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN, 3025 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X, 3026 .speed = SPEED_100000, 3027 }, 3028 { 3029 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2, 3030 .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2, 3031 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN, 3032 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X, 3033 .speed = SPEED_100000, 3034 }, 3035 { 3036 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, 3037 .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, 3038 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, 3039 .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X, 3040 .speed = SPEED_200000, 3041 }, 3042 }; 3043 3044 #define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode) 3045 3046 static void 3047 mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 3048 u32 ptys_eth_proto, 3049 struct ethtool_link_ksettings *cmd) 3050 { 3051 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 3052 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 3053 } 3054 3055 static void 3056 mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3057 unsigned long *mode) 3058 { 3059 int i; 3060 3061 for (i = 0; i < link_mode->m_ethtool_len; i++) 3062 __set_bit(link_mode->mask_ethtool[i], mode); 3063 } 3064 3065 static void 3066 mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 3067 u8 width, unsigned long *mode) 3068 { 3069 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 3070 int i; 3071 3072 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3073 if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) && 3074 (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) 3075 mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3076 mode); 3077 } 3078 } 3079 3080 static u32 3081 mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 3082 { 3083 int i; 3084 3085 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3086 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) 3087 return mlxsw_sp2_port_link_mode[i].speed; 3088 } 3089 3090 return SPEED_UNKNOWN; 3091 } 3092 3093 static void 3094 mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 3095 u32 ptys_eth_proto, 3096 struct ethtool_link_ksettings *cmd) 3097 { 3098 cmd->base.speed = SPEED_UNKNOWN; 3099 cmd->base.duplex = DUPLEX_UNKNOWN; 3100 3101 if (!carrier_ok) 3102 return; 3103 3104 cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 3105 if (cmd->base.speed != SPEED_UNKNOWN) 3106 cmd->base.duplex = DUPLEX_FULL; 3107 } 3108 3109 static bool 3110 mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3111 const unsigned long *mode) 3112 { 3113 int cnt = 0; 3114 int i; 3115 3116 for (i = 0; i < link_mode->m_ethtool_len; i++) { 3117 if (test_bit(link_mode->mask_ethtool[i], mode)) 3118 cnt++; 3119 } 3120 3121 return cnt == link_mode->m_ethtool_len; 3122 } 3123 3124 static u32 3125 mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, 3126 const struct ethtool_link_ksettings *cmd) 3127 { 3128 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 3129 u32 ptys_proto = 0; 3130 int i; 3131 3132 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3133 if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) && 3134 mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3135 cmd->link_modes.advertising)) 3136 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3137 } 3138 return ptys_proto; 3139 } 3140 3141 static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, 3142 u8 width, u32 speed) 3143 { 3144 u8 mask_width = mlxsw_sp_port_mask_width_get(width); 3145 u32 ptys_proto = 0; 3146 int i; 3147 3148 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3149 if ((speed == mlxsw_sp2_port_link_mode[i].speed) && 3150 (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) 3151 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3152 } 3153 return ptys_proto; 3154 } 3155 3156 static u32 3157 mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 3158 { 3159 u32 ptys_proto = 0; 3160 int i; 3161 3162 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3163 if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed) 3164 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3165 } 3166 return ptys_proto; 3167 } 3168 3169 static int 3170 mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3171 u32 *base_speed) 3172 { 3173 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3174 u32 eth_proto_cap; 3175 int err; 3176 3177 /* In Spectrum-2, the speed of 1x can change from port to port, so query 3178 * it from firmware. 3179 */ 3180 mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false); 3181 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3182 if (err) 3183 return err; 3184 mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 3185 3186 if (eth_proto_cap & 3187 MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) { 3188 *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G; 3189 return 0; 3190 } 3191 3192 if (eth_proto_cap & 3193 MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) { 3194 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 3195 return 0; 3196 } 3197 3198 return -EIO; 3199 } 3200 3201 static void 3202 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 3203 u8 local_port, u32 proto_admin, 3204 bool autoneg) 3205 { 3206 mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); 3207 } 3208 3209 static void 3210 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 3211 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 3212 u32 *p_eth_proto_oper) 3213 { 3214 mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, 3215 p_eth_proto_admin, p_eth_proto_oper); 3216 } 3217 3218 static const struct mlxsw_sp_port_type_speed_ops 3219 mlxsw_sp2_port_type_speed_ops = { 3220 .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, 3221 .from_ptys_link = mlxsw_sp2_from_ptys_link, 3222 .from_ptys_speed = mlxsw_sp2_from_ptys_speed, 3223 .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, 3224 .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, 3225 .to_ptys_speed = mlxsw_sp2_to_ptys_speed, 3226 .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed, 3227 .port_speed_base = mlxsw_sp2_port_speed_base, 3228 .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, 3229 .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, 3230 }; 3231 3232 static void 3233 mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, 3234 u8 width, struct ethtool_link_ksettings *cmd) 3235 { 3236 const struct mlxsw_sp_port_type_speed_ops *ops; 3237 3238 ops = mlxsw_sp->port_type_speed_ops; 3239 3240 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 3241 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 3242 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 3243 3244 ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); 3245 ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width, 3246 cmd->link_modes.supported); 3247 } 3248 3249 static void 3250 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, 3251 u32 eth_proto_admin, bool autoneg, u8 width, 3252 struct ethtool_link_ksettings *cmd) 3253 { 3254 const struct mlxsw_sp_port_type_speed_ops *ops; 3255 3256 ops = mlxsw_sp->port_type_speed_ops; 3257 3258 if (!autoneg) 3259 return; 3260 3261 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 3262 ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width, 3263 cmd->link_modes.advertising); 3264 } 3265 3266 static u8 3267 mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type) 3268 { 3269 switch (connector_type) { 3270 case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: 3271 return PORT_OTHER; 3272 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: 3273 return PORT_NONE; 3274 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: 3275 return PORT_TP; 3276 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: 3277 return PORT_AUI; 3278 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: 3279 return PORT_BNC; 3280 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: 3281 return PORT_MII; 3282 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: 3283 return PORT_FIBRE; 3284 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: 3285 return PORT_DA; 3286 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: 3287 return PORT_OTHER; 3288 default: 3289 WARN_ON_ONCE(1); 3290 return PORT_OTHER; 3291 } 3292 } 3293 3294 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 3295 struct ethtool_link_ksettings *cmd) 3296 { 3297 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 3298 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3299 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3300 const struct mlxsw_sp_port_type_speed_ops *ops; 3301 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3302 u8 connector_type; 3303 bool autoneg; 3304 int err; 3305 3306 ops = mlxsw_sp->port_type_speed_ops; 3307 3308 autoneg = mlxsw_sp_port->link.autoneg; 3309 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3310 0, false); 3311 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3312 if (err) 3313 return err; 3314 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3315 ð_proto_admin, ð_proto_oper); 3316 3317 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, 3318 mlxsw_sp_port->mapping.width, cmd); 3319 3320 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3321 mlxsw_sp_port->mapping.width, cmd); 3322 3323 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 3324 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3325 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3326 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3327 eth_proto_oper, cmd); 3328 3329 return 0; 3330 } 3331 3332 static int 3333 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3334 const struct ethtool_link_ksettings *cmd) 3335 { 3336 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3337 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3338 const struct mlxsw_sp_port_type_speed_ops *ops; 3339 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3340 u32 eth_proto_cap, eth_proto_new; 3341 bool autoneg; 3342 int err; 3343 3344 ops = mlxsw_sp->port_type_speed_ops; 3345 3346 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3347 0, false); 3348 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3349 if (err) 3350 return err; 3351 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3352 3353 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 3354 eth_proto_new = autoneg ? 3355 ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width, 3356 cmd) : 3357 ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width, 3358 cmd->base.speed); 3359 3360 eth_proto_new = eth_proto_new & eth_proto_cap; 3361 if (!eth_proto_new) { 3362 netdev_err(dev, "No supported speed requested\n"); 3363 return -EINVAL; 3364 } 3365 3366 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3367 eth_proto_new, autoneg); 3368 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3369 if (err) 3370 return err; 3371 3372 mlxsw_sp_port->link.autoneg = autoneg; 3373 3374 if (!netif_running(dev)) 3375 return 0; 3376 3377 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3378 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3379 3380 return 0; 3381 } 3382 3383 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3384 struct ethtool_modinfo *modinfo) 3385 { 3386 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3387 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3388 int err; 3389 3390 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3391 mlxsw_sp_port->mapping.module, 3392 modinfo); 3393 3394 return err; 3395 } 3396 3397 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3398 struct ethtool_eeprom *ee, 3399 u8 *data) 3400 { 3401 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3402 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3403 int err; 3404 3405 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3406 mlxsw_sp_port->mapping.module, ee, 3407 data); 3408 3409 return err; 3410 } 3411 3412 static int 3413 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) 3414 { 3415 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3416 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3417 3418 return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); 3419 } 3420 3421 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 3422 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 3423 .get_link = ethtool_op_get_link, 3424 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 3425 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 3426 .get_strings = mlxsw_sp_port_get_strings, 3427 .set_phys_id = mlxsw_sp_port_set_phys_id, 3428 .get_ethtool_stats = mlxsw_sp_port_get_stats, 3429 .get_sset_count = mlxsw_sp_port_get_sset_count, 3430 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 3431 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 3432 .get_module_info = mlxsw_sp_get_module_info, 3433 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 3434 .get_ts_info = mlxsw_sp_get_ts_info, 3435 }; 3436 3437 static int 3438 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 3439 { 3440 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3441 const struct mlxsw_sp_port_type_speed_ops *ops; 3442 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3443 u32 eth_proto_admin; 3444 u32 upper_speed; 3445 u32 base_speed; 3446 int err; 3447 3448 ops = mlxsw_sp->port_type_speed_ops; 3449 3450 err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port, 3451 &base_speed); 3452 if (err) 3453 return err; 3454 upper_speed = base_speed * width; 3455 3456 eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed); 3457 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3458 eth_proto_admin, mlxsw_sp_port->link.autoneg); 3459 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3460 } 3461 3462 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 3463 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 3464 bool dwrr, u8 dwrr_weight) 3465 { 3466 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3467 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3468 3469 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3470 next_index); 3471 mlxsw_reg_qeec_de_set(qeec_pl, true); 3472 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 3473 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 3474 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3475 } 3476 3477 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 3478 enum mlxsw_reg_qeec_hr hr, u8 index, 3479 u8 next_index, u32 maxrate) 3480 { 3481 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3482 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3483 3484 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3485 next_index); 3486 mlxsw_reg_qeec_mase_set(qeec_pl, true); 3487 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 3488 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3489 } 3490 3491 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, 3492 enum mlxsw_reg_qeec_hr hr, u8 index, 3493 u8 next_index, u32 minrate) 3494 { 3495 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3496 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3497 3498 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3499 next_index); 3500 mlxsw_reg_qeec_mise_set(qeec_pl, true); 3501 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); 3502 3503 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3504 } 3505 3506 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 3507 u8 switch_prio, u8 tclass) 3508 { 3509 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3510 char qtct_pl[MLXSW_REG_QTCT_LEN]; 3511 3512 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 3513 tclass); 3514 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 3515 } 3516 3517 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 3518 { 3519 int err, i; 3520 3521 /* Setup the elements hierarcy, so that each TC is linked to 3522 * one subgroup, which are all member in the same group. 3523 */ 3524 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3525 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 3526 0); 3527 if (err) 3528 return err; 3529 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3530 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3531 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 3532 0, false, 0); 3533 if (err) 3534 return err; 3535 } 3536 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3537 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3538 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 3539 false, 0); 3540 if (err) 3541 return err; 3542 3543 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3544 MLXSW_REG_QEEC_HIERARCY_TC, 3545 i + 8, i, 3546 true, 100); 3547 if (err) 3548 return err; 3549 } 3550 3551 /* Make sure the max shaper is disabled in all hierarchies that support 3552 * it. Note that this disables ptps (PTP shaper), but that is intended 3553 * for the initial configuration. 3554 */ 3555 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3556 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 3557 MLXSW_REG_QEEC_MAS_DIS); 3558 if (err) 3559 return err; 3560 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3561 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3562 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 3563 i, 0, 3564 MLXSW_REG_QEEC_MAS_DIS); 3565 if (err) 3566 return err; 3567 } 3568 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3569 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3570 MLXSW_REG_QEEC_HIERARCY_TC, 3571 i, i, 3572 MLXSW_REG_QEEC_MAS_DIS); 3573 if (err) 3574 return err; 3575 3576 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3577 MLXSW_REG_QEEC_HIERARCY_TC, 3578 i + 8, i, 3579 MLXSW_REG_QEEC_MAS_DIS); 3580 if (err) 3581 return err; 3582 } 3583 3584 /* Configure the min shaper for multicast TCs. */ 3585 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3586 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 3587 MLXSW_REG_QEEC_HIERARCY_TC, 3588 i + 8, i, 3589 MLXSW_REG_QEEC_MIS_MIN); 3590 if (err) 3591 return err; 3592 } 3593 3594 /* Map all priorities to traffic class 0. */ 3595 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3596 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 3597 if (err) 3598 return err; 3599 } 3600 3601 return 0; 3602 } 3603 3604 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 3605 bool enable) 3606 { 3607 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3608 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 3609 3610 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 3611 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 3612 } 3613 3614 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3615 bool split, u8 module, u8 width, u8 lane) 3616 { 3617 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 3618 struct mlxsw_sp_port *mlxsw_sp_port; 3619 struct net_device *dev; 3620 int err; 3621 3622 err = mlxsw_core_port_init(mlxsw_sp->core, local_port, 3623 module + 1, split, lane / width, 3624 mlxsw_sp->base_mac, 3625 sizeof(mlxsw_sp->base_mac)); 3626 if (err) { 3627 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 3628 local_port); 3629 return err; 3630 } 3631 3632 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 3633 if (!dev) { 3634 err = -ENOMEM; 3635 goto err_alloc_etherdev; 3636 } 3637 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 3638 mlxsw_sp_port = netdev_priv(dev); 3639 mlxsw_sp_port->dev = dev; 3640 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3641 mlxsw_sp_port->local_port = local_port; 3642 mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; 3643 mlxsw_sp_port->split = split; 3644 mlxsw_sp_port->mapping.module = module; 3645 mlxsw_sp_port->mapping.width = width; 3646 mlxsw_sp_port->mapping.lane = lane; 3647 mlxsw_sp_port->link.autoneg = 1; 3648 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 3649 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 3650 3651 mlxsw_sp_port->pcpu_stats = 3652 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 3653 if (!mlxsw_sp_port->pcpu_stats) { 3654 err = -ENOMEM; 3655 goto err_alloc_stats; 3656 } 3657 3658 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 3659 GFP_KERNEL); 3660 if (!mlxsw_sp_port->sample) { 3661 err = -ENOMEM; 3662 goto err_alloc_sample; 3663 } 3664 3665 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 3666 &update_stats_cache); 3667 3668 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 3669 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 3670 3671 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane); 3672 if (err) { 3673 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 3674 mlxsw_sp_port->local_port); 3675 goto err_port_module_map; 3676 } 3677 3678 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 3679 if (err) { 3680 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 3681 mlxsw_sp_port->local_port); 3682 goto err_port_swid_set; 3683 } 3684 3685 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 3686 if (err) { 3687 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 3688 mlxsw_sp_port->local_port); 3689 goto err_dev_addr_init; 3690 } 3691 3692 netif_carrier_off(dev); 3693 3694 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 3695 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 3696 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; 3697 3698 dev->min_mtu = 0; 3699 dev->max_mtu = ETH_MAX_MTU; 3700 3701 /* Each packet needs to have a Tx header (metadata) on top all other 3702 * headers. 3703 */ 3704 dev->needed_headroom = MLXSW_TXHDR_LEN; 3705 3706 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 3707 if (err) { 3708 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 3709 mlxsw_sp_port->local_port); 3710 goto err_port_system_port_mapping_set; 3711 } 3712 3713 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 3714 if (err) { 3715 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 3716 mlxsw_sp_port->local_port); 3717 goto err_port_speed_by_width_set; 3718 } 3719 3720 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 3721 if (err) { 3722 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 3723 mlxsw_sp_port->local_port); 3724 goto err_port_mtu_set; 3725 } 3726 3727 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3728 if (err) 3729 goto err_port_admin_status_set; 3730 3731 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 3732 if (err) { 3733 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 3734 mlxsw_sp_port->local_port); 3735 goto err_port_buffers_init; 3736 } 3737 3738 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 3739 if (err) { 3740 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 3741 mlxsw_sp_port->local_port); 3742 goto err_port_ets_init; 3743 } 3744 3745 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 3746 if (err) { 3747 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 3748 mlxsw_sp_port->local_port); 3749 goto err_port_tc_mc_mode; 3750 } 3751 3752 /* ETS and buffers must be initialized before DCB. */ 3753 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 3754 if (err) { 3755 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 3756 mlxsw_sp_port->local_port); 3757 goto err_port_dcb_init; 3758 } 3759 3760 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 3761 if (err) { 3762 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 3763 mlxsw_sp_port->local_port); 3764 goto err_port_fids_init; 3765 } 3766 3767 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 3768 if (err) { 3769 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 3770 mlxsw_sp_port->local_port); 3771 goto err_port_qdiscs_init; 3772 } 3773 3774 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 3775 if (err) { 3776 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 3777 mlxsw_sp_port->local_port); 3778 goto err_port_nve_init; 3779 } 3780 3781 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 3782 if (err) { 3783 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", 3784 mlxsw_sp_port->local_port); 3785 goto err_port_pvid_set; 3786 } 3787 3788 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 3789 MLXSW_SP_DEFAULT_VID); 3790 if (IS_ERR(mlxsw_sp_port_vlan)) { 3791 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 3792 mlxsw_sp_port->local_port); 3793 err = PTR_ERR(mlxsw_sp_port_vlan); 3794 goto err_port_vlan_create; 3795 } 3796 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; 3797 3798 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, 3799 mlxsw_sp->ptp_ops->shaper_work); 3800 3801 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 3802 err = register_netdev(dev); 3803 if (err) { 3804 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 3805 mlxsw_sp_port->local_port); 3806 goto err_register_netdev; 3807 } 3808 3809 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 3810 mlxsw_sp_port, dev); 3811 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 3812 return 0; 3813 3814 err_register_netdev: 3815 mlxsw_sp->ports[local_port] = NULL; 3816 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 3817 err_port_vlan_create: 3818 err_port_pvid_set: 3819 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3820 err_port_nve_init: 3821 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3822 err_port_qdiscs_init: 3823 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3824 err_port_fids_init: 3825 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3826 err_port_dcb_init: 3827 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3828 err_port_tc_mc_mode: 3829 err_port_ets_init: 3830 err_port_buffers_init: 3831 err_port_admin_status_set: 3832 err_port_mtu_set: 3833 err_port_speed_by_width_set: 3834 err_port_system_port_mapping_set: 3835 err_dev_addr_init: 3836 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3837 err_port_swid_set: 3838 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3839 err_port_module_map: 3840 kfree(mlxsw_sp_port->sample); 3841 err_alloc_sample: 3842 free_percpu(mlxsw_sp_port->pcpu_stats); 3843 err_alloc_stats: 3844 free_netdev(dev); 3845 err_alloc_etherdev: 3846 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3847 return err; 3848 } 3849 3850 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3851 { 3852 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3853 3854 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 3855 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); 3856 mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 3857 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3858 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3859 mlxsw_sp->ports[local_port] = NULL; 3860 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 3861 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3862 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3863 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3864 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3865 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3866 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3867 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3868 kfree(mlxsw_sp_port->sample); 3869 free_percpu(mlxsw_sp_port->pcpu_stats); 3870 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3871 free_netdev(mlxsw_sp_port->dev); 3872 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3873 } 3874 3875 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) 3876 { 3877 struct mlxsw_sp_port *mlxsw_sp_port; 3878 int err; 3879 3880 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL); 3881 if (!mlxsw_sp_port) 3882 return -ENOMEM; 3883 3884 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3885 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT; 3886 3887 err = mlxsw_core_cpu_port_init(mlxsw_sp->core, 3888 mlxsw_sp_port, 3889 mlxsw_sp->base_mac, 3890 sizeof(mlxsw_sp->base_mac)); 3891 if (err) { 3892 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n"); 3893 goto err_core_cpu_port_init; 3894 } 3895 3896 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port; 3897 return 0; 3898 3899 err_core_cpu_port_init: 3900 kfree(mlxsw_sp_port); 3901 return err; 3902 } 3903 3904 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) 3905 { 3906 struct mlxsw_sp_port *mlxsw_sp_port = 3907 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; 3908 3909 mlxsw_core_cpu_port_fini(mlxsw_sp->core); 3910 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL; 3911 kfree(mlxsw_sp_port); 3912 } 3913 3914 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3915 { 3916 return mlxsw_sp->ports[local_port] != NULL; 3917 } 3918 3919 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 3920 { 3921 int i; 3922 3923 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 3924 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3925 mlxsw_sp_port_remove(mlxsw_sp, i); 3926 mlxsw_sp_cpu_port_remove(mlxsw_sp); 3927 kfree(mlxsw_sp->port_to_module); 3928 kfree(mlxsw_sp->ports); 3929 } 3930 3931 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3932 { 3933 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3934 u8 module, width, lane; 3935 size_t alloc_size; 3936 int i; 3937 int err; 3938 3939 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 3940 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 3941 if (!mlxsw_sp->ports) 3942 return -ENOMEM; 3943 3944 mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int), 3945 GFP_KERNEL); 3946 if (!mlxsw_sp->port_to_module) { 3947 err = -ENOMEM; 3948 goto err_port_to_module_alloc; 3949 } 3950 3951 err = mlxsw_sp_cpu_port_create(mlxsw_sp); 3952 if (err) 3953 goto err_cpu_port_create; 3954 3955 for (i = 1; i < max_ports; i++) { 3956 /* Mark as invalid */ 3957 mlxsw_sp->port_to_module[i] = -1; 3958 3959 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 3960 &width, &lane); 3961 if (err) 3962 goto err_port_module_info_get; 3963 if (!width) 3964 continue; 3965 mlxsw_sp->port_to_module[i] = module; 3966 err = mlxsw_sp_port_create(mlxsw_sp, i, false, 3967 module, width, lane); 3968 if (err) 3969 goto err_port_create; 3970 } 3971 return 0; 3972 3973 err_port_create: 3974 err_port_module_info_get: 3975 for (i--; i >= 1; i--) 3976 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3977 mlxsw_sp_port_remove(mlxsw_sp, i); 3978 mlxsw_sp_cpu_port_remove(mlxsw_sp); 3979 err_cpu_port_create: 3980 kfree(mlxsw_sp->port_to_module); 3981 err_port_to_module_alloc: 3982 kfree(mlxsw_sp->ports); 3983 return err; 3984 } 3985 3986 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 3987 { 3988 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 3989 3990 return local_port - offset; 3991 } 3992 3993 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 3994 u8 module, unsigned int count, u8 offset) 3995 { 3996 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 3997 int err, i; 3998 3999 for (i = 0; i < count; i++) { 4000 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset, 4001 true, module, width, i * width); 4002 if (err) 4003 goto err_port_create; 4004 } 4005 4006 return 0; 4007 4008 err_port_create: 4009 for (i--; i >= 0; i--) 4010 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4011 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4012 return err; 4013 } 4014 4015 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 4016 u8 base_port, unsigned int count) 4017 { 4018 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 4019 int i; 4020 4021 /* Split by four means we need to re-create two ports, otherwise 4022 * only one. 4023 */ 4024 count = count / 2; 4025 4026 for (i = 0; i < count; i++) { 4027 local_port = base_port + i * 2; 4028 if (mlxsw_sp->port_to_module[local_port] < 0) 4029 continue; 4030 module = mlxsw_sp->port_to_module[local_port]; 4031 4032 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 4033 width, 0); 4034 } 4035 } 4036 4037 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 4038 unsigned int count, 4039 struct netlink_ext_ack *extack) 4040 { 4041 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4042 u8 local_ports_in_1x, local_ports_in_2x, offset; 4043 struct mlxsw_sp_port *mlxsw_sp_port; 4044 u8 module, cur_width, base_port; 4045 int i; 4046 int err; 4047 4048 if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) || 4049 !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X)) 4050 return -EIO; 4051 4052 local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X); 4053 local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X); 4054 4055 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4056 if (!mlxsw_sp_port) { 4057 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 4058 local_port); 4059 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 4060 return -EINVAL; 4061 } 4062 4063 module = mlxsw_sp_port->mapping.module; 4064 cur_width = mlxsw_sp_port->mapping.width; 4065 4066 if (count != 2 && count != 4) { 4067 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 4068 NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports"); 4069 return -EINVAL; 4070 } 4071 4072 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 4073 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 4074 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further"); 4075 return -EINVAL; 4076 } 4077 4078 /* Make sure we have enough slave (even) ports for the split. */ 4079 if (count == 2) { 4080 offset = local_ports_in_2x; 4081 base_port = local_port; 4082 if (mlxsw_sp->ports[base_port + local_ports_in_2x]) { 4083 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 4084 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 4085 return -EINVAL; 4086 } 4087 } else { 4088 offset = local_ports_in_1x; 4089 base_port = mlxsw_sp_cluster_base_port_get(local_port); 4090 if (mlxsw_sp->ports[base_port + 1] || 4091 mlxsw_sp->ports[base_port + 3]) { 4092 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 4093 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 4094 return -EINVAL; 4095 } 4096 } 4097 4098 for (i = 0; i < count; i++) 4099 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4100 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4101 4102 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count, 4103 offset); 4104 if (err) { 4105 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 4106 goto err_port_split_create; 4107 } 4108 4109 return 0; 4110 4111 err_port_split_create: 4112 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 4113 return err; 4114 } 4115 4116 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 4117 struct netlink_ext_ack *extack) 4118 { 4119 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4120 u8 local_ports_in_1x, local_ports_in_2x, offset; 4121 struct mlxsw_sp_port *mlxsw_sp_port; 4122 u8 cur_width, base_port; 4123 unsigned int count; 4124 int i; 4125 4126 if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) || 4127 !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X)) 4128 return -EIO; 4129 4130 local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X); 4131 local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X); 4132 4133 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4134 if (!mlxsw_sp_port) { 4135 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 4136 local_port); 4137 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 4138 return -EINVAL; 4139 } 4140 4141 if (!mlxsw_sp_port->split) { 4142 netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); 4143 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 4144 return -EINVAL; 4145 } 4146 4147 cur_width = mlxsw_sp_port->mapping.width; 4148 count = cur_width == 1 ? 4 : 2; 4149 4150 if (count == 2) 4151 offset = local_ports_in_2x; 4152 else 4153 offset = local_ports_in_1x; 4154 4155 base_port = mlxsw_sp_cluster_base_port_get(local_port); 4156 4157 /* Determine which ports to remove. */ 4158 if (count == 2 && local_port >= base_port + 2) 4159 base_port = base_port + 2; 4160 4161 for (i = 0; i < count; i++) 4162 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4163 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4164 4165 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 4166 4167 return 0; 4168 } 4169 4170 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 4171 char *pude_pl, void *priv) 4172 { 4173 struct mlxsw_sp *mlxsw_sp = priv; 4174 struct mlxsw_sp_port *mlxsw_sp_port; 4175 enum mlxsw_reg_pude_oper_status status; 4176 u8 local_port; 4177 4178 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 4179 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4180 if (!mlxsw_sp_port) 4181 return; 4182 4183 status = mlxsw_reg_pude_oper_status_get(pude_pl); 4184 if (status == MLXSW_PORT_OPER_STATUS_UP) { 4185 netdev_info(mlxsw_sp_port->dev, "link up\n"); 4186 netif_carrier_on(mlxsw_sp_port->dev); 4187 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 4188 } else { 4189 netdev_info(mlxsw_sp_port->dev, "link down\n"); 4190 netif_carrier_off(mlxsw_sp_port->dev); 4191 } 4192 } 4193 4194 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 4195 char *mtpptr_pl, bool ingress) 4196 { 4197 u8 local_port; 4198 u8 num_rec; 4199 int i; 4200 4201 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 4202 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 4203 for (i = 0; i < num_rec; i++) { 4204 u8 domain_number; 4205 u8 message_type; 4206 u16 sequence_id; 4207 u64 timestamp; 4208 4209 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 4210 &domain_number, &sequence_id, 4211 ×tamp); 4212 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 4213 message_type, domain_number, 4214 sequence_id, timestamp); 4215 } 4216 } 4217 4218 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 4219 char *mtpptr_pl, void *priv) 4220 { 4221 struct mlxsw_sp *mlxsw_sp = priv; 4222 4223 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 4224 } 4225 4226 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 4227 char *mtpptr_pl, void *priv) 4228 { 4229 struct mlxsw_sp *mlxsw_sp = priv; 4230 4231 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 4232 } 4233 4234 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 4235 u8 local_port, void *priv) 4236 { 4237 struct mlxsw_sp *mlxsw_sp = priv; 4238 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4239 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 4240 4241 if (unlikely(!mlxsw_sp_port)) { 4242 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 4243 local_port); 4244 return; 4245 } 4246 4247 skb->dev = mlxsw_sp_port->dev; 4248 4249 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 4250 u64_stats_update_begin(&pcpu_stats->syncp); 4251 pcpu_stats->rx_packets++; 4252 pcpu_stats->rx_bytes += skb->len; 4253 u64_stats_update_end(&pcpu_stats->syncp); 4254 4255 skb->protocol = eth_type_trans(skb, skb->dev); 4256 netif_receive_skb(skb); 4257 } 4258 4259 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 4260 void *priv) 4261 { 4262 skb->offload_fwd_mark = 1; 4263 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4264 } 4265 4266 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 4267 u8 local_port, void *priv) 4268 { 4269 skb->offload_l3_fwd_mark = 1; 4270 skb->offload_fwd_mark = 1; 4271 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4272 } 4273 4274 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 4275 void *priv) 4276 { 4277 struct mlxsw_sp *mlxsw_sp = priv; 4278 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4279 struct psample_group *psample_group; 4280 u32 size; 4281 4282 if (unlikely(!mlxsw_sp_port)) { 4283 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 4284 local_port); 4285 goto out; 4286 } 4287 if (unlikely(!mlxsw_sp_port->sample)) { 4288 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 4289 local_port); 4290 goto out; 4291 } 4292 4293 size = mlxsw_sp_port->sample->truncate ? 4294 mlxsw_sp_port->sample->trunc_size : skb->len; 4295 4296 rcu_read_lock(); 4297 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 4298 if (!psample_group) 4299 goto out_unlock; 4300 psample_sample_packet(psample_group, skb, size, 4301 mlxsw_sp_port->dev->ifindex, 0, 4302 mlxsw_sp_port->sample->rate); 4303 out_unlock: 4304 rcu_read_unlock(); 4305 out: 4306 consume_skb(skb); 4307 } 4308 4309 static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port, 4310 void *priv) 4311 { 4312 struct mlxsw_sp *mlxsw_sp = priv; 4313 4314 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 4315 } 4316 4317 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4318 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 4319 _is_ctrl, SP_##_trap_group, DISCARD) 4320 4321 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4322 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 4323 _is_ctrl, SP_##_trap_group, DISCARD) 4324 4325 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4326 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 4327 _is_ctrl, SP_##_trap_group, DISCARD) 4328 4329 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 4330 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 4331 4332 static const struct mlxsw_listener mlxsw_sp_listener[] = { 4333 /* Events */ 4334 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 4335 /* L2 traps */ 4336 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 4337 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 4338 MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU, 4339 false, SP_LLDP, DISCARD), 4340 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 4341 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 4342 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 4343 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 4344 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 4345 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 4346 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 4347 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 4348 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), 4349 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, 4350 false), 4351 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 4352 false), 4353 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, 4354 false), 4355 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 4356 false), 4357 /* L3 traps */ 4358 MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), 4359 MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), 4360 MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false), 4361 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), 4362 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 4363 false), 4364 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), 4365 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 4366 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), 4367 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, 4368 false), 4369 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), 4370 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), 4371 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), 4372 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), 4373 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), 4374 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), 4375 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 4376 false), 4377 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 4378 false), 4379 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 4380 false), 4381 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 4382 false), 4383 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), 4384 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 4385 false), 4386 MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false), 4387 MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false), 4388 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), 4389 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), 4390 MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), 4391 MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false), 4392 MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), 4393 MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), 4394 /* PKT Sample trap */ 4395 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 4396 false, SP_IP2ME, DISCARD), 4397 /* ACL trap */ 4398 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), 4399 /* Multicast Router Traps */ 4400 MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), 4401 MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false), 4402 MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), 4403 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 4404 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 4405 /* NVE traps */ 4406 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false), 4407 MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false), 4408 /* PTP traps */ 4409 MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU, 4410 false, SP_PTP0, DISCARD), 4411 MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false), 4412 }; 4413 4414 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 4415 /* Events */ 4416 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 4417 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 4418 }; 4419 4420 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 4421 { 4422 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 4423 enum mlxsw_reg_qpcr_ir_units ir_units; 4424 int max_cpu_policers; 4425 bool is_bytes; 4426 u8 burst_size; 4427 u32 rate; 4428 int i, err; 4429 4430 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 4431 return -EIO; 4432 4433 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 4434 4435 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 4436 for (i = 0; i < max_cpu_policers; i++) { 4437 is_bytes = false; 4438 switch (i) { 4439 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 4440 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 4441 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 4442 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 4443 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 4444 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 4445 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: 4446 rate = 128; 4447 burst_size = 7; 4448 break; 4449 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 4450 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 4451 rate = 16 * 1024; 4452 burst_size = 10; 4453 break; 4454 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 4455 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 4456 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 4457 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 4458 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 4459 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 4460 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 4461 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 4462 rate = 1024; 4463 burst_size = 7; 4464 break; 4465 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 4466 rate = 1024; 4467 burst_size = 7; 4468 break; 4469 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0: 4470 rate = 24 * 1024; 4471 burst_size = 12; 4472 break; 4473 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1: 4474 rate = 19 * 1024; 4475 burst_size = 12; 4476 break; 4477 default: 4478 continue; 4479 } 4480 4481 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 4482 burst_size); 4483 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 4484 if (err) 4485 return err; 4486 } 4487 4488 return 0; 4489 } 4490 4491 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 4492 { 4493 char htgt_pl[MLXSW_REG_HTGT_LEN]; 4494 enum mlxsw_reg_htgt_trap_group i; 4495 int max_cpu_policers; 4496 int max_trap_groups; 4497 u8 priority, tc; 4498 u16 policer_id; 4499 int err; 4500 4501 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 4502 return -EIO; 4503 4504 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 4505 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 4506 4507 for (i = 0; i < max_trap_groups; i++) { 4508 policer_id = i; 4509 switch (i) { 4510 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 4511 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 4512 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 4513 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 4514 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 4515 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0: 4516 priority = 5; 4517 tc = 5; 4518 break; 4519 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 4520 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 4521 priority = 4; 4522 tc = 4; 4523 break; 4524 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 4525 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 4526 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 4527 priority = 3; 4528 tc = 3; 4529 break; 4530 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 4531 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 4532 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 4533 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1: 4534 priority = 2; 4535 tc = 2; 4536 break; 4537 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 4538 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 4539 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 4540 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 4541 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: 4542 priority = 1; 4543 tc = 1; 4544 break; 4545 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 4546 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 4547 tc = MLXSW_REG_HTGT_DEFAULT_TC; 4548 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 4549 break; 4550 default: 4551 continue; 4552 } 4553 4554 if (max_cpu_policers <= policer_id && 4555 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 4556 return -EIO; 4557 4558 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 4559 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 4560 if (err) 4561 return err; 4562 } 4563 4564 return 0; 4565 } 4566 4567 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp, 4568 const struct mlxsw_listener listeners[], 4569 size_t listeners_count) 4570 { 4571 int i; 4572 int err; 4573 4574 for (i = 0; i < listeners_count; i++) { 4575 err = mlxsw_core_trap_register(mlxsw_sp->core, 4576 &listeners[i], 4577 mlxsw_sp); 4578 if (err) 4579 goto err_listener_register; 4580 4581 } 4582 return 0; 4583 4584 err_listener_register: 4585 for (i--; i >= 0; i--) { 4586 mlxsw_core_trap_unregister(mlxsw_sp->core, 4587 &listeners[i], 4588 mlxsw_sp); 4589 } 4590 return err; 4591 } 4592 4593 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp, 4594 const struct mlxsw_listener listeners[], 4595 size_t listeners_count) 4596 { 4597 int i; 4598 4599 for (i = 0; i < listeners_count; i++) { 4600 mlxsw_core_trap_unregister(mlxsw_sp->core, 4601 &listeners[i], 4602 mlxsw_sp); 4603 } 4604 } 4605 4606 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 4607 { 4608 int err; 4609 4610 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 4611 if (err) 4612 return err; 4613 4614 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 4615 if (err) 4616 return err; 4617 4618 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener, 4619 ARRAY_SIZE(mlxsw_sp_listener)); 4620 if (err) 4621 return err; 4622 4623 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners, 4624 mlxsw_sp->listeners_count); 4625 if (err) 4626 goto err_extra_traps_init; 4627 4628 return 0; 4629 4630 err_extra_traps_init: 4631 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 4632 ARRAY_SIZE(mlxsw_sp_listener)); 4633 return err; 4634 } 4635 4636 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 4637 { 4638 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners, 4639 mlxsw_sp->listeners_count); 4640 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 4641 ARRAY_SIZE(mlxsw_sp_listener)); 4642 } 4643 4644 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 4645 4646 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 4647 { 4648 char slcr_pl[MLXSW_REG_SLCR_LEN]; 4649 u32 seed; 4650 int err; 4651 4652 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 4653 MLXSW_SP_LAG_SEED_INIT); 4654 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 4655 MLXSW_REG_SLCR_LAG_HASH_DMAC | 4656 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 4657 MLXSW_REG_SLCR_LAG_HASH_VLANID | 4658 MLXSW_REG_SLCR_LAG_HASH_SIP | 4659 MLXSW_REG_SLCR_LAG_HASH_DIP | 4660 MLXSW_REG_SLCR_LAG_HASH_SPORT | 4661 MLXSW_REG_SLCR_LAG_HASH_DPORT | 4662 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 4663 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 4664 if (err) 4665 return err; 4666 4667 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 4668 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 4669 return -EIO; 4670 4671 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 4672 sizeof(struct mlxsw_sp_upper), 4673 GFP_KERNEL); 4674 if (!mlxsw_sp->lags) 4675 return -ENOMEM; 4676 4677 return 0; 4678 } 4679 4680 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 4681 { 4682 kfree(mlxsw_sp->lags); 4683 } 4684 4685 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 4686 { 4687 char htgt_pl[MLXSW_REG_HTGT_LEN]; 4688 4689 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 4690 MLXSW_REG_HTGT_INVALID_POLICER, 4691 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 4692 MLXSW_REG_HTGT_DEFAULT_TC); 4693 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 4694 } 4695 4696 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 4697 .clock_init = mlxsw_sp1_ptp_clock_init, 4698 .clock_fini = mlxsw_sp1_ptp_clock_fini, 4699 .init = mlxsw_sp1_ptp_init, 4700 .fini = mlxsw_sp1_ptp_fini, 4701 .receive = mlxsw_sp1_ptp_receive, 4702 .transmitted = mlxsw_sp1_ptp_transmitted, 4703 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 4704 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, 4705 .shaper_work = mlxsw_sp1_ptp_shaper_work, 4706 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 4707 .get_stats_count = mlxsw_sp1_get_stats_count, 4708 .get_stats_strings = mlxsw_sp1_get_stats_strings, 4709 .get_stats = mlxsw_sp1_get_stats, 4710 }; 4711 4712 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 4713 .clock_init = mlxsw_sp2_ptp_clock_init, 4714 .clock_fini = mlxsw_sp2_ptp_clock_fini, 4715 .init = mlxsw_sp2_ptp_init, 4716 .fini = mlxsw_sp2_ptp_fini, 4717 .receive = mlxsw_sp2_ptp_receive, 4718 .transmitted = mlxsw_sp2_ptp_transmitted, 4719 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 4720 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 4721 .shaper_work = mlxsw_sp2_ptp_shaper_work, 4722 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 4723 .get_stats_count = mlxsw_sp2_get_stats_count, 4724 .get_stats_strings = mlxsw_sp2_get_stats_strings, 4725 .get_stats = mlxsw_sp2_get_stats, 4726 }; 4727 4728 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 4729 unsigned long event, void *ptr); 4730 4731 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 4732 const struct mlxsw_bus_info *mlxsw_bus_info) 4733 { 4734 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4735 int err; 4736 4737 mlxsw_sp->core = mlxsw_core; 4738 mlxsw_sp->bus_info = mlxsw_bus_info; 4739 4740 err = mlxsw_sp_fw_rev_validate(mlxsw_sp); 4741 if (err) 4742 return err; 4743 4744 err = mlxsw_sp_base_mac_get(mlxsw_sp); 4745 if (err) { 4746 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 4747 return err; 4748 } 4749 4750 err = mlxsw_sp_kvdl_init(mlxsw_sp); 4751 if (err) { 4752 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 4753 return err; 4754 } 4755 4756 err = mlxsw_sp_fids_init(mlxsw_sp); 4757 if (err) { 4758 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 4759 goto err_fids_init; 4760 } 4761 4762 err = mlxsw_sp_traps_init(mlxsw_sp); 4763 if (err) { 4764 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 4765 goto err_traps_init; 4766 } 4767 4768 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 4769 if (err) { 4770 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 4771 goto err_devlink_traps_init; 4772 } 4773 4774 err = mlxsw_sp_buffers_init(mlxsw_sp); 4775 if (err) { 4776 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 4777 goto err_buffers_init; 4778 } 4779 4780 err = mlxsw_sp_lag_init(mlxsw_sp); 4781 if (err) { 4782 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 4783 goto err_lag_init; 4784 } 4785 4786 /* Initialize SPAN before router and switchdev, so that those components 4787 * can call mlxsw_sp_span_respin(). 4788 */ 4789 err = mlxsw_sp_span_init(mlxsw_sp); 4790 if (err) { 4791 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 4792 goto err_span_init; 4793 } 4794 4795 err = mlxsw_sp_switchdev_init(mlxsw_sp); 4796 if (err) { 4797 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 4798 goto err_switchdev_init; 4799 } 4800 4801 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 4802 if (err) { 4803 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 4804 goto err_counter_pool_init; 4805 } 4806 4807 err = mlxsw_sp_afa_init(mlxsw_sp); 4808 if (err) { 4809 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 4810 goto err_afa_init; 4811 } 4812 4813 err = mlxsw_sp_nve_init(mlxsw_sp); 4814 if (err) { 4815 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 4816 goto err_nve_init; 4817 } 4818 4819 err = mlxsw_sp_acl_init(mlxsw_sp); 4820 if (err) { 4821 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 4822 goto err_acl_init; 4823 } 4824 4825 err = mlxsw_sp_router_init(mlxsw_sp); 4826 if (err) { 4827 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 4828 goto err_router_init; 4829 } 4830 4831 if (mlxsw_sp->bus_info->read_frc_capable) { 4832 /* NULL is a valid return value from clock_init */ 4833 mlxsw_sp->clock = 4834 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp, 4835 mlxsw_sp->bus_info->dev); 4836 if (IS_ERR(mlxsw_sp->clock)) { 4837 err = PTR_ERR(mlxsw_sp->clock); 4838 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n"); 4839 goto err_ptp_clock_init; 4840 } 4841 } 4842 4843 if (mlxsw_sp->clock) { 4844 /* NULL is a valid return value from ptp_ops->init */ 4845 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp); 4846 if (IS_ERR(mlxsw_sp->ptp_state)) { 4847 err = PTR_ERR(mlxsw_sp->ptp_state); 4848 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n"); 4849 goto err_ptp_init; 4850 } 4851 } 4852 4853 /* Initialize netdevice notifier after router and SPAN is initialized, 4854 * so that the event handler can use router structures and call SPAN 4855 * respin. 4856 */ 4857 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 4858 err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb); 4859 if (err) { 4860 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 4861 goto err_netdev_notifier; 4862 } 4863 4864 err = mlxsw_sp_dpipe_init(mlxsw_sp); 4865 if (err) { 4866 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 4867 goto err_dpipe_init; 4868 } 4869 4870 err = mlxsw_sp_ports_create(mlxsw_sp); 4871 if (err) { 4872 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 4873 goto err_ports_create; 4874 } 4875 4876 return 0; 4877 4878 err_ports_create: 4879 mlxsw_sp_dpipe_fini(mlxsw_sp); 4880 err_dpipe_init: 4881 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 4882 err_netdev_notifier: 4883 if (mlxsw_sp->clock) 4884 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 4885 err_ptp_init: 4886 if (mlxsw_sp->clock) 4887 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 4888 err_ptp_clock_init: 4889 mlxsw_sp_router_fini(mlxsw_sp); 4890 err_router_init: 4891 mlxsw_sp_acl_fini(mlxsw_sp); 4892 err_acl_init: 4893 mlxsw_sp_nve_fini(mlxsw_sp); 4894 err_nve_init: 4895 mlxsw_sp_afa_fini(mlxsw_sp); 4896 err_afa_init: 4897 mlxsw_sp_counter_pool_fini(mlxsw_sp); 4898 err_counter_pool_init: 4899 mlxsw_sp_switchdev_fini(mlxsw_sp); 4900 err_switchdev_init: 4901 mlxsw_sp_span_fini(mlxsw_sp); 4902 err_span_init: 4903 mlxsw_sp_lag_fini(mlxsw_sp); 4904 err_lag_init: 4905 mlxsw_sp_buffers_fini(mlxsw_sp); 4906 err_buffers_init: 4907 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 4908 err_devlink_traps_init: 4909 mlxsw_sp_traps_fini(mlxsw_sp); 4910 err_traps_init: 4911 mlxsw_sp_fids_fini(mlxsw_sp); 4912 err_fids_init: 4913 mlxsw_sp_kvdl_fini(mlxsw_sp); 4914 return err; 4915 } 4916 4917 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 4918 const struct mlxsw_bus_info *mlxsw_bus_info) 4919 { 4920 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4921 4922 mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev; 4923 mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME; 4924 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 4925 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 4926 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 4927 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 4928 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 4929 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 4930 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 4931 mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr; 4932 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 4933 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 4934 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; 4935 mlxsw_sp->listeners = mlxsw_sp1_listener; 4936 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 4937 4938 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); 4939 } 4940 4941 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 4942 const struct mlxsw_bus_info *mlxsw_bus_info) 4943 { 4944 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4945 4946 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 4947 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 4948 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 4949 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 4950 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 4951 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 4952 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 4953 mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; 4954 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 4955 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 4956 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 4957 4958 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); 4959 } 4960 4961 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 4962 { 4963 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4964 4965 mlxsw_sp_ports_remove(mlxsw_sp); 4966 mlxsw_sp_dpipe_fini(mlxsw_sp); 4967 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 4968 if (mlxsw_sp->clock) { 4969 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 4970 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 4971 } 4972 mlxsw_sp_router_fini(mlxsw_sp); 4973 mlxsw_sp_acl_fini(mlxsw_sp); 4974 mlxsw_sp_nve_fini(mlxsw_sp); 4975 mlxsw_sp_afa_fini(mlxsw_sp); 4976 mlxsw_sp_counter_pool_fini(mlxsw_sp); 4977 mlxsw_sp_switchdev_fini(mlxsw_sp); 4978 mlxsw_sp_span_fini(mlxsw_sp); 4979 mlxsw_sp_lag_fini(mlxsw_sp); 4980 mlxsw_sp_buffers_fini(mlxsw_sp); 4981 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 4982 mlxsw_sp_traps_fini(mlxsw_sp); 4983 mlxsw_sp_fids_fini(mlxsw_sp); 4984 mlxsw_sp_kvdl_fini(mlxsw_sp); 4985 } 4986 4987 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated 4988 * 802.1Q FIDs 4989 */ 4990 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 4991 VLAN_VID_MASK - 1) 4992 4993 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 4994 .used_max_mid = 1, 4995 .max_mid = MLXSW_SP_MID_MAX, 4996 .used_flood_tables = 1, 4997 .used_flood_mode = 1, 4998 .flood_mode = 3, 4999 .max_fid_flood_tables = 3, 5000 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 5001 .used_max_ib_mc = 1, 5002 .max_ib_mc = 0, 5003 .used_max_pkey = 1, 5004 .max_pkey = 0, 5005 .used_kvd_sizes = 1, 5006 .kvd_hash_single_parts = 59, 5007 .kvd_hash_double_parts = 41, 5008 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 5009 .swid_config = { 5010 { 5011 .used_type = 1, 5012 .type = MLXSW_PORT_SWID_TYPE_ETH, 5013 } 5014 }, 5015 }; 5016 5017 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 5018 .used_max_mid = 1, 5019 .max_mid = MLXSW_SP_MID_MAX, 5020 .used_flood_tables = 1, 5021 .used_flood_mode = 1, 5022 .flood_mode = 3, 5023 .max_fid_flood_tables = 3, 5024 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 5025 .used_max_ib_mc = 1, 5026 .max_ib_mc = 0, 5027 .used_max_pkey = 1, 5028 .max_pkey = 0, 5029 .swid_config = { 5030 { 5031 .used_type = 1, 5032 .type = MLXSW_PORT_SWID_TYPE_ETH, 5033 } 5034 }, 5035 }; 5036 5037 static void 5038 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 5039 struct devlink_resource_size_params *kvd_size_params, 5040 struct devlink_resource_size_params *linear_size_params, 5041 struct devlink_resource_size_params *hash_double_size_params, 5042 struct devlink_resource_size_params *hash_single_size_params) 5043 { 5044 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 5045 KVD_SINGLE_MIN_SIZE); 5046 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 5047 KVD_DOUBLE_MIN_SIZE); 5048 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 5049 u32 linear_size_min = 0; 5050 5051 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 5052 MLXSW_SP_KVD_GRANULARITY, 5053 DEVLINK_RESOURCE_UNIT_ENTRY); 5054 devlink_resource_size_params_init(linear_size_params, linear_size_min, 5055 kvd_size - single_size_min - 5056 double_size_min, 5057 MLXSW_SP_KVD_GRANULARITY, 5058 DEVLINK_RESOURCE_UNIT_ENTRY); 5059 devlink_resource_size_params_init(hash_double_size_params, 5060 double_size_min, 5061 kvd_size - single_size_min - 5062 linear_size_min, 5063 MLXSW_SP_KVD_GRANULARITY, 5064 DEVLINK_RESOURCE_UNIT_ENTRY); 5065 devlink_resource_size_params_init(hash_single_size_params, 5066 single_size_min, 5067 kvd_size - double_size_min - 5068 linear_size_min, 5069 MLXSW_SP_KVD_GRANULARITY, 5070 DEVLINK_RESOURCE_UNIT_ENTRY); 5071 } 5072 5073 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 5074 { 5075 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5076 struct devlink_resource_size_params hash_single_size_params; 5077 struct devlink_resource_size_params hash_double_size_params; 5078 struct devlink_resource_size_params linear_size_params; 5079 struct devlink_resource_size_params kvd_size_params; 5080 u32 kvd_size, single_size, double_size, linear_size; 5081 const struct mlxsw_config_profile *profile; 5082 int err; 5083 5084 profile = &mlxsw_sp1_config_profile; 5085 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 5086 return -EIO; 5087 5088 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 5089 &linear_size_params, 5090 &hash_double_size_params, 5091 &hash_single_size_params); 5092 5093 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 5094 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 5095 kvd_size, MLXSW_SP_RESOURCE_KVD, 5096 DEVLINK_RESOURCE_ID_PARENT_TOP, 5097 &kvd_size_params); 5098 if (err) 5099 return err; 5100 5101 linear_size = profile->kvd_linear_size; 5102 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 5103 linear_size, 5104 MLXSW_SP_RESOURCE_KVD_LINEAR, 5105 MLXSW_SP_RESOURCE_KVD, 5106 &linear_size_params); 5107 if (err) 5108 return err; 5109 5110 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 5111 if (err) 5112 return err; 5113 5114 double_size = kvd_size - linear_size; 5115 double_size *= profile->kvd_hash_double_parts; 5116 double_size /= profile->kvd_hash_double_parts + 5117 profile->kvd_hash_single_parts; 5118 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 5119 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 5120 double_size, 5121 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 5122 MLXSW_SP_RESOURCE_KVD, 5123 &hash_double_size_params); 5124 if (err) 5125 return err; 5126 5127 single_size = kvd_size - double_size - linear_size; 5128 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 5129 single_size, 5130 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 5131 MLXSW_SP_RESOURCE_KVD, 5132 &hash_single_size_params); 5133 if (err) 5134 return err; 5135 5136 return 0; 5137 } 5138 5139 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) 5140 { 5141 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5142 struct devlink_resource_size_params kvd_size_params; 5143 u32 kvd_size; 5144 5145 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 5146 return -EIO; 5147 5148 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 5149 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, 5150 MLXSW_SP_KVD_GRANULARITY, 5151 DEVLINK_RESOURCE_UNIT_ENTRY); 5152 5153 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 5154 kvd_size, MLXSW_SP_RESOURCE_KVD, 5155 DEVLINK_RESOURCE_ID_PARENT_TOP, 5156 &kvd_size_params); 5157 } 5158 5159 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 5160 { 5161 return mlxsw_sp1_resources_kvd_register(mlxsw_core); 5162 } 5163 5164 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 5165 { 5166 return mlxsw_sp2_resources_kvd_register(mlxsw_core); 5167 } 5168 5169 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 5170 const struct mlxsw_config_profile *profile, 5171 u64 *p_single_size, u64 *p_double_size, 5172 u64 *p_linear_size) 5173 { 5174 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5175 u32 double_size; 5176 int err; 5177 5178 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 5179 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 5180 return -EIO; 5181 5182 /* The hash part is what left of the kvd without the 5183 * linear part. It is split to the single size and 5184 * double size by the parts ratio from the profile. 5185 * Both sizes must be a multiplications of the 5186 * granularity from the profile. In case the user 5187 * provided the sizes they are obtained via devlink. 5188 */ 5189 err = devlink_resource_size_get(devlink, 5190 MLXSW_SP_RESOURCE_KVD_LINEAR, 5191 p_linear_size); 5192 if (err) 5193 *p_linear_size = profile->kvd_linear_size; 5194 5195 err = devlink_resource_size_get(devlink, 5196 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 5197 p_double_size); 5198 if (err) { 5199 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 5200 *p_linear_size; 5201 double_size *= profile->kvd_hash_double_parts; 5202 double_size /= profile->kvd_hash_double_parts + 5203 profile->kvd_hash_single_parts; 5204 *p_double_size = rounddown(double_size, 5205 MLXSW_SP_KVD_GRANULARITY); 5206 } 5207 5208 err = devlink_resource_size_get(devlink, 5209 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 5210 p_single_size); 5211 if (err) 5212 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 5213 *p_double_size - *p_linear_size; 5214 5215 /* Check results are legal. */ 5216 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 5217 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 5218 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 5219 return -EIO; 5220 5221 return 0; 5222 } 5223 5224 static int 5225 mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id, 5226 union devlink_param_value val, 5227 struct netlink_ext_ack *extack) 5228 { 5229 if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) && 5230 (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) { 5231 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'"); 5232 return -EINVAL; 5233 } 5234 5235 return 0; 5236 } 5237 5238 static const struct devlink_param mlxsw_sp_devlink_params[] = { 5239 DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, 5240 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), 5241 NULL, NULL, 5242 mlxsw_sp_devlink_param_fw_load_policy_validate), 5243 }; 5244 5245 static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core) 5246 { 5247 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5248 union devlink_param_value value; 5249 int err; 5250 5251 err = devlink_params_register(devlink, mlxsw_sp_devlink_params, 5252 ARRAY_SIZE(mlxsw_sp_devlink_params)); 5253 if (err) 5254 return err; 5255 5256 value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER; 5257 devlink_param_driverinit_value_set(devlink, 5258 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 5259 value); 5260 return 0; 5261 } 5262 5263 static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core) 5264 { 5265 devlink_params_unregister(priv_to_devlink(mlxsw_core), 5266 mlxsw_sp_devlink_params, 5267 ARRAY_SIZE(mlxsw_sp_devlink_params)); 5268 } 5269 5270 static int 5271 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 5272 struct devlink_param_gset_ctx *ctx) 5273 { 5274 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 5275 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5276 5277 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 5278 return 0; 5279 } 5280 5281 static int 5282 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 5283 struct devlink_param_gset_ctx *ctx) 5284 { 5285 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 5286 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5287 5288 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 5289 } 5290 5291 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 5292 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 5293 "acl_region_rehash_interval", 5294 DEVLINK_PARAM_TYPE_U32, 5295 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 5296 mlxsw_sp_params_acl_region_rehash_intrvl_get, 5297 mlxsw_sp_params_acl_region_rehash_intrvl_set, 5298 NULL), 5299 }; 5300 5301 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 5302 { 5303 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5304 union devlink_param_value value; 5305 int err; 5306 5307 err = mlxsw_sp_params_register(mlxsw_core); 5308 if (err) 5309 return err; 5310 5311 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 5312 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 5313 if (err) 5314 goto err_devlink_params_register; 5315 5316 value.vu32 = 0; 5317 devlink_param_driverinit_value_set(devlink, 5318 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 5319 value); 5320 return 0; 5321 5322 err_devlink_params_register: 5323 mlxsw_sp_params_unregister(mlxsw_core); 5324 return err; 5325 } 5326 5327 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 5328 { 5329 devlink_params_unregister(priv_to_devlink(mlxsw_core), 5330 mlxsw_sp2_devlink_params, 5331 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 5332 mlxsw_sp_params_unregister(mlxsw_core); 5333 } 5334 5335 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 5336 struct sk_buff *skb, u8 local_port) 5337 { 5338 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5339 5340 skb_pull(skb, MLXSW_TXHDR_LEN); 5341 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 5342 } 5343 5344 static struct mlxsw_driver mlxsw_sp1_driver = { 5345 .kind = mlxsw_sp1_driver_name, 5346 .priv_size = sizeof(struct mlxsw_sp), 5347 .init = mlxsw_sp1_init, 5348 .fini = mlxsw_sp_fini, 5349 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5350 .port_split = mlxsw_sp_port_split, 5351 .port_unsplit = mlxsw_sp_port_unsplit, 5352 .sb_pool_get = mlxsw_sp_sb_pool_get, 5353 .sb_pool_set = mlxsw_sp_sb_pool_set, 5354 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5355 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5356 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5357 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5358 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5359 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5360 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5361 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5362 .flash_update = mlxsw_sp_flash_update, 5363 .trap_init = mlxsw_sp_trap_init, 5364 .trap_fini = mlxsw_sp_trap_fini, 5365 .trap_action_set = mlxsw_sp_trap_action_set, 5366 .trap_group_init = mlxsw_sp_trap_group_init, 5367 .txhdr_construct = mlxsw_sp_txhdr_construct, 5368 .resources_register = mlxsw_sp1_resources_register, 5369 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 5370 .params_register = mlxsw_sp_params_register, 5371 .params_unregister = mlxsw_sp_params_unregister, 5372 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5373 .txhdr_len = MLXSW_TXHDR_LEN, 5374 .profile = &mlxsw_sp1_config_profile, 5375 .res_query_enabled = true, 5376 }; 5377 5378 static struct mlxsw_driver mlxsw_sp2_driver = { 5379 .kind = mlxsw_sp2_driver_name, 5380 .priv_size = sizeof(struct mlxsw_sp), 5381 .init = mlxsw_sp2_init, 5382 .fini = mlxsw_sp_fini, 5383 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5384 .port_split = mlxsw_sp_port_split, 5385 .port_unsplit = mlxsw_sp_port_unsplit, 5386 .sb_pool_get = mlxsw_sp_sb_pool_get, 5387 .sb_pool_set = mlxsw_sp_sb_pool_set, 5388 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5389 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5390 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5391 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5392 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5393 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5394 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5395 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5396 .flash_update = mlxsw_sp_flash_update, 5397 .trap_init = mlxsw_sp_trap_init, 5398 .trap_fini = mlxsw_sp_trap_fini, 5399 .trap_action_set = mlxsw_sp_trap_action_set, 5400 .trap_group_init = mlxsw_sp_trap_group_init, 5401 .txhdr_construct = mlxsw_sp_txhdr_construct, 5402 .resources_register = mlxsw_sp2_resources_register, 5403 .params_register = mlxsw_sp2_params_register, 5404 .params_unregister = mlxsw_sp2_params_unregister, 5405 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5406 .txhdr_len = MLXSW_TXHDR_LEN, 5407 .profile = &mlxsw_sp2_config_profile, 5408 .res_query_enabled = true, 5409 }; 5410 5411 static struct mlxsw_driver mlxsw_sp3_driver = { 5412 .kind = mlxsw_sp3_driver_name, 5413 .priv_size = sizeof(struct mlxsw_sp), 5414 .init = mlxsw_sp2_init, 5415 .fini = mlxsw_sp_fini, 5416 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5417 .port_split = mlxsw_sp_port_split, 5418 .port_unsplit = mlxsw_sp_port_unsplit, 5419 .sb_pool_get = mlxsw_sp_sb_pool_get, 5420 .sb_pool_set = mlxsw_sp_sb_pool_set, 5421 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5422 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5423 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5424 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5425 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5426 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5427 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5428 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5429 .flash_update = mlxsw_sp_flash_update, 5430 .trap_init = mlxsw_sp_trap_init, 5431 .trap_fini = mlxsw_sp_trap_fini, 5432 .trap_action_set = mlxsw_sp_trap_action_set, 5433 .trap_group_init = mlxsw_sp_trap_group_init, 5434 .txhdr_construct = mlxsw_sp_txhdr_construct, 5435 .resources_register = mlxsw_sp2_resources_register, 5436 .params_register = mlxsw_sp2_params_register, 5437 .params_unregister = mlxsw_sp2_params_unregister, 5438 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5439 .txhdr_len = MLXSW_TXHDR_LEN, 5440 .profile = &mlxsw_sp2_config_profile, 5441 .res_query_enabled = true, 5442 }; 5443 5444 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 5445 { 5446 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 5447 } 5448 5449 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) 5450 { 5451 struct mlxsw_sp_port **p_mlxsw_sp_port = data; 5452 int ret = 0; 5453 5454 if (mlxsw_sp_port_dev_check(lower_dev)) { 5455 *p_mlxsw_sp_port = netdev_priv(lower_dev); 5456 ret = 1; 5457 } 5458 5459 return ret; 5460 } 5461 5462 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 5463 { 5464 struct mlxsw_sp_port *mlxsw_sp_port; 5465 5466 if (mlxsw_sp_port_dev_check(dev)) 5467 return netdev_priv(dev); 5468 5469 mlxsw_sp_port = NULL; 5470 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); 5471 5472 return mlxsw_sp_port; 5473 } 5474 5475 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 5476 { 5477 struct mlxsw_sp_port *mlxsw_sp_port; 5478 5479 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 5480 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; 5481 } 5482 5483 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 5484 { 5485 struct mlxsw_sp_port *mlxsw_sp_port; 5486 5487 if (mlxsw_sp_port_dev_check(dev)) 5488 return netdev_priv(dev); 5489 5490 mlxsw_sp_port = NULL; 5491 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 5492 &mlxsw_sp_port); 5493 5494 return mlxsw_sp_port; 5495 } 5496 5497 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 5498 { 5499 struct mlxsw_sp_port *mlxsw_sp_port; 5500 5501 rcu_read_lock(); 5502 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 5503 if (mlxsw_sp_port) 5504 dev_hold(mlxsw_sp_port->dev); 5505 rcu_read_unlock(); 5506 return mlxsw_sp_port; 5507 } 5508 5509 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 5510 { 5511 dev_put(mlxsw_sp_port->dev); 5512 } 5513 5514 static void 5515 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 5516 struct net_device *lag_dev) 5517 { 5518 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 5519 struct net_device *upper_dev; 5520 struct list_head *iter; 5521 5522 if (netif_is_bridge_port(lag_dev)) 5523 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 5524 5525 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 5526 if (!netif_is_bridge_port(upper_dev)) 5527 continue; 5528 br_dev = netdev_master_upper_dev_get(upper_dev); 5529 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 5530 } 5531 } 5532 5533 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5534 { 5535 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5536 5537 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 5538 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5539 } 5540 5541 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5542 { 5543 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5544 5545 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 5546 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5547 } 5548 5549 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 5550 u16 lag_id, u8 port_index) 5551 { 5552 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5553 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5554 5555 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 5556 lag_id, port_index); 5557 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5558 } 5559 5560 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 5561 u16 lag_id) 5562 { 5563 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5564 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5565 5566 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 5567 lag_id); 5568 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5569 } 5570 5571 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 5572 u16 lag_id) 5573 { 5574 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5575 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5576 5577 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 5578 lag_id); 5579 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5580 } 5581 5582 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 5583 u16 lag_id) 5584 { 5585 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5586 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5587 5588 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 5589 lag_id); 5590 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5591 } 5592 5593 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 5594 struct net_device *lag_dev, 5595 u16 *p_lag_id) 5596 { 5597 struct mlxsw_sp_upper *lag; 5598 int free_lag_id = -1; 5599 u64 max_lag; 5600 int i; 5601 5602 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); 5603 for (i = 0; i < max_lag; i++) { 5604 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 5605 if (lag->ref_count) { 5606 if (lag->dev == lag_dev) { 5607 *p_lag_id = i; 5608 return 0; 5609 } 5610 } else if (free_lag_id < 0) { 5611 free_lag_id = i; 5612 } 5613 } 5614 if (free_lag_id < 0) 5615 return -EBUSY; 5616 *p_lag_id = free_lag_id; 5617 return 0; 5618 } 5619 5620 static bool 5621 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 5622 struct net_device *lag_dev, 5623 struct netdev_lag_upper_info *lag_upper_info, 5624 struct netlink_ext_ack *extack) 5625 { 5626 u16 lag_id; 5627 5628 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 5629 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 5630 return false; 5631 } 5632 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 5633 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 5634 return false; 5635 } 5636 return true; 5637 } 5638 5639 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 5640 u16 lag_id, u8 *p_port_index) 5641 { 5642 u64 max_lag_members; 5643 int i; 5644 5645 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 5646 MAX_LAG_MEMBERS); 5647 for (i = 0; i < max_lag_members; i++) { 5648 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 5649 *p_port_index = i; 5650 return 0; 5651 } 5652 } 5653 return -EBUSY; 5654 } 5655 5656 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 5657 struct net_device *lag_dev) 5658 { 5659 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5660 struct mlxsw_sp_upper *lag; 5661 u16 lag_id; 5662 u8 port_index; 5663 int err; 5664 5665 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 5666 if (err) 5667 return err; 5668 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5669 if (!lag->ref_count) { 5670 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 5671 if (err) 5672 return err; 5673 lag->dev = lag_dev; 5674 } 5675 5676 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 5677 if (err) 5678 return err; 5679 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 5680 if (err) 5681 goto err_col_port_add; 5682 5683 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 5684 mlxsw_sp_port->local_port); 5685 mlxsw_sp_port->lag_id = lag_id; 5686 mlxsw_sp_port->lagged = 1; 5687 lag->ref_count++; 5688 5689 /* Port is no longer usable as a router interface */ 5690 if (mlxsw_sp_port->default_vlan->fid) 5691 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 5692 5693 return 0; 5694 5695 err_col_port_add: 5696 if (!lag->ref_count) 5697 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 5698 return err; 5699 } 5700 5701 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 5702 struct net_device *lag_dev) 5703 { 5704 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5705 u16 lag_id = mlxsw_sp_port->lag_id; 5706 struct mlxsw_sp_upper *lag; 5707 5708 if (!mlxsw_sp_port->lagged) 5709 return; 5710 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5711 WARN_ON(lag->ref_count == 0); 5712 5713 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 5714 5715 /* Any VLANs configured on the port are no longer valid */ 5716 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 5717 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 5718 /* Make the LAG and its directly linked uppers leave bridges they 5719 * are memeber in 5720 */ 5721 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 5722 5723 if (lag->ref_count == 1) 5724 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 5725 5726 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 5727 mlxsw_sp_port->local_port); 5728 mlxsw_sp_port->lagged = 0; 5729 lag->ref_count--; 5730 5731 /* Make sure untagged frames are allowed to ingress */ 5732 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 5733 } 5734 5735 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 5736 u16 lag_id) 5737 { 5738 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5739 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5740 5741 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 5742 mlxsw_sp_port->local_port); 5743 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5744 } 5745 5746 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 5747 u16 lag_id) 5748 { 5749 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5750 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5751 5752 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 5753 mlxsw_sp_port->local_port); 5754 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5755 } 5756 5757 static int 5758 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 5759 { 5760 int err; 5761 5762 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 5763 mlxsw_sp_port->lag_id); 5764 if (err) 5765 return err; 5766 5767 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5768 if (err) 5769 goto err_dist_port_add; 5770 5771 return 0; 5772 5773 err_dist_port_add: 5774 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5775 return err; 5776 } 5777 5778 static int 5779 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 5780 { 5781 int err; 5782 5783 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 5784 mlxsw_sp_port->lag_id); 5785 if (err) 5786 return err; 5787 5788 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 5789 mlxsw_sp_port->lag_id); 5790 if (err) 5791 goto err_col_port_disable; 5792 5793 return 0; 5794 5795 err_col_port_disable: 5796 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5797 return err; 5798 } 5799 5800 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 5801 struct netdev_lag_lower_state_info *info) 5802 { 5803 if (info->tx_enabled) 5804 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 5805 else 5806 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 5807 } 5808 5809 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 5810 bool enable) 5811 { 5812 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5813 enum mlxsw_reg_spms_state spms_state; 5814 char *spms_pl; 5815 u16 vid; 5816 int err; 5817 5818 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : 5819 MLXSW_REG_SPMS_STATE_DISCARDING; 5820 5821 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 5822 if (!spms_pl) 5823 return -ENOMEM; 5824 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 5825 5826 for (vid = 0; vid < VLAN_N_VID; vid++) 5827 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 5828 5829 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 5830 kfree(spms_pl); 5831 return err; 5832 } 5833 5834 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 5835 { 5836 u16 vid = 1; 5837 int err; 5838 5839 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 5840 if (err) 5841 return err; 5842 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 5843 if (err) 5844 goto err_port_stp_set; 5845 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 5846 true, false); 5847 if (err) 5848 goto err_port_vlan_set; 5849 5850 for (; vid <= VLAN_N_VID - 1; vid++) { 5851 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 5852 vid, false); 5853 if (err) 5854 goto err_vid_learning_set; 5855 } 5856 5857 return 0; 5858 5859 err_vid_learning_set: 5860 for (vid--; vid >= 1; vid--) 5861 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 5862 err_port_vlan_set: 5863 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 5864 err_port_stp_set: 5865 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 5866 return err; 5867 } 5868 5869 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 5870 { 5871 u16 vid; 5872 5873 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 5874 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 5875 vid, true); 5876 5877 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 5878 false, false); 5879 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 5880 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 5881 } 5882 5883 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 5884 { 5885 unsigned int num_vxlans = 0; 5886 struct net_device *dev; 5887 struct list_head *iter; 5888 5889 netdev_for_each_lower_dev(br_dev, dev, iter) { 5890 if (netif_is_vxlan(dev)) 5891 num_vxlans++; 5892 } 5893 5894 return num_vxlans > 1; 5895 } 5896 5897 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 5898 { 5899 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 5900 struct net_device *dev; 5901 struct list_head *iter; 5902 5903 netdev_for_each_lower_dev(br_dev, dev, iter) { 5904 u16 pvid; 5905 int err; 5906 5907 if (!netif_is_vxlan(dev)) 5908 continue; 5909 5910 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 5911 if (err || !pvid) 5912 continue; 5913 5914 if (test_and_set_bit(pvid, vlans)) 5915 return false; 5916 } 5917 5918 return true; 5919 } 5920 5921 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 5922 struct netlink_ext_ack *extack) 5923 { 5924 if (br_multicast_enabled(br_dev)) { 5925 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 5926 return false; 5927 } 5928 5929 if (!br_vlan_enabled(br_dev) && 5930 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 5931 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 5932 return false; 5933 } 5934 5935 if (br_vlan_enabled(br_dev) && 5936 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 5937 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 5938 return false; 5939 } 5940 5941 return true; 5942 } 5943 5944 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 5945 struct net_device *dev, 5946 unsigned long event, void *ptr) 5947 { 5948 struct netdev_notifier_changeupper_info *info; 5949 struct mlxsw_sp_port *mlxsw_sp_port; 5950 struct netlink_ext_ack *extack; 5951 struct net_device *upper_dev; 5952 struct mlxsw_sp *mlxsw_sp; 5953 int err = 0; 5954 5955 mlxsw_sp_port = netdev_priv(dev); 5956 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5957 info = ptr; 5958 extack = netdev_notifier_info_to_extack(&info->info); 5959 5960 switch (event) { 5961 case NETDEV_PRECHANGEUPPER: 5962 upper_dev = info->upper_dev; 5963 if (!is_vlan_dev(upper_dev) && 5964 !netif_is_lag_master(upper_dev) && 5965 !netif_is_bridge_master(upper_dev) && 5966 !netif_is_ovs_master(upper_dev) && 5967 !netif_is_macvlan(upper_dev)) { 5968 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5969 return -EINVAL; 5970 } 5971 if (!info->linking) 5972 break; 5973 if (netif_is_bridge_master(upper_dev) && 5974 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 5975 mlxsw_sp_bridge_has_vxlan(upper_dev) && 5976 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5977 return -EOPNOTSUPP; 5978 if (netdev_has_any_upper_dev(upper_dev) && 5979 (!netif_is_bridge_master(upper_dev) || 5980 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 5981 upper_dev))) { 5982 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 5983 return -EINVAL; 5984 } 5985 if (netif_is_lag_master(upper_dev) && 5986 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 5987 info->upper_info, extack)) 5988 return -EINVAL; 5989 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 5990 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 5991 return -EINVAL; 5992 } 5993 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 5994 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 5995 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 5996 return -EINVAL; 5997 } 5998 if (netif_is_macvlan(upper_dev) && 5999 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) { 6000 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6001 return -EOPNOTSUPP; 6002 } 6003 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 6004 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); 6005 return -EINVAL; 6006 } 6007 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 6008 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 6009 return -EINVAL; 6010 } 6011 break; 6012 case NETDEV_CHANGEUPPER: 6013 upper_dev = info->upper_dev; 6014 if (netif_is_bridge_master(upper_dev)) { 6015 if (info->linking) 6016 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 6017 lower_dev, 6018 upper_dev, 6019 extack); 6020 else 6021 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 6022 lower_dev, 6023 upper_dev); 6024 } else if (netif_is_lag_master(upper_dev)) { 6025 if (info->linking) { 6026 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 6027 upper_dev); 6028 } else { 6029 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 6030 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 6031 upper_dev); 6032 } 6033 } else if (netif_is_ovs_master(upper_dev)) { 6034 if (info->linking) 6035 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 6036 else 6037 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 6038 } else if (netif_is_macvlan(upper_dev)) { 6039 if (!info->linking) 6040 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6041 } else if (is_vlan_dev(upper_dev)) { 6042 struct net_device *br_dev; 6043 6044 if (!netif_is_bridge_port(upper_dev)) 6045 break; 6046 if (info->linking) 6047 break; 6048 br_dev = netdev_master_upper_dev_get(upper_dev); 6049 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 6050 br_dev); 6051 } 6052 break; 6053 } 6054 6055 return err; 6056 } 6057 6058 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 6059 unsigned long event, void *ptr) 6060 { 6061 struct netdev_notifier_changelowerstate_info *info; 6062 struct mlxsw_sp_port *mlxsw_sp_port; 6063 int err; 6064 6065 mlxsw_sp_port = netdev_priv(dev); 6066 info = ptr; 6067 6068 switch (event) { 6069 case NETDEV_CHANGELOWERSTATE: 6070 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 6071 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 6072 info->lower_state_info); 6073 if (err) 6074 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 6075 } 6076 break; 6077 } 6078 6079 return 0; 6080 } 6081 6082 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 6083 struct net_device *port_dev, 6084 unsigned long event, void *ptr) 6085 { 6086 switch (event) { 6087 case NETDEV_PRECHANGEUPPER: 6088 case NETDEV_CHANGEUPPER: 6089 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 6090 event, ptr); 6091 case NETDEV_CHANGELOWERSTATE: 6092 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 6093 ptr); 6094 } 6095 6096 return 0; 6097 } 6098 6099 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 6100 unsigned long event, void *ptr) 6101 { 6102 struct net_device *dev; 6103 struct list_head *iter; 6104 int ret; 6105 6106 netdev_for_each_lower_dev(lag_dev, dev, iter) { 6107 if (mlxsw_sp_port_dev_check(dev)) { 6108 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 6109 ptr); 6110 if (ret) 6111 return ret; 6112 } 6113 } 6114 6115 return 0; 6116 } 6117 6118 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 6119 struct net_device *dev, 6120 unsigned long event, void *ptr, 6121 u16 vid) 6122 { 6123 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 6124 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6125 struct netdev_notifier_changeupper_info *info = ptr; 6126 struct netlink_ext_ack *extack; 6127 struct net_device *upper_dev; 6128 int err = 0; 6129 6130 extack = netdev_notifier_info_to_extack(&info->info); 6131 6132 switch (event) { 6133 case NETDEV_PRECHANGEUPPER: 6134 upper_dev = info->upper_dev; 6135 if (!netif_is_bridge_master(upper_dev) && 6136 !netif_is_macvlan(upper_dev)) { 6137 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6138 return -EINVAL; 6139 } 6140 if (!info->linking) 6141 break; 6142 if (netif_is_bridge_master(upper_dev) && 6143 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 6144 mlxsw_sp_bridge_has_vxlan(upper_dev) && 6145 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 6146 return -EOPNOTSUPP; 6147 if (netdev_has_any_upper_dev(upper_dev) && 6148 (!netif_is_bridge_master(upper_dev) || 6149 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 6150 upper_dev))) { 6151 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 6152 return -EINVAL; 6153 } 6154 if (netif_is_macvlan(upper_dev) && 6155 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { 6156 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6157 return -EOPNOTSUPP; 6158 } 6159 break; 6160 case NETDEV_CHANGEUPPER: 6161 upper_dev = info->upper_dev; 6162 if (netif_is_bridge_master(upper_dev)) { 6163 if (info->linking) 6164 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 6165 vlan_dev, 6166 upper_dev, 6167 extack); 6168 else 6169 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 6170 vlan_dev, 6171 upper_dev); 6172 } else if (netif_is_macvlan(upper_dev)) { 6173 if (!info->linking) 6174 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6175 } else { 6176 err = -EINVAL; 6177 WARN_ON(1); 6178 } 6179 break; 6180 } 6181 6182 return err; 6183 } 6184 6185 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 6186 struct net_device *lag_dev, 6187 unsigned long event, 6188 void *ptr, u16 vid) 6189 { 6190 struct net_device *dev; 6191 struct list_head *iter; 6192 int ret; 6193 6194 netdev_for_each_lower_dev(lag_dev, dev, iter) { 6195 if (mlxsw_sp_port_dev_check(dev)) { 6196 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 6197 event, ptr, 6198 vid); 6199 if (ret) 6200 return ret; 6201 } 6202 } 6203 6204 return 0; 6205 } 6206 6207 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, 6208 struct net_device *br_dev, 6209 unsigned long event, void *ptr, 6210 u16 vid) 6211 { 6212 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 6213 struct netdev_notifier_changeupper_info *info = ptr; 6214 struct netlink_ext_ack *extack; 6215 struct net_device *upper_dev; 6216 6217 if (!mlxsw_sp) 6218 return 0; 6219 6220 extack = netdev_notifier_info_to_extack(&info->info); 6221 6222 switch (event) { 6223 case NETDEV_PRECHANGEUPPER: 6224 upper_dev = info->upper_dev; 6225 if (!netif_is_macvlan(upper_dev)) { 6226 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6227 return -EOPNOTSUPP; 6228 } 6229 if (!info->linking) 6230 break; 6231 if (netif_is_macvlan(upper_dev) && 6232 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { 6233 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6234 return -EOPNOTSUPP; 6235 } 6236 break; 6237 case NETDEV_CHANGEUPPER: 6238 upper_dev = info->upper_dev; 6239 if (info->linking) 6240 break; 6241 if (netif_is_macvlan(upper_dev)) 6242 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6243 break; 6244 } 6245 6246 return 0; 6247 } 6248 6249 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 6250 unsigned long event, void *ptr) 6251 { 6252 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 6253 u16 vid = vlan_dev_vlan_id(vlan_dev); 6254 6255 if (mlxsw_sp_port_dev_check(real_dev)) 6256 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 6257 event, ptr, vid); 6258 else if (netif_is_lag_master(real_dev)) 6259 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 6260 real_dev, event, 6261 ptr, vid); 6262 else if (netif_is_bridge_master(real_dev)) 6263 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, 6264 event, ptr, vid); 6265 6266 return 0; 6267 } 6268 6269 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 6270 unsigned long event, void *ptr) 6271 { 6272 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); 6273 struct netdev_notifier_changeupper_info *info = ptr; 6274 struct netlink_ext_ack *extack; 6275 struct net_device *upper_dev; 6276 6277 if (!mlxsw_sp) 6278 return 0; 6279 6280 extack = netdev_notifier_info_to_extack(&info->info); 6281 6282 switch (event) { 6283 case NETDEV_PRECHANGEUPPER: 6284 upper_dev = info->upper_dev; 6285 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { 6286 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6287 return -EOPNOTSUPP; 6288 } 6289 if (!info->linking) 6290 break; 6291 if (netif_is_macvlan(upper_dev) && 6292 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) { 6293 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6294 return -EOPNOTSUPP; 6295 } 6296 break; 6297 case NETDEV_CHANGEUPPER: 6298 upper_dev = info->upper_dev; 6299 if (info->linking) 6300 break; 6301 if (is_vlan_dev(upper_dev)) 6302 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 6303 if (netif_is_macvlan(upper_dev)) 6304 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6305 break; 6306 } 6307 6308 return 0; 6309 } 6310 6311 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 6312 unsigned long event, void *ptr) 6313 { 6314 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 6315 struct netdev_notifier_changeupper_info *info = ptr; 6316 struct netlink_ext_ack *extack; 6317 6318 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 6319 return 0; 6320 6321 extack = netdev_notifier_info_to_extack(&info->info); 6322 6323 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ 6324 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6325 6326 return -EOPNOTSUPP; 6327 } 6328 6329 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) 6330 { 6331 struct netdev_notifier_changeupper_info *info = ptr; 6332 6333 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) 6334 return false; 6335 return netif_is_l3_master(info->upper_dev); 6336 } 6337 6338 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 6339 struct net_device *dev, 6340 unsigned long event, void *ptr) 6341 { 6342 struct netdev_notifier_changeupper_info *cu_info; 6343 struct netdev_notifier_info *info = ptr; 6344 struct netlink_ext_ack *extack; 6345 struct net_device *upper_dev; 6346 6347 extack = netdev_notifier_info_to_extack(info); 6348 6349 switch (event) { 6350 case NETDEV_CHANGEUPPER: 6351 cu_info = container_of(info, 6352 struct netdev_notifier_changeupper_info, 6353 info); 6354 upper_dev = cu_info->upper_dev; 6355 if (!netif_is_bridge_master(upper_dev)) 6356 return 0; 6357 if (!mlxsw_sp_lower_get(upper_dev)) 6358 return 0; 6359 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 6360 return -EOPNOTSUPP; 6361 if (cu_info->linking) { 6362 if (!netif_running(dev)) 6363 return 0; 6364 /* When the bridge is VLAN-aware, the VNI of the VxLAN 6365 * device needs to be mapped to a VLAN, but at this 6366 * point no VLANs are configured on the VxLAN device 6367 */ 6368 if (br_vlan_enabled(upper_dev)) 6369 return 0; 6370 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 6371 dev, 0, extack); 6372 } else { 6373 /* VLANs were already flushed, which triggered the 6374 * necessary cleanup 6375 */ 6376 if (br_vlan_enabled(upper_dev)) 6377 return 0; 6378 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6379 } 6380 break; 6381 case NETDEV_PRE_UP: 6382 upper_dev = netdev_master_upper_dev_get(dev); 6383 if (!upper_dev) 6384 return 0; 6385 if (!netif_is_bridge_master(upper_dev)) 6386 return 0; 6387 if (!mlxsw_sp_lower_get(upper_dev)) 6388 return 0; 6389 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 6390 extack); 6391 case NETDEV_DOWN: 6392 upper_dev = netdev_master_upper_dev_get(dev); 6393 if (!upper_dev) 6394 return 0; 6395 if (!netif_is_bridge_master(upper_dev)) 6396 return 0; 6397 if (!mlxsw_sp_lower_get(upper_dev)) 6398 return 0; 6399 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6400 break; 6401 } 6402 6403 return 0; 6404 } 6405 6406 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 6407 unsigned long event, void *ptr) 6408 { 6409 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6410 struct mlxsw_sp_span_entry *span_entry; 6411 struct mlxsw_sp *mlxsw_sp; 6412 int err = 0; 6413 6414 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 6415 if (event == NETDEV_UNREGISTER) { 6416 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 6417 if (span_entry) 6418 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 6419 } 6420 mlxsw_sp_span_respin(mlxsw_sp); 6421 6422 if (netif_is_vxlan(dev)) 6423 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 6424 if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) 6425 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, 6426 event, ptr); 6427 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) 6428 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, 6429 event, ptr); 6430 else if (event == NETDEV_PRE_CHANGEADDR || 6431 event == NETDEV_CHANGEADDR || 6432 event == NETDEV_CHANGEMTU) 6433 err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); 6434 else if (mlxsw_sp_is_vrf_event(event, ptr)) 6435 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); 6436 else if (mlxsw_sp_port_dev_check(dev)) 6437 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); 6438 else if (netif_is_lag_master(dev)) 6439 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 6440 else if (is_vlan_dev(dev)) 6441 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 6442 else if (netif_is_bridge_master(dev)) 6443 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); 6444 else if (netif_is_macvlan(dev)) 6445 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 6446 6447 return notifier_from_errno(err); 6448 } 6449 6450 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { 6451 .notifier_call = mlxsw_sp_inetaddr_valid_event, 6452 }; 6453 6454 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { 6455 .notifier_call = mlxsw_sp_inet6addr_valid_event, 6456 }; 6457 6458 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 6459 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 6460 {0, }, 6461 }; 6462 6463 static struct pci_driver mlxsw_sp1_pci_driver = { 6464 .name = mlxsw_sp1_driver_name, 6465 .id_table = mlxsw_sp1_pci_id_table, 6466 }; 6467 6468 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 6469 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 6470 {0, }, 6471 }; 6472 6473 static struct pci_driver mlxsw_sp2_pci_driver = { 6474 .name = mlxsw_sp2_driver_name, 6475 .id_table = mlxsw_sp2_pci_id_table, 6476 }; 6477 6478 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = { 6479 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0}, 6480 {0, }, 6481 }; 6482 6483 static struct pci_driver mlxsw_sp3_pci_driver = { 6484 .name = mlxsw_sp3_driver_name, 6485 .id_table = mlxsw_sp3_pci_id_table, 6486 }; 6487 6488 static int __init mlxsw_sp_module_init(void) 6489 { 6490 int err; 6491 6492 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6493 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6494 6495 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 6496 if (err) 6497 goto err_sp1_core_driver_register; 6498 6499 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 6500 if (err) 6501 goto err_sp2_core_driver_register; 6502 6503 err = mlxsw_core_driver_register(&mlxsw_sp3_driver); 6504 if (err) 6505 goto err_sp3_core_driver_register; 6506 6507 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 6508 if (err) 6509 goto err_sp1_pci_driver_register; 6510 6511 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 6512 if (err) 6513 goto err_sp2_pci_driver_register; 6514 6515 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver); 6516 if (err) 6517 goto err_sp3_pci_driver_register; 6518 6519 return 0; 6520 6521 err_sp3_pci_driver_register: 6522 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6523 err_sp2_pci_driver_register: 6524 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6525 err_sp1_pci_driver_register: 6526 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 6527 err_sp3_core_driver_register: 6528 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6529 err_sp2_core_driver_register: 6530 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6531 err_sp1_core_driver_register: 6532 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6533 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6534 return err; 6535 } 6536 6537 static void __exit mlxsw_sp_module_exit(void) 6538 { 6539 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 6540 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6541 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6542 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 6543 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6544 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6545 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6546 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6547 } 6548 6549 module_init(mlxsw_sp_module_init); 6550 module_exit(mlxsw_sp_module_exit); 6551 6552 MODULE_LICENSE("Dual BSD/GPL"); 6553 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 6554 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 6555 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 6556 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 6557 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); 6558 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 6559