// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "../mlxfw/mlxfw.h"

/* Derive the firmware release branch from a minor version (minor / 100).
 * Used below to check driver/FW branch compatibility.
 */
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
#define MLXSW_SP1_FWREV_SUBMINOR 1886
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

/* Firmware version the Spectrum-1 driver expects; validated in
 * mlxsw_sp_fw_rev_validate() below.
 */
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware file name built from the expected revision numbers. */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"."
	__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp_driver_version[] = "1.0";

/* Per-ASIC masks applied to the base MAC address; the low (unmasked) bits
 * are used for per-port addresses (see mlxsw_sp_port_dev_addr_init()).
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

/* Glue object handed to the mlxfw flashing core; lets the mlxfw_dev
 * callbacks recover the owning mlxsw_sp via container_of().
 */
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

/* Per-ASIC-generation PTP callbacks (clock setup, timestamping hooks and
 * PTP statistics).
 */
struct mlxsw_sp_ptp_ops {
	struct mlxsw_sp_ptp_clock *
	(*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev);
	void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock);

	struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp);
	void (*fini)(struct mlxsw_sp_ptp_state *ptp_state);

	/* Notify a driver that a packet that might be PTP was received. Driver
	 * is responsible for freeing the passed-in SKB.
	 */
	void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			u8 local_port);

	/* Notify a driver that a timestamped packet was transmitted. Driver
	 * is responsible for freeing the passed-in SKB.
	 */
	void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			    u8 local_port);

	int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	void (*shaper_work)(struct work_struct *work);
	int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
			   struct ethtool_ts_info *info);
	int (*get_stats_count)(void);
	void (*get_stats_strings)(u8 **p);
	void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  u64 *data, int data_index);
};

/* mlxfw callback: query a firmware component's size/alignment limits via
 * the MCQI register, then clamp them to what the MCDA transport can carry.
 */
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	/* Enforce at least 4-byte alignment and cap writes to one MCDA
	 * data block.
	 */
	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

/* mlxfw callback: take the firmware-update FSM lock (MCC register) and
 * return the update handle. Fails with -EBUSY if the FSM is not idle.
 */
static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state !=
 MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: announce a component update of 'component_size' bytes
 * to the FSM (MCC register).
 */
static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: download one block of component data at 'offset'
 * through the MCDA register.
 */
static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

/* mlxfw callback: ask the FSM to verify a downloaded component. */
static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: activate the newly flashed firmware image. */
static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct
 mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: read the current FSM state and error code from MCC. */
static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	/* Clamp unknown device error codes to the highest known value. */
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

/* mlxfw callback: abort an in-progress firmware update (best effort;
 * the write result is intentionally ignored).
 */
static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: release the firmware-update handle (best effort). */
static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: forward flashing progress to devlink so user space can
 * monitor the update.
 */
static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev,
				   const char *msg, const char *comp_name,
				   u32 done_bytes, u32 total_bytes)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;

	devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core),
					   msg, comp_name,
					   done_bytes, total_bytes);
}

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query	= mlxsw_sp_component_query,
	.fsm_lock		= mlxsw_sp_fsm_lock,
	.fsm_component_update	= mlxsw_sp_fsm_component_update,
	.fsm_block_download	= mlxsw_sp_fsm_block_download,
	.fsm_component_verify	= mlxsw_sp_fsm_component_verify,
	.fsm_activate		= mlxsw_sp_fsm_activate,
	.fsm_query_state	= mlxsw_sp_fsm_query_state,
	.fsm_cancel		= mlxsw_sp_fsm_cancel,
	.fsm_release		= mlxsw_sp_fsm_release,
	.status_notify		= mlxsw_sp_status_notify,
};

/* Flash a firmware image through the mlxfw core, bracketing the operation
 * with core flash start/end and devlink begin/end notifications.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};
	int err;

	mlxsw_core_fw_flash_start(mlxsw_sp->core);
	devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core));
	err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
				   firmware, extack);
	devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core));
	mlxsw_core_fw_flash_end(mlxsw_sp->core);

	return err;
}

/* Compare the running firmware version against the driver's required
 * version and flash the bundled firmware file if they are incompatible.
 * Returns 0 when nothing needs to be done, -EAGAIN when a flash succeeded
 * and the caller should reset the FW, or a negative errno on failure.
 */
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct
 mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
	const char *fw_filename = mlxsw_sp->fw_filename;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !fw_filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
						 &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	/* Same branch and at least the required minor.subminor => OK. */
	if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
	    MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
	    (rev->minor > req_rev->minor ||
	     (rev->minor == req_rev->minor &&
	      rev->subminor >= req_rev->subminor)))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
		 fw_filename);

	err = request_firmware_direct(&firmware, fw_filename,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			fw_filename);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ?
 err : -EAGAIN;
	else
		return 0;
}

/* devlink 'flash update' handler: load the requested firmware file and
 * flash it. Per-component updates are not supported.
 */
static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
				 const char *file_name, const char *component,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	const struct firmware *firmware;
	int err;

	if (component)
		return -EOPNOTSUPP;

	err = request_firmware_direct(&firmware, file_name,
				      mlxsw_sp->bus_info->dev);
	if (err)
		return err;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
	release_firmware(firmware);

	return err;
}

/* Read a flow counter's packet and byte values (MGPC register).
 * Either output pointer may be NULL if the caller does not need it.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

/* Zero a flow counter in hardware (MGPC clear opcode). */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

/* Allocate a flow counter from the flow sub-pool and clear it so the
 * caller starts from zero. The counter is freed again on clear failure.
 */
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp,
 MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

/* Return a flow counter to the flow sub-pool. */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

/* Push a Tx header onto the SKB, built as an Ethernet control packet
 * targeted at tx_info->local_port (see the tx_hdr_* field docs above).
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

/* Map a bridge STP state to the corresponding SPMS register state.
 * BUG()s on an unknown state since callers only pass bridge states.
 */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

/* Program the STP state of one VID on a port (SPMS register). The payload
 * is heap-allocated because MLXSW_REG_SPMS_LEN is large for the stack.
 */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Read the device's base MAC address (SPAD register) into
 * mlxsw_sp->base_mac.
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char
 spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

/* Enable/disable packet sampling on a port at the given rate
 * (MPSC register).
 */
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

/* Set the port's administrative status up or down (PAOS register). */
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

/* Program a port's MAC address into hardware (PPAD register). */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

/* Derive the port's MAC from the switch base MAC by adding the local port
 * number to the last byte, then program it into HW.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

/* Set the port MTU (PMTU register) after validating it against the
 * maximum the port reports. The Tx header and Ethernet header are
 * accounted for on top of the requested MTU.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int
 max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	/* Packing with MTU 0 queries the port's maximum supported MTU. */
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

/* Assign the port to a switch partition ID (PSPA register). */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

/* Enable/disable virtual-port (VLAN-aware) mode on a port
 * (SVPE register).
 */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

/* Enable/disable FDB learning for one VID on a port (SPVMLR register).
 * The payload is heap-allocated because MLXSW_REG_SPVMLR_LEN is large
 * for the stack.
 */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

/* Program the port's PVID in hardware (SPVID register). */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid),
spvid_pl); 691 } 692 693 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 694 bool allow) 695 { 696 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 697 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 698 699 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 700 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 701 } 702 703 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 704 { 705 int err; 706 707 if (!vid) { 708 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 709 if (err) 710 return err; 711 } else { 712 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 713 if (err) 714 return err; 715 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 716 if (err) 717 goto err_port_allow_untagged_set; 718 } 719 720 mlxsw_sp_port->pvid = vid; 721 return 0; 722 723 err_port_allow_untagged_set: 724 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 725 return err; 726 } 727 728 static int 729 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 730 { 731 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 732 char sspr_pl[MLXSW_REG_SSPR_LEN]; 733 734 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 735 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 736 } 737 738 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, 739 u8 local_port, u8 *p_module, 740 u8 *p_width, u8 *p_lane) 741 { 742 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 743 int err; 744 745 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 746 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 747 if (err) 748 return err; 749 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 750 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); 751 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 752 return 0; 753 } 754 755 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port, 756 u8 module, u8 width, u8 lane) 757 { 758 struct mlxsw_sp *mlxsw_sp 
 = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* Unmap the port from its module by programming a zero width (PMLP). */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* ndo_open: administratively enable the port and start the Tx queue. */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

/* ndo_stop: stop the Tx queue and administratively disable the port. */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

/* ndo_start_xmit: prepend the mlxsw Tx header and hand the SKB to the
 * core for transmission, maintaining per-CPU Tx statistics.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Reallocate if there is no headroom for the Tx header. */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb =
 skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

/* ndo_set_rx_mode: intentionally empty; nothing to configure. */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

/* ndo_set_mac_address: validate the new address, program it into HW and
 * only then update the netdev copy.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

/* Threshold for a priority-group buffer: two MTUs, in cells. */
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

/* Delay buffer size for PFC, in cells: the PFC delay (given in bit-time,
 * hence the divide by BITS_PER_BYTE) scaled by the cell factor, plus one
 * MTU's worth of cells.
 */
static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay =
 mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

/* Delay buffer size in cells: PFC-based when PFC is enabled, the fixed
 * PAUSE worst case when only PAUSE is enabled, zero otherwise (lossy).
 */
static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

/* Pack one priority-group buffer into the PBMC payload, as lossy or
 * lossless (the latter with an Xoff threshold).
 */
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

/* Configure the port's headroom buffers (PBMC register) for the given MTU,
 * priority-to-TC mapping and PAUSE/PFC settings. Fails with -ENOBUFS if
 * the required cells exceed the port's maximum headroom.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ?
 my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	u32 taken_headroom_cells = 0;
	u32 max_headroom_cells;
	int i, j, err;

	max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		u16 thres_cells;
		u16 delay_cells;
		u16 total_cells;
		bool lossy;

		/* Only configure buffers for TCs that have a priority
		 * mapped to them; PFC applies if any mapped priority has
		 * its PFC bit set.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
							pfc, pause_en);
		total_cells = thres_cells + delay_cells;

		taken_headroom_cells += total_cells;
		if (taken_headroom_cells > max_headroom_cells)
			return -ENOBUFS;

		mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
				     thres_cells, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

/* Headroom setup wrapper that pulls the priority-to-TC map and PFC
 * configuration from the port's DCB state (defaults when DCB ETS is off).
 */
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ?
 mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

/* ndo_change_mtu: update headroom, SPAN and the port MTU in order,
 * rolling back the earlier steps if a later one fails.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* Aggregate the per-CPU software (CPU-path) counters into 'stats',
 * using the u64_stats seqcount to get consistent snapshots.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection.
*/ 1045 tx_dropped += p->tx_dropped; 1046 } 1047 stats->tx_dropped = tx_dropped; 1048 return 0; 1049 } 1050 1051 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) 1052 { 1053 switch (attr_id) { 1054 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1055 return true; 1056 } 1057 1058 return false; 1059 } 1060 1061 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev, 1062 void *sp) 1063 { 1064 switch (attr_id) { 1065 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1066 return mlxsw_sp_port_get_sw_stats64(dev, sp); 1067 } 1068 1069 return -EINVAL; 1070 } 1071 1072 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, 1073 int prio, char *ppcnt_pl) 1074 { 1075 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1076 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1077 1078 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); 1079 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 1080 } 1081 1082 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev, 1083 struct rtnl_link_stats64 *stats) 1084 { 1085 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1086 int err; 1087 1088 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 1089 0, ppcnt_pl); 1090 if (err) 1091 goto out; 1092 1093 stats->tx_packets = 1094 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); 1095 stats->rx_packets = 1096 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); 1097 stats->tx_bytes = 1098 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); 1099 stats->rx_bytes = 1100 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 1101 stats->multicast = 1102 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 1103 1104 stats->rx_crc_errors = 1105 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 1106 stats->rx_frame_errors = 1107 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 1108 1109 stats->rx_length_errors = ( 1110 
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

/* Collect extended counters (ECN marks, per-TC congestion/queue counters
 * and per-priority TX counters) into the port's xstats. Individual query
 * failures are skipped so the remaining counters are still updated.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

/* Periodic work that refreshes the cached HW statistics while the port's
 * carrier is up, then re-arms itself.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program a single SPVM record covering [vid_begin, vid_end]. */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Set VLAN membership for a range of VIDs, splitting the range into
 * chunks of at most MLXSW_REG_SPVM_REC_MAX_COUNT VIDs per register
 * write.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Destroy all VLANs configured on the port; the default VID is kept
 * unless @flush_default is set.
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

/* Detach the {port, VID} from its bridge port or router FID, if any,
 * before it is destroyed.
 */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

/* Create a {port, VID} object and make the port a member of the VLAN.
 * The default VID is programmed as egress-untagged. Returns ERR_PTR on
 * failure (-EEXIST if the VID already exists on the port).
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

/* Look up a matchall offload entry on the port by its TC cookie. */
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

/* Offload a matchall mirror action as a SPAN session towards act->dev. */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;

	if (!act->dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
					true, &mirror->span_id);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
				 span_type, true);
}

/* Enable packet sampling on the port. Only one psample group can be
 * active at a time and the rate is bounded by MLXSW_REG_MPSC_RATE_MAX.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   act->sample.psample_group);
	mlxsw_sp_port->sample->truncate = act->sample.truncate;
	mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
	mlxsw_sp_port->sample->rate = act->sample.rate;

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

/* TC_CLSMATCHALL_REPLACE handler: offload a single mirror or sample
 * action (ETH_P_ALL only) and track it on the port's matchall list.
 */
static int
mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct tc_cls_matchall_offload *f,
			       bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	int err;

	if (!flow_offload_has_one_action(&f->rule->action)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	act = &f->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, act,
							    ingress);
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    act, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

/* TC_CLSMATCHALL_DESTROY handler: tear down the offloaded entry that
 * matches the classifier's cookie.
 */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

/* Dispatch matchall classifier commands to the add/del helpers. */
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Dispatch flower classifier commands to the ACL flower handlers. */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case FLOW_CLS_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case FLOW_CLS_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Block callback that only handles matchall. CLSFLOWER returns 0 here
 * because flower is served by the dedicated flower callback registered
 * on the same block.
 */
static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}

static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}

/* Block callback that only handles flower; matchall is a no-op here. */
static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* Release callback invoked when the flower block callback is freed. */
static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	mlxsw_sp_acl_block_destroy(acl_block);
}

static LIST_HEAD(mlxsw_sp_block_cb_list);

/* Bind the port to the block's flower ACL block, creating the ACL block
 * and registering the shared flower callback on first use. The callback
 * is reference counted so a single ACL block can be shared by multiple
 * port bindings of the same block.
 */
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb) {
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
					       mlxsw_sp, acl_block,
					       mlxsw_sp_tc_block_flower_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_acl_block_destroy(acl_block);
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
		register_block = true;
	} else {
		acl_block = flow_block_cb_priv(block_cb);
	}
	flow_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress, f->extack);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
	}

	return 0;

err_block_bind:
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
err_cb_register:
	return err;
}

/* Undo mlxsw_sp_setup_tc_block_flower_bind(). The callback is removed
 * from the block once its reference count drops to zero.
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = flow_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

/* TC_SETUP_BLOCK handler: register both the per-direction matchall
 * callback and the shared flower callback for the block.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb =
		    mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	f->driver_block_list = &mlxsw_sp_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, mlxsw_sp_port,
					  &mlxsw_sp_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
					       mlxsw_sp_port, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);
		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
							  ingress);
		if (err) {
			flow_block_cb_free(block_cb);
			return err;
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
						      f, ingress);
		block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* ndo_setup_tc entry point: blocks, RED and PRIO qdiscs are offloaded. */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}


/* Toggle the NETIF_F_HW_TC feature. Disabling is refused while offloaded
 * tc filters exist on the port; otherwise the ACL blocks' disabled count
 * is adjusted accordingly.
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
	} else {
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
	}
	return 0;
}

/* Toggle physical loopback through the PPLR register. The port is taken
 * administratively down for the duration of the change if it is running.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Apply a single feature change through its handler and mirror the
 * result into dev->features on success. No-op if the feature's state
 * is not actually changing.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

/* SIOCSHWTSTAMP: copy the config from user space, apply it through the
 * per-ASIC PTP ops and echo the possibly-adjusted config back.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* SIOCGHWTSTAMP: report the current hardware timestamping config. */
static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* Reset the port's hardware timestamping configuration to disabled. */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

/* ndo_do_ioctl handler: only hardware timestamping ioctls are handled. */
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

/* Program global PAUSE settings through the PFCC register. */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

/* ethtool set_pauseparam handler. PAUSE is mutually exclusive with PFC
 * and PAUSE autonegotiation is not supported. Headroom is resized before
 * the new setting takes effect and restored on failure.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* Describes one ethtool HW counter: its string and a getter extracting
 * its value from a PPCNT payload. cells_bytes: per the field name, the
 * value appears to be in buffer cells rather than bytes — the consuming
 * code is not visible in this chunk; confirm against the stats emitters.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

/* IEEE 802.3 counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter =
			mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

/* RFC 2863 interface counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
	{
		.str = "if_in_discards",
		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
	},
	{
		.str = "if_out_discards",
		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
	},
	{
		.str = "if_out_errors",
		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)

/* RFC 2819 (RMON) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
	{
		.str = "ether_stats_undersize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
	},
	{
		.str = "ether_stats_oversize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
	},
	{
		.str = "ether_stats_fragments",
		.getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
	},
	{
		.str = "ether_pkts64octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
	},
	{
		.str = "ether_pkts65to127octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
	},
	{
		.str = "ether_pkts128to255octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
	},
	{
		.str = "ether_pkts256to511octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
	},
	{
		.str = "ether_pkts512to1023octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
	},
	{
		.str = "ether_pkts1024to1518octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
	},
	{
		.str = "ether_pkts1519to2047octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
	},
	{
		.str = "ether_pkts2048to4095octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
	},
	{
		.str = "ether_pkts4096to8191octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)

/* RFC 3635 (EtherLike-MIB) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
	{
		.str = "dot3stats_fcs_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
	},
	{
		.str = "dot3stats_symbol_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
	},
	{
		.str = "dot3control_in_unknown_opcodes",
		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
	},
	{
		.str = "dot3in_pause_frames",
		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)

/* Per-reason discard counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
	{
		.str = "discard_ingress_general",
		.getter = mlxsw_reg_ppcnt_ingress_general_get,
	},
	{
		.str = "discard_ingress_policy_engine",
		.getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
	},
	{
		.str = "discard_ingress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
	},
	{
		.str = "discard_ingress_tag_frame_type",
		.getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
	},
	{
		.str = "discard_egress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
	},
	{
		.str = "discard_loopback_filter",
		.getter = mlxsw_reg_ppcnt_loopback_filter_get,
	},
	{
		.str = "discard_egress_general",
		.getter = mlxsw_reg_ppcnt_egress_general_get,
	},
	{
		.str = "discard_egress_hoq",
		.getter = mlxsw_reg_ppcnt_egress_hoq_get,
	},
	{
		.str = "discard_egress_policy_engine",
		.getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
	},
	{
		.str = "discard_ingress_tx_link_down",
		.getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
	},
	{
		.str = "discard_egress_stp_filter",
		.getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
	},
	{
		.str = "discard_egress_sll",
		.getter = mlxsw_reg_ppcnt_egress_sll_get,
	},
};

#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)

/* Per-priority counter group. NOTE(review): the strings are presumably
 * suffixed with the priority number when emitted — the string-emitting
 * code is outside this chunk; confirm against the ethtool strings
 * callback.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

/* Per-traffic-class counters; names get a "_<tc>" suffix appended by
 * mlxsw_sp_port_get_tc_strings().
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		/* Hardware reports this counter in cells; converted to bytes
		 * in __mlxsw_sp_port_get_stats().
		 */
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total number of fixed ethtool statistics; PTP statistics are added on top
 * at runtime (see mlxsw_sp_port_get_sset_count()).
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
					 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
					  IEEE_8021QAZ_MAX_TCS) + \
					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
					  TC_MAX_QUEUE))

/* Emit the per-priority stat names for one priority, advancing *p by
 * ETH_GSTRING_LEN per string.
 */
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

/* Emit the per-TC stat names for one traffic class, advancing *p. */
static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

/* ethtool get_strings callback. String order must match the value order
 * produced by mlxsw_sp_port_get_stats().
 */
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < TC_MAX_QUEUE; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p);
		break;
	}
}

/* ethtool set_phys_id callback: toggle the port LED via the MLCR register.
 * Only ACTIVE/INACTIVE are supported; blinking is left to hardware.
 */
static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

/* Map a PPCNT counter group to its stats descriptor table and length. */
static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2863_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2819_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_3635_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_DISCARD_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_discard_stats;
		*p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

/* Read one PPCNT counter group into data[data_index..], converting
 * cell-based counters to bytes where the descriptor requests it.
 * NOTE(review): the return value of mlxsw_sp_port_get_stats_raw() is
 * ignored here, so stale ppcnt_pl contents could be parsed on a register
 * query failure — confirm whether this is intentional best-effort.
 */
static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}

/* ethtool get_ethtool_stats callback; group order must match
 * mlxsw_sp_port_get_strings().
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int i, data_index = 0;

	/* IEEE 802.3
Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* RFC 2863 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;

	/* RFC 2819 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;

	/* RFC 3635 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;

	/* Discard Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}

	/* PTP counters */
	mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port,
						    data, data_index);
	data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
}

/* ethtool get_sset_count callback: fixed stats plus ASIC-specific PTP
 * stats count.
 */
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN +
		       mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
	default:
		return -EOPNOTSUPP;
	}
}

/* Spectrum-1 mapping between a PTYS protocol mask, the corresponding
 * ethtool link mode bit and the speed it represents.
 */
struct mlxsw_sp1_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed		= SPEED_100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool	= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed		= SPEED_1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed		= SPEED_20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed		= SPEED_100000,
	},
};

#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)

/* Report FIBRE/Backplane supported-port types based on the PTYS
 * capability mask.
 */
static void
mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Translate a PTYS protocol mask into ethtool link-mode bits.
 * mlxsw_sp and width are unused on Spectrum-1; kept for the common
 * port_type_speed_ops signature shared with Spectrum-2.
 */
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 u8 width, unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			__set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

/* Return the speed of the first link mode set in the PTYS mask, or
 * SPEED_UNKNOWN if none matches.
 */
static u32
mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			return mlxsw_sp1_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill ethtool speed/duplex from the operational PTYS mask; unknown while
 * the carrier is down.
 */
static void
mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* Build a PTYS protocol mask from the ethtool advertised link modes. */
static u32
mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Build a PTYS protocol mask covering all link modes of a given speed. */
static u32
mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width,
			u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp1_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Build a PTYS protocol mask of all link modes at or below upper_speed. */
static u32
mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Spectrum-1 has a fixed 25G per-lane base speed for all ports. */
static int
mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			  u32 *base_speed)
{
	*base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
	return 0;
}

/* Pack a PTYS Ethernet register payload (legacy protocol mask). */
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin, bool autoneg)
{
	mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}

/* Unpack the cap/admin/oper protocol masks from a PTYS payload. */
static void
mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
				  p_eth_proto_oper);
}

/* Spectrum-1 implementation of the per-ASIC port type/speed operations. */
static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp1_port_type_speed_ops = {
	.from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
	.from_ptys_link = mlxsw_sp1_from_ptys_link,
	.from_ptys_speed = mlxsw_sp1_from_ptys_speed,
	.from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
	.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
	.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
	.to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed,
	.port_speed_base = mlxsw_sp1_port_speed_base,
	.reg_ptys_eth_pack =
mlxsw_sp1_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
};

/* Spectrum-2 PTYS "extended" speeds map one hardware bit to a set of
 * ethtool link modes; each set below is paired with its length macro.
 */
static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)

/* Port width (number of lanes) encoded as a bitmask so a link mode can
 * declare which widths it is valid for.
 */
#define MLXSW_SP_PORT_MASK_WIDTH_1X	BIT(0)
#define MLXSW_SP_PORT_MASK_WIDTH_2X	BIT(1)
#define MLXSW_SP_PORT_MASK_WIDTH_4X	BIT(2)

/* Convert a lane count (1/2/4) to its width bitmask; 0 on invalid width. */
static u8 mlxsw_sp_port_mask_width_get(u8 width)
{
	switch (width) {
	case 1:
		return MLXSW_SP_PORT_MASK_WIDTH_1X;
	case 2:
		return MLXSW_SP_PORT_MASK_WIDTH_2X;
	case 4:
		return MLXSW_SP_PORT_MASK_WIDTH_4X;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

/* Spectrum-2 mapping between an extended PTYS bit, the ethtool link modes
 * it represents, the port widths it applies to and its speed.
 */
struct mlxsw_sp2_port_link_mode {
	const enum ethtool_link_mode_bit_indices *mask_ethtool;
	int m_ethtool_len;
	u32 mask;
	u32 speed;
	u8 mask_width;
};

static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X,
		.speed = SPEED_2500,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X,
		.speed = SPEED_5000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
			      MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
			      MLXSW_SP_PORT_MASK_WIDTH_4X,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
		.mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
		.speed = SPEED_200000,
	},
};

#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)

/* On Spectrum-2 every port supports both fibre and backplane media. */
static void
mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Set all ethtool link-mode bits belonging to one Spectrum-2 link mode. */
static void
mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			  unsigned long *mode)
{
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++)
		__set_bit(link_mode->mask_ethtool[i], mode);
}

/* Translate an extended PTYS mask into ethtool link-mode bits, filtered by
 * the port's lane width.
 */
static void
mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 u8 width, unsigned long *mode)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) &&
		    (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
			mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
						  mode);
	}
}

/* Return the speed of the first extended PTYS bit set, or SPEED_UNKNOWN. */
static u32
mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
			return mlxsw_sp2_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill ethtool speed/duplex from the operational mask; unknown while the
 * carrier is down.
 */
static void
mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* True only when ALL ethtool bits of the link mode are set in 'mode' —
 * a single PTYS bit cannot advertise a subset of its ethtool modes.
 */
static bool
mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			   const unsigned long *mode)
{
	int cnt = 0;
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++) {
		if (test_bit(link_mode->mask_ethtool[i], mode))
			cnt++;
	}

	return cnt == link_mode->m_ethtool_len;
}

/* Build an extended PTYS mask from the ethtool advertised link modes,
 * restricted to modes valid for the port's width.
 */
static u32
mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
			      const struct ethtool_link_ksettings *cmd)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) &&
		    mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
					       cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Build an extended PTYS mask for all modes of a given speed and width. */
static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp,
				   u8 width, u32 speed)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if ((speed == mlxsw_sp2_port_link_mode[i].speed) &&
		    (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Build an extended PTYS mask of all modes at or below upper_speed. */
static u32
mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Determine a port's per-lane base speed (50G or 25G) from its PTYS
 * capabilities.
 */
static int
mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			  u32 *base_speed)
{
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	int err;

	/* In Spectrum-2, the speed of 1x can change from port to port, so query
	 * it from firmware.
3179 */ 3180 mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false); 3181 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3182 if (err) 3183 return err; 3184 mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 3185 3186 if (eth_proto_cap & 3187 MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) { 3188 *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G; 3189 return 0; 3190 } 3191 3192 if (eth_proto_cap & 3193 MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) { 3194 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 3195 return 0; 3196 } 3197 3198 return -EIO; 3199 } 3200 3201 static void 3202 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 3203 u8 local_port, u32 proto_admin, 3204 bool autoneg) 3205 { 3206 mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); 3207 } 3208 3209 static void 3210 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 3211 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 3212 u32 *p_eth_proto_oper) 3213 { 3214 mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, 3215 p_eth_proto_admin, p_eth_proto_oper); 3216 } 3217 3218 static const struct mlxsw_sp_port_type_speed_ops 3219 mlxsw_sp2_port_type_speed_ops = { 3220 .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, 3221 .from_ptys_link = mlxsw_sp2_from_ptys_link, 3222 .from_ptys_speed = mlxsw_sp2_from_ptys_speed, 3223 .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, 3224 .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, 3225 .to_ptys_speed = mlxsw_sp2_to_ptys_speed, 3226 .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed, 3227 .port_speed_base = mlxsw_sp2_port_speed_base, 3228 .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, 3229 .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, 3230 }; 3231 3232 static void 3233 mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, 3234 u8 width, struct ethtool_link_ksettings *cmd) 3235 { 3236 const struct 
mlxsw_sp_port_type_speed_ops *ops; 3237 3238 ops = mlxsw_sp->port_type_speed_ops; 3239 3240 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 3241 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 3242 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 3243 3244 ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); 3245 ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width, 3246 cmd->link_modes.supported); 3247 } 3248 3249 static void 3250 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, 3251 u32 eth_proto_admin, bool autoneg, u8 width, 3252 struct ethtool_link_ksettings *cmd) 3253 { 3254 const struct mlxsw_sp_port_type_speed_ops *ops; 3255 3256 ops = mlxsw_sp->port_type_speed_ops; 3257 3258 if (!autoneg) 3259 return; 3260 3261 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 3262 ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width, 3263 cmd->link_modes.advertising); 3264 } 3265 3266 static u8 3267 mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type) 3268 { 3269 switch (connector_type) { 3270 case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: 3271 return PORT_OTHER; 3272 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: 3273 return PORT_NONE; 3274 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: 3275 return PORT_TP; 3276 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: 3277 return PORT_AUI; 3278 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: 3279 return PORT_BNC; 3280 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: 3281 return PORT_MII; 3282 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: 3283 return PORT_FIBRE; 3284 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: 3285 return PORT_DA; 3286 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: 3287 return PORT_OTHER; 3288 default: 3289 WARN_ON_ONCE(1); 3290 return PORT_OTHER; 3291 } 3292 } 3293 3294 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 3295 struct ethtool_link_ksettings *cmd) 3296 { 3297 u32 eth_proto_cap, 
eth_proto_admin, eth_proto_oper; 3298 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3299 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3300 const struct mlxsw_sp_port_type_speed_ops *ops; 3301 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3302 u8 connector_type; 3303 bool autoneg; 3304 int err; 3305 3306 ops = mlxsw_sp->port_type_speed_ops; 3307 3308 autoneg = mlxsw_sp_port->link.autoneg; 3309 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3310 0, false); 3311 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3312 if (err) 3313 return err; 3314 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3315 ð_proto_admin, ð_proto_oper); 3316 3317 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, 3318 mlxsw_sp_port->mapping.width, cmd); 3319 3320 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3321 mlxsw_sp_port->mapping.width, cmd); 3322 3323 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 3324 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3325 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3326 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3327 eth_proto_oper, cmd); 3328 3329 return 0; 3330 } 3331 3332 static int 3333 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3334 const struct ethtool_link_ksettings *cmd) 3335 { 3336 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3337 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3338 const struct mlxsw_sp_port_type_speed_ops *ops; 3339 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3340 u32 eth_proto_cap, eth_proto_new; 3341 bool autoneg; 3342 int err; 3343 3344 ops = mlxsw_sp->port_type_speed_ops; 3345 3346 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3347 0, false); 3348 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3349 if (err) 3350 return err; 3351 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3352 3353 autoneg = 
cmd->base.autoneg == AUTONEG_ENABLE; 3354 eth_proto_new = autoneg ? 3355 ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width, 3356 cmd) : 3357 ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width, 3358 cmd->base.speed); 3359 3360 eth_proto_new = eth_proto_new & eth_proto_cap; 3361 if (!eth_proto_new) { 3362 netdev_err(dev, "No supported speed requested\n"); 3363 return -EINVAL; 3364 } 3365 3366 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3367 eth_proto_new, autoneg); 3368 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3369 if (err) 3370 return err; 3371 3372 mlxsw_sp_port->link.autoneg = autoneg; 3373 3374 if (!netif_running(dev)) 3375 return 0; 3376 3377 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3378 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3379 3380 return 0; 3381 } 3382 3383 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3384 struct ethtool_modinfo *modinfo) 3385 { 3386 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3387 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3388 int err; 3389 3390 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3391 mlxsw_sp_port->mapping.module, 3392 modinfo); 3393 3394 return err; 3395 } 3396 3397 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3398 struct ethtool_eeprom *ee, 3399 u8 *data) 3400 { 3401 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3402 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3403 int err; 3404 3405 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3406 mlxsw_sp_port->mapping.module, ee, 3407 data); 3408 3409 return err; 3410 } 3411 3412 static int 3413 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) 3414 { 3415 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3416 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3417 3418 return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); 3419 } 3420 3421 static const 
struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 3422 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 3423 .get_link = ethtool_op_get_link, 3424 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 3425 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 3426 .get_strings = mlxsw_sp_port_get_strings, 3427 .set_phys_id = mlxsw_sp_port_set_phys_id, 3428 .get_ethtool_stats = mlxsw_sp_port_get_stats, 3429 .get_sset_count = mlxsw_sp_port_get_sset_count, 3430 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 3431 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 3432 .get_module_info = mlxsw_sp_get_module_info, 3433 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 3434 .get_ts_info = mlxsw_sp_get_ts_info, 3435 }; 3436 3437 static int 3438 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 3439 { 3440 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3441 const struct mlxsw_sp_port_type_speed_ops *ops; 3442 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3443 u32 eth_proto_admin; 3444 u32 upper_speed; 3445 u32 base_speed; 3446 int err; 3447 3448 ops = mlxsw_sp->port_type_speed_ops; 3449 3450 err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port, 3451 &base_speed); 3452 if (err) 3453 return err; 3454 upper_speed = base_speed * width; 3455 3456 eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed); 3457 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3458 eth_proto_admin, mlxsw_sp_port->link.autoneg); 3459 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3460 } 3461 3462 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 3463 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 3464 bool dwrr, u8 dwrr_weight) 3465 { 3466 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3467 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3468 3469 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3470 next_index); 3471 mlxsw_reg_qeec_de_set(qeec_pl, true); 3472 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 
3473 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 3474 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3475 } 3476 3477 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 3478 enum mlxsw_reg_qeec_hr hr, u8 index, 3479 u8 next_index, u32 maxrate) 3480 { 3481 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3482 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3483 3484 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3485 next_index); 3486 mlxsw_reg_qeec_mase_set(qeec_pl, true); 3487 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 3488 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3489 } 3490 3491 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, 3492 enum mlxsw_reg_qeec_hr hr, u8 index, 3493 u8 next_index, u32 minrate) 3494 { 3495 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3496 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3497 3498 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3499 next_index); 3500 mlxsw_reg_qeec_mise_set(qeec_pl, true); 3501 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); 3502 3503 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3504 } 3505 3506 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 3507 u8 switch_prio, u8 tclass) 3508 { 3509 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3510 char qtct_pl[MLXSW_REG_QTCT_LEN]; 3511 3512 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 3513 tclass); 3514 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 3515 } 3516 3517 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 3518 { 3519 int err, i; 3520 3521 /* Setup the elements hierarcy, so that each TC is linked to 3522 * one subgroup, which are all member in the same group. 
3523 */ 3524 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3525 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 3526 0); 3527 if (err) 3528 return err; 3529 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3530 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3531 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 3532 0, false, 0); 3533 if (err) 3534 return err; 3535 } 3536 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3537 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3538 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 3539 false, 0); 3540 if (err) 3541 return err; 3542 3543 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3544 MLXSW_REG_QEEC_HIERARCY_TC, 3545 i + 8, i, 3546 true, 100); 3547 if (err) 3548 return err; 3549 } 3550 3551 /* Make sure the max shaper is disabled in all hierarchies that support 3552 * it. Note that this disables ptps (PTP shaper), but that is intended 3553 * for the initial configuration. 3554 */ 3555 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3556 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 3557 MLXSW_REG_QEEC_MAS_DIS); 3558 if (err) 3559 return err; 3560 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3561 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3562 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 3563 i, 0, 3564 MLXSW_REG_QEEC_MAS_DIS); 3565 if (err) 3566 return err; 3567 } 3568 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3569 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3570 MLXSW_REG_QEEC_HIERARCY_TC, 3571 i, i, 3572 MLXSW_REG_QEEC_MAS_DIS); 3573 if (err) 3574 return err; 3575 3576 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3577 MLXSW_REG_QEEC_HIERARCY_TC, 3578 i + 8, i, 3579 MLXSW_REG_QEEC_MAS_DIS); 3580 if (err) 3581 return err; 3582 } 3583 3584 /* Configure the min shaper for multicast TCs. */ 3585 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3586 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 3587 MLXSW_REG_QEEC_HIERARCY_TC, 3588 i + 8, i, 3589 MLXSW_REG_QEEC_MIS_MIN); 3590 if (err) 3591 return err; 3592 } 3593 3594 /* Map all priorities to traffic class 0. 
*/ 3595 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3596 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 3597 if (err) 3598 return err; 3599 } 3600 3601 return 0; 3602 } 3603 3604 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 3605 bool enable) 3606 { 3607 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3608 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 3609 3610 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 3611 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 3612 } 3613 3614 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3615 bool split, u8 module, u8 width, u8 lane) 3616 { 3617 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 3618 struct mlxsw_sp_port *mlxsw_sp_port; 3619 struct net_device *dev; 3620 int err; 3621 3622 err = mlxsw_core_port_init(mlxsw_sp->core, local_port, 3623 module + 1, split, lane / width, 3624 mlxsw_sp->base_mac, 3625 sizeof(mlxsw_sp->base_mac)); 3626 if (err) { 3627 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 3628 local_port); 3629 return err; 3630 } 3631 3632 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 3633 if (!dev) { 3634 err = -ENOMEM; 3635 goto err_alloc_etherdev; 3636 } 3637 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 3638 mlxsw_sp_port = netdev_priv(dev); 3639 mlxsw_sp_port->dev = dev; 3640 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3641 mlxsw_sp_port->local_port = local_port; 3642 mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; 3643 mlxsw_sp_port->split = split; 3644 mlxsw_sp_port->mapping.module = module; 3645 mlxsw_sp_port->mapping.width = width; 3646 mlxsw_sp_port->mapping.lane = lane; 3647 mlxsw_sp_port->link.autoneg = 1; 3648 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 3649 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 3650 3651 mlxsw_sp_port->pcpu_stats = 3652 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 3653 if (!mlxsw_sp_port->pcpu_stats) { 3654 err = -ENOMEM; 3655 goto err_alloc_stats; 
3656 } 3657 3658 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 3659 GFP_KERNEL); 3660 if (!mlxsw_sp_port->sample) { 3661 err = -ENOMEM; 3662 goto err_alloc_sample; 3663 } 3664 3665 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 3666 &update_stats_cache); 3667 3668 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 3669 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 3670 3671 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane); 3672 if (err) { 3673 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 3674 mlxsw_sp_port->local_port); 3675 goto err_port_module_map; 3676 } 3677 3678 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 3679 if (err) { 3680 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 3681 mlxsw_sp_port->local_port); 3682 goto err_port_swid_set; 3683 } 3684 3685 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 3686 if (err) { 3687 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 3688 mlxsw_sp_port->local_port); 3689 goto err_dev_addr_init; 3690 } 3691 3692 netif_carrier_off(dev); 3693 3694 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 3695 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 3696 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; 3697 3698 dev->min_mtu = 0; 3699 dev->max_mtu = ETH_MAX_MTU; 3700 3701 /* Each packet needs to have a Tx header (metadata) on top all other 3702 * headers. 
3703 */ 3704 dev->needed_headroom = MLXSW_TXHDR_LEN; 3705 3706 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 3707 if (err) { 3708 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 3709 mlxsw_sp_port->local_port); 3710 goto err_port_system_port_mapping_set; 3711 } 3712 3713 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 3714 if (err) { 3715 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 3716 mlxsw_sp_port->local_port); 3717 goto err_port_speed_by_width_set; 3718 } 3719 3720 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 3721 if (err) { 3722 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 3723 mlxsw_sp_port->local_port); 3724 goto err_port_mtu_set; 3725 } 3726 3727 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3728 if (err) 3729 goto err_port_admin_status_set; 3730 3731 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 3732 if (err) { 3733 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 3734 mlxsw_sp_port->local_port); 3735 goto err_port_buffers_init; 3736 } 3737 3738 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 3739 if (err) { 3740 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 3741 mlxsw_sp_port->local_port); 3742 goto err_port_ets_init; 3743 } 3744 3745 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 3746 if (err) { 3747 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 3748 mlxsw_sp_port->local_port); 3749 goto err_port_tc_mc_mode; 3750 } 3751 3752 /* ETS and buffers must be initialized before DCB. 
*/ 3753 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 3754 if (err) { 3755 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 3756 mlxsw_sp_port->local_port); 3757 goto err_port_dcb_init; 3758 } 3759 3760 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 3761 if (err) { 3762 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 3763 mlxsw_sp_port->local_port); 3764 goto err_port_fids_init; 3765 } 3766 3767 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 3768 if (err) { 3769 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 3770 mlxsw_sp_port->local_port); 3771 goto err_port_qdiscs_init; 3772 } 3773 3774 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false, 3775 false); 3776 if (err) { 3777 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n", 3778 mlxsw_sp_port->local_port); 3779 goto err_port_vlan_clear; 3780 } 3781 3782 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 3783 if (err) { 3784 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 3785 mlxsw_sp_port->local_port); 3786 goto err_port_nve_init; 3787 } 3788 3789 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 3790 if (err) { 3791 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", 3792 mlxsw_sp_port->local_port); 3793 goto err_port_pvid_set; 3794 } 3795 3796 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 3797 MLXSW_SP_DEFAULT_VID); 3798 if (IS_ERR(mlxsw_sp_port_vlan)) { 3799 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 3800 mlxsw_sp_port->local_port); 3801 err = PTR_ERR(mlxsw_sp_port_vlan); 3802 goto err_port_vlan_create; 3803 } 3804 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; 3805 3806 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, 3807 mlxsw_sp->ptp_ops->shaper_work); 3808 3809 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 3810 err = register_netdev(dev); 3811 if (err) { 3812 dev_err(mlxsw_sp->bus_info->dev, "Port 
%d: Failed to register netdev\n", 3813 mlxsw_sp_port->local_port); 3814 goto err_register_netdev; 3815 } 3816 3817 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 3818 mlxsw_sp_port, dev); 3819 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 3820 return 0; 3821 3822 err_register_netdev: 3823 mlxsw_sp->ports[local_port] = NULL; 3824 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 3825 err_port_vlan_create: 3826 err_port_pvid_set: 3827 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3828 err_port_nve_init: 3829 err_port_vlan_clear: 3830 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3831 err_port_qdiscs_init: 3832 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3833 err_port_fids_init: 3834 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3835 err_port_dcb_init: 3836 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3837 err_port_tc_mc_mode: 3838 err_port_ets_init: 3839 err_port_buffers_init: 3840 err_port_admin_status_set: 3841 err_port_mtu_set: 3842 err_port_speed_by_width_set: 3843 err_port_system_port_mapping_set: 3844 err_dev_addr_init: 3845 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3846 err_port_swid_set: 3847 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3848 err_port_module_map: 3849 kfree(mlxsw_sp_port->sample); 3850 err_alloc_sample: 3851 free_percpu(mlxsw_sp_port->pcpu_stats); 3852 err_alloc_stats: 3853 free_netdev(dev); 3854 err_alloc_etherdev: 3855 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3856 return err; 3857 } 3858 3859 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3860 { 3861 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3862 3863 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 3864 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); 3865 mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 3866 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3867 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3868 
mlxsw_sp->ports[local_port] = NULL; 3869 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 3870 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3871 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3872 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3873 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3874 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3875 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3876 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3877 kfree(mlxsw_sp_port->sample); 3878 free_percpu(mlxsw_sp_port->pcpu_stats); 3879 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3880 free_netdev(mlxsw_sp_port->dev); 3881 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3882 } 3883 3884 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) 3885 { 3886 struct mlxsw_sp_port *mlxsw_sp_port; 3887 int err; 3888 3889 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL); 3890 if (!mlxsw_sp_port) 3891 return -ENOMEM; 3892 3893 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3894 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT; 3895 3896 err = mlxsw_core_cpu_port_init(mlxsw_sp->core, 3897 mlxsw_sp_port, 3898 mlxsw_sp->base_mac, 3899 sizeof(mlxsw_sp->base_mac)); 3900 if (err) { 3901 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n"); 3902 goto err_core_cpu_port_init; 3903 } 3904 3905 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port; 3906 return 0; 3907 3908 err_core_cpu_port_init: 3909 kfree(mlxsw_sp_port); 3910 return err; 3911 } 3912 3913 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) 3914 { 3915 struct mlxsw_sp_port *mlxsw_sp_port = 3916 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; 3917 3918 mlxsw_core_cpu_port_fini(mlxsw_sp->core); 3919 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL; 3920 kfree(mlxsw_sp_port); 3921 } 3922 3923 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3924 { 3925 return mlxsw_sp->ports[local_port] != NULL; 3926 } 3927 3928 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 
3929 { 3930 int i; 3931 3932 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 3933 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3934 mlxsw_sp_port_remove(mlxsw_sp, i); 3935 mlxsw_sp_cpu_port_remove(mlxsw_sp); 3936 kfree(mlxsw_sp->port_to_module); 3937 kfree(mlxsw_sp->ports); 3938 } 3939 3940 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3941 { 3942 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3943 u8 module, width, lane; 3944 size_t alloc_size; 3945 int i; 3946 int err; 3947 3948 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 3949 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 3950 if (!mlxsw_sp->ports) 3951 return -ENOMEM; 3952 3953 mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int), 3954 GFP_KERNEL); 3955 if (!mlxsw_sp->port_to_module) { 3956 err = -ENOMEM; 3957 goto err_port_to_module_alloc; 3958 } 3959 3960 err = mlxsw_sp_cpu_port_create(mlxsw_sp); 3961 if (err) 3962 goto err_cpu_port_create; 3963 3964 for (i = 1; i < max_ports; i++) { 3965 /* Mark as invalid */ 3966 mlxsw_sp->port_to_module[i] = -1; 3967 3968 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 3969 &width, &lane); 3970 if (err) 3971 goto err_port_module_info_get; 3972 if (!width) 3973 continue; 3974 mlxsw_sp->port_to_module[i] = module; 3975 err = mlxsw_sp_port_create(mlxsw_sp, i, false, 3976 module, width, lane); 3977 if (err) 3978 goto err_port_create; 3979 } 3980 return 0; 3981 3982 err_port_create: 3983 err_port_module_info_get: 3984 for (i--; i >= 1; i--) 3985 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3986 mlxsw_sp_port_remove(mlxsw_sp, i); 3987 mlxsw_sp_cpu_port_remove(mlxsw_sp); 3988 err_cpu_port_create: 3989 kfree(mlxsw_sp->port_to_module); 3990 err_port_to_module_alloc: 3991 kfree(mlxsw_sp->ports); 3992 return err; 3993 } 3994 3995 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 3996 { 3997 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 3998 3999 return local_port - offset; 4000 } 4001 4002 
/* Create @count split ports over @module, starting at @base_port with a
 * stride of @offset local ports. Each split port gets an equal share of
 * the module's maximum width and a matching starting lane (i * width).
 * On failure, ports created so far are removed in reverse order.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count, u8 offset)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   true, module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}

/* Re-create the original full-width ports after a split is undone (or a
 * split attempt fails). Best-effort: the return value of
 * mlxsw_sp_port_create() is deliberately ignored here.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		/* port_to_module < 0 marks a local port that never had a
		 * module mapped; nothing to re-create there.
		 */
		if (mlxsw_sp->port_to_module[local_port] < 0)
			continue;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}

/* devlink port split handler: split @local_port into @count (2 or 4)
 * ports. Removes the ports occupying the target local port range, then
 * creates the split ports; on failure the original unsplit ports are
 * restored best-effort.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 local_ports_in_1x, local_ports_in_2x, offset;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
		return -EIO;

	local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
	local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
		return -EINVAL;
	}

	/* Only a full-width port can be split. */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		offset = local_ports_in_2x;
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + local_ports_in_2x]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	} else {
		offset = local_ports_in_1x;
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Vacate the local port range the split ports will occupy. */
	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count,
					 offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

/* devlink port unsplit handler: remove the split ports and re-create the
 * original unsplit ports.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 local_ports_in_1x, local_ports_in_2x, offset;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
		return -EIO;

	local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
	local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	/* Infer the original split count from the current width:
	 * width 1 means a split-by-4 port, otherwise split-by-2.
	 */
	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	if (count == 2)
		offset = local_ports_in_2x;
	else
		offset = local_ports_in_1x;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}

/* PUDE (port up/down event) handler: mirror the operational status from
 * the PUDE register into the netdev carrier state. On link-up, kick the
 * PTP shaper work for the port.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

/* Drain all timestamp records from a MTPPTR (PTP timestamp FIFO) payload
 * and hand each one to the SP1 PTP code. @ingress selects the RX vs TX
 * FIFO semantics.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u8 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}

/* Ingress PTP FIFO event: forward to the common FIFO handler. */
static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
}

/* Egress PTP FIFO event: forward to the common FIFO handler. */
static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
}

/* Base RX listener: attribute the skb to the receiving port's netdev,
 * bump the per-CPU RX counters and deliver the packet to the stack.
 * Non-static: also used by trap code elsewhere in the driver.
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

/* Like the base listener, but mark the skb as already L2-forwarded in
 * hardware so the bridge does not forward it again.
 */
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* Like the base listener, but mark the skb as already L2- and L3-forwarded
 * in hardware.
 */
static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* RX listener for sampled packets: report the packet to the port's psample
 * group (truncated to trunc_size when configured) and consume the skb in
 * all cases.
 */
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	size = mlxsw_sp_port->sample->truncate ?
	       mlxsw_sp_port->sample->trunc_size : skb->len;

	/* psample_group is RCU-protected; it may be cleared concurrently. */
	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}

/* RX listener for PTP packets: delegate to the ASIC-specific PTP ops. */
static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port,
				     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}

/* Convenience wrappers around MLXSW_RXL that select the appropriate RX
 * listener and prefix the trap group with SP_.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

/* Traps and events common to all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
		  false, SP_LLDP, DISCARD),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
	/* PTP traps */
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU,
		  false, SP_PTP0, DISCARD),
	MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false),
};

/* Listeners specific to Spectrum-1 (registered via mlxsw_sp->listeners). */
static const struct mlxsw_listener
mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

/* Configure one QPCR policer per CPU trap group, rate-limiting traffic
 * trapped to the CPU. Trap groups not listed in the switch keep their
 * default (no policer is programmed for them).
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		/* Rates are in packets per second, burst size is a power
		 * of two exponent.
		 */
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			rate = 24 * 1024;
			burst_size = 12;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			rate = 19 * 1024;
			burst_size = 12;
			break;
		default:
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Assign each CPU trap group its priority, traffic class and policer.
 * The policer ID equals the trap group index except for the event group,
 * which is not policed.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* The policer ID must exist unless explicitly invalid. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Register an array of trap listeners with the core; on failure, the
 * listeners registered so far are unregistered in reverse order.
 */
static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_listener listeners[],
				   size_t listeners_count)
{
	int i;
	int err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &listeners[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
	return err;
}

/* Unregister an array of trap listeners previously registered with
 * mlxsw_sp_traps_register().
 */
static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_listener listeners[],
				      size_t listeners_count)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
}

/* Set up CPU policers and trap groups, then register both the common
 * listeners and any per-ASIC extra listeners (mlxsw_sp->listeners).
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	return err;
}

/* Unregister all trap listeners in reverse registration order. */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure the LAG hash (all supported fields, seeded per-device from the
 * base MAC) and allocate the LAG table.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	/* Seed the hash from the base MAC so different switches spread
	 * flows differently.
	 */
	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

/* Free the LAG table allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Configure the EMAD trap group so that register access responses can be
 * received before the full trap setup runs.
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

/* Spectrum-1 PTP implementation. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
};

/* Spectrum-2 PTP implementation. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
};

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

/* Common device initialization for all Spectrum generations. Subsystems
 * are brought up in dependency order; the error path unwinds them in
 * exact reverse order, so the label ladder must mirror the call sequence.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 init: install the SP1-specific ops and firmware info, then
 * run the common initialization.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	/* Wire up all Spectrum-1 specific operation tables and the required
	 * firmware revision, then run the common initialization.
	 */
	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Spectrum-2 flavor of the driver init: same pattern as mlxsw_sp1_init()
 * but with the Spectrum-2 operation tables. No req_rev/fw_filename are set
 * here - NOTE(review): presumably firmware validation/flashing is handled
 * differently on Spectrum-2; confirm against mlxsw_sp_init().
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Common teardown. Order is significant: it mirrors the error-unwind
 * labels of mlxsw_sp_init() and must stay in exact reverse order of
 * initialization.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	/* PTP state only exists if a clock was created during init */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	/* Spectrum-1 only: the KVD is partitioned between linear and
	 * single/double hash areas (see mlxsw_sp_kvd_sizes_get()).
	 */
	.used_kvd_sizes = 1,
	.kvd_hash_single_parts = 59,
	.kvd_hash_double_parts = 41,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Spectrum-2/3 profile: same as above minus the used_kvd_sizes/kvd_*
 * fields - these ASICs do not expose a partitioned KVD (see
 * mlxsw_sp2_resources_kvd_register()).
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Compute the devlink size-parameter envelopes for the KVD region and its
 * three partitions. The total KVD size is fixed; each partition may grow
 * up to the total minus the minimum sizes of its sibling partitions.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* min == max: the total KVD size cannot be changed by the user. */
	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the Spectrum-1 KVD resource tree with devlink:
 * kvd -> { linear, hash_double, hash_single }. Default sizes come from the
 * config profile; the hash area left after the linear partition is split
 * by the profile's single/double parts ratio.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	/* Registers the sub-resources of the linear partition. */
	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split what is left of the KVD between double and single hash
	 * entries by the parts ratio, rounded down to the granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

/* Spectrum-2 exposes the KVD as a single fixed-size devlink resource with
 * no sub-partitions.
 */
static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					 kvd_size, MLXSW_SP_RESOURCE_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &kvd_size_params);
}

static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_sp1_resources_kvd_register(mlxsw_core);
}

static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_sp2_resources_kvd_register(mlxsw_core);
}

/* Resolve the actual KVD partition sizes to program into the device.
 * User overrides are read back via devlink; otherwise fall back to the
 * profile defaults using the same computation as at registration time.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the KVD without the
	 * linear part. It is split into the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be multiples of the
	 * granularity from the profile. In case the user
	 * provided the sizes, they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* devlink 'fw_load_policy' validation: only 'driver' and 'flash' are
 * supported by this driver.
 */
static int
mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
					       union devlink_param_value val,
					       struct netlink_ext_ack *extack)
{
	if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
	    (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

/* devlink parameters common to all Spectrum generations. */
static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};

/* Register the common devlink parameters and set their driverinit
 * defaults.
 */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	/* Default: the driver decides when to flash firmware. */
	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}

static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp_devlink_params,
				  ARRAY_SIZE(mlxsw_sp_devlink_params));
}

static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

/* Extra runtime parameter only exposed on Spectrum-2/3. */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};

/* Spectrum-2 parameters: the common set plus the ACL region rehash
 * interval; unwinds the common registration on failure.
 */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = mlxsw_sp_params_register(mlxsw_core);
	if (err)
		return err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		goto err_devlink_params_register;

	/* Default rehash interval: 0 (see the ACL code for semantics). */
	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;

err_devlink_params_register:
	mlxsw_sp_params_unregister(mlxsw_core);
	return err;
}

static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
	mlxsw_sp_params_unregister(mlxsw_core);
}

/* Hand a transmitted skb to the PTP code for Tx timestamp completion,
 * after stripping the Tx header that was prepended for the device.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}

/* mlxsw core driver ops for Spectrum-1. Note kvd_sizes_get is only
 * needed here - Spectrum-2/3 have no partitioned KVD.
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind = mlxsw_sp1_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp1_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp1_resources_register,
	.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
	.params_register = mlxsw_sp_params_register,
	.params_unregister = mlxsw_sp_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp1_config_profile,
	.res_query_enabled = true,
};

/* Spectrum-2 ops: differ from Spectrum-1 in init, resources and params. */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind = mlxsw_sp2_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp2_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
};

/* Spectrum-3 reuses the Spectrum-2 callbacks and profile; only the kind
 * (and thus the PCI binding) differs.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind = mlxsw_sp3_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp2_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
};

/* Is this netdev one of our front-panel port netdevs? */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

/* netdev_walk_all_lower_dev() callback: stop (return 1) on the first
 * lower device that is an mlxsw_sp port and report it through 'data'.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

/* Find the mlxsw_sp port under 'dev' (which may itself be the port, or an
 * upper such as a bridge/LAG/VLAN device), or NULL.
 */
struct mlxsw_sp_port
*mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Resolve the mlxsw_sp instance underneath 'dev', or NULL if none of its
 * lowers is one of our ports.
 */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

/* RCU variant of mlxsw_sp_port_dev_lower_find(); caller must hold
 * rcu_read_lock().
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Like the above, but also takes a reference on the found port's netdev.
 * Pair with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

/* Make the port leave any bridge the LAG device - or an upper of the LAG
 * device - is a member of.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

/* SLDR register: create an (empty) LAG in the device. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* SLCOR register: add a port to a LAG's collector at 'port_index'. */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Find the LAG ID already used for 'lag_dev', or the first free one.
 * Returns -EBUSY when all device LAG entries are in use.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

/* Can this LAG device be offloaded? Requires a free (or already matching)
 * LAG ID and hash-based Tx.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

/* Pick the first free member slot inside a LAG. */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

/* Enslave a port to a LAG device: create the device LAG on first member,
 * add the port to the collector and record the mapping.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	/* First member creates the LAG in the device. */
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;

err_col_port_add:
	/* If we created the LAG above (ref_count still 0), tear it down. */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

/* Undo mlxsw_sp_port_lag_join(); the last member also destroys the LAG in
 * the device.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}

/* SLDR register: add a port to a LAG's distributor (Tx side). */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Enable collection (Rx) first, then distribution (Tx); unwind collection
 * if distribution fails.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Reverse of the above: stop distribution first, then collection. */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* React to lower-state (tx_enabled) changes reported by the LAG driver. */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

/* Set the STP state of every VLAN on the port via the SPMS register:
 * forwarding when 'enable', discarding otherwise.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	/* SPMS payload is heap-allocated - NOTE(review): presumably too
	 * large for the stack; confirm MLXSW_REG_SPMS_LEN.
	 */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Prepare a port for OVS: virtual-port (VLAN-unaware) mode, all VLANs
 * forwarding and member, and learning disabled on every VID.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning on the VIDs already disabled above. */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

/* Undo mlxsw_sp_port_ovs_join() in reverse order. */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

/* True when the bridge has more than one VxLAN lower device. */
static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

/* In a VLAN-aware bridge, no two VxLAN devices may map to the same VLAN;
 * returns false when a duplicate mapped VID is found.
 */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

/* Validate that a bridge with VxLAN lowers can be offloaded; sets an
 * extack message on every rejection path.
 */
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

/* Handle NETDEV_[PRE]CHANGEUPPER on a front-panel port netdev:
 * PRECHANGEUPPER vetoes unsupported topologies, CHANGEUPPER performs the
 * actual join/leave.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
6002 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 6003 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 6004 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 6005 return -EINVAL; 6006 } 6007 if (netif_is_macvlan(upper_dev) && 6008 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) { 6009 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6010 return -EOPNOTSUPP; 6011 } 6012 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 6013 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); 6014 return -EINVAL; 6015 } 6016 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 6017 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 6018 return -EINVAL; 6019 } 6020 break; 6021 case NETDEV_CHANGEUPPER: 6022 upper_dev = info->upper_dev; 6023 if (netif_is_bridge_master(upper_dev)) { 6024 if (info->linking) 6025 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 6026 lower_dev, 6027 upper_dev, 6028 extack); 6029 else 6030 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 6031 lower_dev, 6032 upper_dev); 6033 } else if (netif_is_lag_master(upper_dev)) { 6034 if (info->linking) { 6035 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 6036 upper_dev); 6037 } else { 6038 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 6039 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 6040 upper_dev); 6041 } 6042 } else if (netif_is_ovs_master(upper_dev)) { 6043 if (info->linking) 6044 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 6045 else 6046 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 6047 } else if (netif_is_macvlan(upper_dev)) { 6048 if (!info->linking) 6049 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6050 } else if (is_vlan_dev(upper_dev)) { 6051 struct net_device *br_dev; 6052 6053 if (!netif_is_bridge_port(upper_dev)) 6054 break; 6055 if (info->linking) 6056 break; 6057 br_dev = netdev_master_upper_dev_get(upper_dev); 6058 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 6059 br_dev); 6060 } 
6061 break; 6062 } 6063 6064 return err; 6065 } 6066 6067 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 6068 unsigned long event, void *ptr) 6069 { 6070 struct netdev_notifier_changelowerstate_info *info; 6071 struct mlxsw_sp_port *mlxsw_sp_port; 6072 int err; 6073 6074 mlxsw_sp_port = netdev_priv(dev); 6075 info = ptr; 6076 6077 switch (event) { 6078 case NETDEV_CHANGELOWERSTATE: 6079 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 6080 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 6081 info->lower_state_info); 6082 if (err) 6083 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 6084 } 6085 break; 6086 } 6087 6088 return 0; 6089 } 6090 6091 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 6092 struct net_device *port_dev, 6093 unsigned long event, void *ptr) 6094 { 6095 switch (event) { 6096 case NETDEV_PRECHANGEUPPER: 6097 case NETDEV_CHANGEUPPER: 6098 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 6099 event, ptr); 6100 case NETDEV_CHANGELOWERSTATE: 6101 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 6102 ptr); 6103 } 6104 6105 return 0; 6106 } 6107 6108 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 6109 unsigned long event, void *ptr) 6110 { 6111 struct net_device *dev; 6112 struct list_head *iter; 6113 int ret; 6114 6115 netdev_for_each_lower_dev(lag_dev, dev, iter) { 6116 if (mlxsw_sp_port_dev_check(dev)) { 6117 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 6118 ptr); 6119 if (ret) 6120 return ret; 6121 } 6122 } 6123 6124 return 0; 6125 } 6126 6127 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 6128 struct net_device *dev, 6129 unsigned long event, void *ptr, 6130 u16 vid) 6131 { 6132 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 6133 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6134 struct netdev_notifier_changeupper_info *info = ptr; 6135 struct netlink_ext_ack *extack; 
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* The remaining checks only apply when enslaving (linking) */
		if (!info->linking)
			break;
		/* Veto enslavement to a not-yet-offloaded bridge whose VxLAN
		 * configuration fails validation.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		/* macvlan uppers require a router interface (RIF) on the
		 * VLAN device.
		 */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* PRECHANGEUPPER should have rejected any other
			 * upper device type.
			 */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

/* Fan a VLAN-upper event out to each mlxsw member port of the LAG that the
 * VLAN device sits on top of; stop at the first error.
 */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle (PRE)CHANGEUPPER for a VLAN device on top of a bridge. Only
 * macvlan uppers are supported here; events on bridges not backed by this
 * driver (no mlxsw_sp lower) are ignored.
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		/* macvlan uppers require a router interface (RIF) on the
		 * VLAN device.
		 */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Dispatch an event on a VLAN device according to the type of its real
 * device: front-panel port, LAG or bridge.
 */
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if
	 (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

/* Handle (PRE)CHANGEUPPER for a bridge device. Only VLAN and macvlan
 * uppers are supported; events on bridges not backed by this driver
 * (no mlxsw_sp lower) are ignored.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		/* macvlan uppers require a router interface (RIF) on the
		 * bridge device.
		 */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		/* Only unlinking needs cleanup here */
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Reject any upper device placed on top of a macvlan backed by this
 * driver. VRF enslavement is the one supported case and is handled by a
 * different notifier path, so it never reaches this veto.
 */
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

/* True if this is a (PRE)CHANGEUPPER event whose upper device is an L3
 * master (VRF).
 */
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

/* Handle events on a VxLAN device: joining/leaving an offloaded bridge on
 * CHANGEUPPER, and (re)joining / leaving on NETDEV_PRE_UP / NETDEV_DOWN.
 * VLAN-aware bridges are mostly handled via the VLAN add/del path instead
 * (see the inline comments below).
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Ignore bridges not backed by this driver */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

/* Top-level netdevice notifier for the driver. Invalidates any SPAN
 * (mirroring) entry bound to a device that is being unregistered, respins
 * SPAN state, then dispatches the event by device type. The last errno is
 * converted to a notifier return value.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* NOTE(review): deliberately not part of the else-if chain below, so
	 * a VxLAN device also runs through it; the chain may then overwrite
	 * 'err' from the VxLAN handler — confirm this is the intended
	 * precedence.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

/* Validators for IPv4/IPv6 address addition, registered at module init */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

/* PCI id tables and per-ASIC PCI drivers (Spectrum-1/2/3) */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

/* Module init: register the address validators, then the three core
 * drivers, then the three PCI drivers. On failure, unwind in exact reverse
 * order via the goto ladder.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

/* Module exit: tear everything down in reverse registration order */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);