1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/jhash.h> 25 #include <net/switchdev.h> 26 #include <net/pkt_cls.h> 27 #include <net/tc_act/tc_mirred.h> 28 #include <net/netevent.h> 29 #include <net/tc_act/tc_sample.h> 30 #include <net/addrconf.h> 31 32 #include "spectrum.h" 33 #include "pci.h" 34 #include "core.h" 35 #include "core_env.h" 36 #include "reg.h" 37 #include "port.h" 38 #include "trap.h" 39 #include "txheader.h" 40 #include "spectrum_cnt.h" 41 #include "spectrum_dpipe.h" 42 #include "spectrum_acl_flex_actions.h" 43 #include "spectrum_span.h" 44 #include "spectrum_ptp.h" 45 #include "../mlxfw/mlxfw.h" 46 47 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) 48 49 #define MLXSW_SP1_FWREV_MAJOR 13 50 #define MLXSW_SP1_FWREV_MINOR 2000 51 #define MLXSW_SP1_FWREV_SUBMINOR 1122 52 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 53 54 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 55 .major = MLXSW_SP1_FWREV_MAJOR, 56 .minor = MLXSW_SP1_FWREV_MINOR, 57 .subminor = MLXSW_SP1_FWREV_SUBMINOR, 58 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 59 }; 60 61 #define MLXSW_SP1_FW_FILENAME \ 62 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 63 "." __stringify(MLXSW_SP1_FWREV_MINOR) \ 64 "." 
__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" 65 66 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 67 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 68 static const char mlxsw_sp_driver_version[] = "1.0"; 69 70 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 71 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 72 }; 73 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 74 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 75 }; 76 77 /* tx_hdr_version 78 * Tx header version. 79 * Must be set to 1. 80 */ 81 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 82 83 /* tx_hdr_ctl 84 * Packet control type. 85 * 0 - Ethernet control (e.g. EMADs, LACP) 86 * 1 - Ethernet data 87 */ 88 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 89 90 /* tx_hdr_proto 91 * Packet protocol type. Must be set to 1 (Ethernet). 92 */ 93 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 94 95 /* tx_hdr_rx_is_router 96 * Packet is sent from the router. Valid for data packets only. 97 */ 98 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 99 100 /* tx_hdr_fid_valid 101 * Indicates if the 'fid' field is valid and should be used for 102 * forwarding lookup. Valid for data packets only. 103 */ 104 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 105 106 /* tx_hdr_swid 107 * Switch partition ID. Must be set to 0. 108 */ 109 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 110 111 /* tx_hdr_control_tclass 112 * Indicates if the packet should use the control TClass and not one 113 * of the data TClasses. 114 */ 115 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 116 117 /* tx_hdr_etclass 118 * Egress TClass to be used on the egress device on the egress port. 119 */ 120 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 121 122 /* tx_hdr_port_mid 123 * Destination local port for unicast packets. 124 * Destination multicast ID for multicast packets. 
125 * 126 * Control packets are directed to a specific egress port, while data 127 * packets are transmitted through the CPU port (0) into the switch partition, 128 * where forwarding rules are applied. 129 */ 130 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 131 132 /* tx_hdr_fid 133 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 134 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 135 * Valid for data packets only. 136 */ 137 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 138 139 /* tx_hdr_type 140 * 0 - Data packets 141 * 6 - Control packets 142 */ 143 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 144 145 struct mlxsw_sp_mlxfw_dev { 146 struct mlxfw_dev mlxfw_dev; 147 struct mlxsw_sp *mlxsw_sp; 148 }; 149 150 struct mlxsw_sp_ptp_ops { 151 struct mlxsw_sp_ptp_clock * 152 (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev); 153 void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock); 154 155 struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp); 156 void (*fini)(struct mlxsw_sp_ptp_state *ptp_state); 157 158 /* Notify a driver that a packet that might be PTP was received. Driver 159 * is responsible for freeing the passed-in SKB. 160 */ 161 void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 162 u8 local_port); 163 164 /* Notify a driver that a timestamped packet was transmitted. Driver 165 * is responsible for freeing the passed-in SKB. 
166 */ 167 void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 168 u8 local_port); 169 170 int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, 171 struct hwtstamp_config *config); 172 int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port, 173 struct hwtstamp_config *config); 174 void (*shaper_work)(struct work_struct *work); 175 int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, 176 struct ethtool_ts_info *info); 177 }; 178 179 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 180 u16 component_index, u32 *p_max_size, 181 u8 *p_align_bits, u16 *p_max_write_size) 182 { 183 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 184 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 185 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 186 char mcqi_pl[MLXSW_REG_MCQI_LEN]; 187 int err; 188 189 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 190 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 191 if (err) 192 return err; 193 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 194 p_max_write_size); 195 196 *p_align_bits = max_t(u8, *p_align_bits, 2); 197 *p_max_write_size = min_t(u16, *p_max_write_size, 198 MLXSW_REG_MCDA_MAX_DATA_LEN); 199 return 0; 200 } 201 202 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 203 { 204 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 205 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 206 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 207 char mcc_pl[MLXSW_REG_MCC_LEN]; 208 u8 control_state; 209 int err; 210 211 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 212 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 213 if (err) 214 return err; 215 216 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 217 if (control_state != MLXFW_FSM_STATE_IDLE) 218 return -EBUSY; 219 220 mlxsw_reg_mcc_pack(mcc_pl, 221 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 222 0, *fwhandle, 0); 223 return 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 224 } 225 226 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 227 u32 fwhandle, u16 component_index, 228 u32 component_size) 229 { 230 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 231 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 232 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 233 char mcc_pl[MLXSW_REG_MCC_LEN]; 234 235 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 236 component_index, fwhandle, component_size); 237 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 238 } 239 240 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 241 u32 fwhandle, u8 *data, u16 size, 242 u32 offset) 243 { 244 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 245 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 246 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 247 char mcda_pl[MLXSW_REG_MCDA_LEN]; 248 249 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 250 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 251 } 252 253 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 254 u32 fwhandle, u16 component_index) 255 { 256 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 257 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 258 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 259 char mcc_pl[MLXSW_REG_MCC_LEN]; 260 261 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 262 component_index, fwhandle, 0); 263 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 264 } 265 266 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 267 { 268 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 269 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 270 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 271 char mcc_pl[MLXSW_REG_MCC_LEN]; 272 273 mlxsw_reg_mcc_pack(mcc_pl, 
MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 274 fwhandle, 0); 275 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 276 } 277 278 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 279 enum mlxfw_fsm_state *fsm_state, 280 enum mlxfw_fsm_state_err *fsm_state_err) 281 { 282 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 283 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 284 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 285 char mcc_pl[MLXSW_REG_MCC_LEN]; 286 u8 control_state; 287 u8 error_code; 288 int err; 289 290 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 291 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 292 if (err) 293 return err; 294 295 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 296 *fsm_state = control_state; 297 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 298 MLXFW_FSM_STATE_ERR_MAX); 299 return 0; 300 } 301 302 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 303 { 304 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 305 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 306 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 307 char mcc_pl[MLXSW_REG_MCC_LEN]; 308 309 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 310 fwhandle, 0); 311 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 312 } 313 314 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 315 { 316 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 317 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 318 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 319 char mcc_pl[MLXSW_REG_MCC_LEN]; 320 321 mlxsw_reg_mcc_pack(mcc_pl, 322 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 323 fwhandle, 0); 324 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 325 } 326 327 static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev, 328 const char *msg, const char 
*comp_name, 329 u32 done_bytes, u32 total_bytes) 330 { 331 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 332 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 333 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 334 335 devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core), 336 msg, comp_name, 337 done_bytes, total_bytes); 338 } 339 340 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 341 .component_query = mlxsw_sp_component_query, 342 .fsm_lock = mlxsw_sp_fsm_lock, 343 .fsm_component_update = mlxsw_sp_fsm_component_update, 344 .fsm_block_download = mlxsw_sp_fsm_block_download, 345 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 346 .fsm_activate = mlxsw_sp_fsm_activate, 347 .fsm_query_state = mlxsw_sp_fsm_query_state, 348 .fsm_cancel = mlxsw_sp_fsm_cancel, 349 .fsm_release = mlxsw_sp_fsm_release, 350 .status_notify = mlxsw_sp_status_notify, 351 }; 352 353 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 354 const struct firmware *firmware, 355 struct netlink_ext_ack *extack) 356 { 357 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 358 .mlxfw_dev = { 359 .ops = &mlxsw_sp_mlxfw_dev_ops, 360 .psid = mlxsw_sp->bus_info->psid, 361 .psid_size = strlen(mlxsw_sp->bus_info->psid), 362 }, 363 .mlxsw_sp = mlxsw_sp 364 }; 365 int err; 366 367 mlxsw_core_fw_flash_start(mlxsw_sp->core); 368 devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core)); 369 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, 370 firmware, extack); 371 devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core)); 372 mlxsw_core_fw_flash_end(mlxsw_sp->core); 373 374 return err; 375 } 376 377 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 378 { 379 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 380 const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 381 const char *fw_filename = mlxsw_sp->fw_filename; 382 union devlink_param_value value; 383 const struct firmware *firmware; 384 
int err; 385 386 /* Don't check if driver does not require it */ 387 if (!req_rev || !fw_filename) 388 return 0; 389 390 /* Don't check if devlink 'fw_load_policy' param is 'flash' */ 391 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), 392 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 393 &value); 394 if (err) 395 return err; 396 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) 397 return 0; 398 399 /* Validate driver & FW are compatible */ 400 if (rev->major != req_rev->major) { 401 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 402 rev->major, req_rev->major); 403 return -EINVAL; 404 } 405 if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == 406 MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && 407 (rev->minor > req_rev->minor || 408 (rev->minor == req_rev->minor && 409 rev->subminor >= req_rev->subminor))) 410 return 0; 411 412 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", 413 rev->major, rev->minor, rev->subminor); 414 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 415 fw_filename); 416 417 err = request_firmware_direct(&firmware, fw_filename, 418 mlxsw_sp->bus_info->dev); 419 if (err) { 420 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 421 fw_filename); 422 return err; 423 } 424 425 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL); 426 release_firmware(firmware); 427 if (err) 428 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 429 430 /* On FW flash success, tell the caller FW reset is needed 431 * if current FW supports it. 432 */ 433 if (rev->minor >= req_rev->can_reset_minor) 434 return err ? 
err : -EAGAIN; 435 else 436 return 0; 437 } 438 439 static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core, 440 const char *file_name, const char *component, 441 struct netlink_ext_ack *extack) 442 { 443 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 444 const struct firmware *firmware; 445 int err; 446 447 if (component) 448 return -EOPNOTSUPP; 449 450 err = request_firmware_direct(&firmware, file_name, 451 mlxsw_sp->bus_info->dev); 452 if (err) 453 return err; 454 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack); 455 release_firmware(firmware); 456 457 return err; 458 } 459 460 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 461 unsigned int counter_index, u64 *packets, 462 u64 *bytes) 463 { 464 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 465 int err; 466 467 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 468 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 469 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 470 if (err) 471 return err; 472 if (packets) 473 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 474 if (bytes) 475 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 476 return 0; 477 } 478 479 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 480 unsigned int counter_index) 481 { 482 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 483 484 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 485 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 486 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 487 } 488 489 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 490 unsigned int *p_counter_index) 491 { 492 int err; 493 494 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 495 p_counter_index); 496 if (err) 497 return err; 498 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 499 if (err) 500 goto err_counter_clear; 501 return 0; 502 503 err_counter_clear: 504 mlxsw_sp_counter_free(mlxsw_sp, 
MLXSW_SP_COUNTER_SUB_POOL_FLOW, 505 *p_counter_index); 506 return err; 507 } 508 509 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 510 unsigned int counter_index) 511 { 512 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 513 counter_index); 514 } 515 516 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 517 const struct mlxsw_tx_info *tx_info) 518 { 519 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 520 521 memset(txhdr, 0, MLXSW_TXHDR_LEN); 522 523 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 524 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 525 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 526 mlxsw_tx_hdr_swid_set(txhdr, 0); 527 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 528 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 529 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 530 } 531 532 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 533 { 534 switch (state) { 535 case BR_STATE_FORWARDING: 536 return MLXSW_REG_SPMS_STATE_FORWARDING; 537 case BR_STATE_LEARNING: 538 return MLXSW_REG_SPMS_STATE_LEARNING; 539 case BR_STATE_LISTENING: /* fall-through */ 540 case BR_STATE_DISABLED: /* fall-through */ 541 case BR_STATE_BLOCKING: 542 return MLXSW_REG_SPMS_STATE_DISCARDING; 543 default: 544 BUG(); 545 } 546 } 547 548 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 549 u8 state) 550 { 551 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 552 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 553 char *spms_pl; 554 int err; 555 556 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 557 if (!spms_pl) 558 return -ENOMEM; 559 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 560 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 561 562 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 563 kfree(spms_pl); 564 return err; 565 } 566 567 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 568 { 569 char 
spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 570 int err; 571 572 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 573 if (err) 574 return err; 575 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 576 return 0; 577 } 578 579 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, 580 bool enable, u32 rate) 581 { 582 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 583 char mpsc_pl[MLXSW_REG_MPSC_LEN]; 584 585 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); 586 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl); 587 } 588 589 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 590 bool is_up) 591 { 592 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 593 char paos_pl[MLXSW_REG_PAOS_LEN]; 594 595 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 596 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 597 MLXSW_PORT_ADMIN_STATUS_DOWN); 598 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 599 } 600 601 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 602 unsigned char *addr) 603 { 604 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 605 char ppad_pl[MLXSW_REG_PPAD_LEN]; 606 607 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 608 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 609 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 610 } 611 612 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 613 { 614 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 615 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 616 617 ether_addr_copy(addr, mlxsw_sp->base_mac); 618 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 619 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 620 } 621 622 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 623 { 624 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 625 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 626 int 
max_mtu; 627 int err; 628 629 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 630 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 631 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 632 if (err) 633 return err; 634 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 635 636 if (mtu > max_mtu) 637 return -EINVAL; 638 639 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 640 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 641 } 642 643 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 644 { 645 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 646 char pspa_pl[MLXSW_REG_PSPA_LEN]; 647 648 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 649 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 650 } 651 652 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 653 { 654 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 655 char svpe_pl[MLXSW_REG_SVPE_LEN]; 656 657 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 658 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 659 } 660 661 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 662 bool learn_enable) 663 { 664 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 665 char *spvmlr_pl; 666 int err; 667 668 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 669 if (!spvmlr_pl) 670 return -ENOMEM; 671 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 672 learn_enable); 673 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 674 kfree(spvmlr_pl); 675 return err; 676 } 677 678 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 679 u16 vid) 680 { 681 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 682 char spvid_pl[MLXSW_REG_SPVID_LEN]; 683 684 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 685 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), 
spvid_pl); 686 } 687 688 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 689 bool allow) 690 { 691 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 692 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 693 694 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 695 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 696 } 697 698 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 699 { 700 int err; 701 702 if (!vid) { 703 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 704 if (err) 705 return err; 706 } else { 707 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 708 if (err) 709 return err; 710 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 711 if (err) 712 goto err_port_allow_untagged_set; 713 } 714 715 mlxsw_sp_port->pvid = vid; 716 return 0; 717 718 err_port_allow_untagged_set: 719 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 720 return err; 721 } 722 723 static int 724 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 725 { 726 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 727 char sspr_pl[MLXSW_REG_SSPR_LEN]; 728 729 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 730 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 731 } 732 733 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, 734 u8 local_port, u8 *p_module, 735 u8 *p_width, u8 *p_lane) 736 { 737 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 738 int err; 739 740 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 741 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 742 if (err) 743 return err; 744 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 745 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); 746 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 747 return 0; 748 } 749 750 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port, 751 u8 module, u8 width, u8 lane) 752 { 753 struct mlxsw_sp *mlxsw_sp 
= mlxsw_sp_port->mlxsw_sp; 754 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 755 int i; 756 757 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 758 mlxsw_reg_pmlp_width_set(pmlp_pl, width); 759 for (i = 0; i < width; i++) { 760 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module); 761 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */ 762 } 763 764 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 765 } 766 767 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 768 { 769 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 770 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 771 772 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 773 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 774 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 775 } 776 777 static int mlxsw_sp_port_open(struct net_device *dev) 778 { 779 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 780 int err; 781 782 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 783 if (err) 784 return err; 785 netif_start_queue(dev); 786 return 0; 787 } 788 789 static int mlxsw_sp_port_stop(struct net_device *dev) 790 { 791 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 792 793 netif_stop_queue(dev); 794 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 795 } 796 797 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 798 struct net_device *dev) 799 { 800 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 801 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 802 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 803 const struct mlxsw_tx_info tx_info = { 804 .local_port = mlxsw_sp_port->local_port, 805 .is_emad = false, 806 }; 807 u64 len; 808 int err; 809 810 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); 811 812 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 813 return NETDEV_TX_BUSY; 814 815 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { 816 struct sk_buff *skb_orig = skb; 817 818 skb = 
skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); 819 if (!skb) { 820 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 821 dev_kfree_skb_any(skb_orig); 822 return NETDEV_TX_OK; 823 } 824 dev_consume_skb_any(skb_orig); 825 } 826 827 if (eth_skb_pad(skb)) { 828 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 829 return NETDEV_TX_OK; 830 } 831 832 mlxsw_sp_txhdr_construct(skb, &tx_info); 833 /* TX header is consumed by HW on the way so we shouldn't count its 834 * bytes as being sent. 835 */ 836 len = skb->len - MLXSW_TXHDR_LEN; 837 838 /* Due to a race we might fail here because of a full queue. In that 839 * unlikely case we simply drop the packet. 840 */ 841 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 842 843 if (!err) { 844 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 845 u64_stats_update_begin(&pcpu_stats->syncp); 846 pcpu_stats->tx_packets++; 847 pcpu_stats->tx_bytes += len; 848 u64_stats_update_end(&pcpu_stats->syncp); 849 } else { 850 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 851 dev_kfree_skb_any(skb); 852 } 853 return NETDEV_TX_OK; 854 } 855 856 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 857 { 858 } 859 860 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 861 { 862 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 863 struct sockaddr *addr = p; 864 int err; 865 866 if (!is_valid_ether_addr(addr->sa_data)) 867 return -EADDRNOTAVAIL; 868 869 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 870 if (err) 871 return err; 872 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 873 return 0; 874 } 875 876 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 877 int mtu) 878 { 879 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 880 } 881 882 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 883 884 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 885 u16 delay) 886 { 887 delay = 
mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 888 BITS_PER_BYTE)); 889 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 890 mtu); 891 } 892 893 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 894 * Assumes 100m cable and maximum MTU. 895 */ 896 #define MLXSW_SP_PAUSE_DELAY 58752 897 898 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 899 u16 delay, bool pfc, bool pause) 900 { 901 if (pfc) 902 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 903 else if (pause) 904 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 905 else 906 return 0; 907 } 908 909 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 910 bool lossy) 911 { 912 if (lossy) 913 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 914 else 915 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 916 thres); 917 } 918 919 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 920 u8 *prio_tc, bool pause_en, 921 struct ieee_pfc *my_pfc) 922 { 923 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 924 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 925 u16 delay = !!my_pfc ? 
my_pfc->delay : 0; 926 char pbmc_pl[MLXSW_REG_PBMC_LEN]; 927 u32 taken_headroom_cells = 0; 928 u32 max_headroom_cells; 929 int i, j, err; 930 931 max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp); 932 933 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); 934 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 935 if (err) 936 return err; 937 938 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 939 bool configure = false; 940 bool pfc = false; 941 u16 thres_cells; 942 u16 delay_cells; 943 u16 total_cells; 944 bool lossy; 945 946 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 947 if (prio_tc[j] == i) { 948 pfc = pfc_en & BIT(j); 949 configure = true; 950 break; 951 } 952 } 953 954 if (!configure) 955 continue; 956 957 lossy = !(pfc || pause_en); 958 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 959 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, 960 pfc, pause_en); 961 total_cells = thres_cells + delay_cells; 962 963 taken_headroom_cells += total_cells; 964 if (taken_headroom_cells > max_headroom_cells) 965 return -ENOBUFS; 966 967 mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells, 968 thres_cells, lossy); 969 } 970 971 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 972 } 973 974 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, 975 int mtu, bool pause_en) 976 { 977 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; 978 bool dcb_en = !!mlxsw_sp_port->dcb.ets; 979 struct ieee_pfc *my_pfc; 980 u8 *prio_tc; 981 982 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; 983 my_pfc = dcb_en ? 
mlxsw_sp_port->dcb.pfc : NULL; 984 985 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, 986 pause_en, my_pfc); 987 } 988 989 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 990 { 991 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 992 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 993 int err; 994 995 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); 996 if (err) 997 return err; 998 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); 999 if (err) 1000 goto err_span_port_mtu_update; 1001 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 1002 if (err) 1003 goto err_port_mtu_set; 1004 dev->mtu = mtu; 1005 return 0; 1006 1007 err_port_mtu_set: 1008 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); 1009 err_span_port_mtu_update: 1010 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1011 return err; 1012 } 1013 1014 static int 1015 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 1016 struct rtnl_link_stats64 *stats) 1017 { 1018 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1019 struct mlxsw_sp_port_pcpu_stats *p; 1020 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1021 u32 tx_dropped = 0; 1022 unsigned int start; 1023 int i; 1024 1025 for_each_possible_cpu(i) { 1026 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 1027 do { 1028 start = u64_stats_fetch_begin_irq(&p->syncp); 1029 rx_packets = p->rx_packets; 1030 rx_bytes = p->rx_bytes; 1031 tx_packets = p->tx_packets; 1032 tx_bytes = p->tx_bytes; 1033 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 1034 1035 stats->rx_packets += rx_packets; 1036 stats->rx_bytes += rx_bytes; 1037 stats->tx_packets += tx_packets; 1038 stats->tx_bytes += tx_bytes; 1039 /* tx_dropped is u32, updated without syncp protection. 
		 */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

/* Report whether "CPU hit" (software) statistics are available for this
 * netdev. Only IFLA_OFFLOAD_XSTATS_CPU_HIT is supported.
 */
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

/* Fill 'sp' with the per-CPU software counters for the CPU_HIT attribute.
 * Returns -EINVAL for any unsupported attribute.
 */
static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

/* Query one PPCNT counter group ('grp', with per-priority/TC index 'prio')
 * from the device into the caller-provided payload buffer 'ppcnt_pl'.
 */
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

/* Translate the IEEE 802.3 PPCNT counter group into rtnl_link_stats64.
 * On query failure 'stats' is left untouched and the error is returned.
 */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	/* Length errors aggregate the three IEEE length-related counters. */
	stats->rx_length_errors = (
mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) + 1106 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) + 1107 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl)); 1108 1109 stats->rx_errors = (stats->rx_crc_errors + 1110 stats->rx_frame_errors + stats->rx_length_errors); 1111 1112 out: 1113 return err; 1114 } 1115 1116 static void 1117 mlxsw_sp_port_get_hw_xstats(struct net_device *dev, 1118 struct mlxsw_sp_port_xstats *xstats) 1119 { 1120 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1121 int err, i; 1122 1123 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 1124 ppcnt_pl); 1125 if (!err) 1126 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); 1127 1128 for (i = 0; i < TC_MAX_QUEUE; i++) { 1129 err = mlxsw_sp_port_get_stats_raw(dev, 1130 MLXSW_REG_PPCNT_TC_CONG_TC, 1131 i, ppcnt_pl); 1132 if (!err) 1133 xstats->wred_drop[i] = 1134 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); 1135 1136 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, 1137 i, ppcnt_pl); 1138 if (err) 1139 continue; 1140 1141 xstats->backlog[i] = 1142 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 1143 xstats->tail_drop[i] = 1144 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); 1145 } 1146 1147 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1148 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, 1149 i, ppcnt_pl); 1150 if (err) 1151 continue; 1152 1153 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); 1154 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); 1155 } 1156 } 1157 1158 static void update_stats_cache(struct work_struct *work) 1159 { 1160 struct mlxsw_sp_port *mlxsw_sp_port = 1161 container_of(work, struct mlxsw_sp_port, 1162 periodic_hw_stats.update_dw.work); 1163 1164 if (!netif_carrier_ok(mlxsw_sp_port->dev)) 1165 goto out; 1166 1167 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, 1168 &mlxsw_sp_port->periodic_hw_stats.stats); 1169 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev, 1170 
&mlxsw_sp_port->periodic_hw_stats.xstats); 1171 1172 out: 1173 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 1174 MLXSW_HW_STATS_UPDATE_TIME); 1175 } 1176 1177 /* Return the stats from a cache that is updated periodically, 1178 * as this function might get called in an atomic context. 1179 */ 1180 static void 1181 mlxsw_sp_port_get_stats64(struct net_device *dev, 1182 struct rtnl_link_stats64 *stats) 1183 { 1184 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1185 1186 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats)); 1187 } 1188 1189 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, 1190 u16 vid_begin, u16 vid_end, 1191 bool is_member, bool untagged) 1192 { 1193 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1194 char *spvm_pl; 1195 int err; 1196 1197 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); 1198 if (!spvm_pl) 1199 return -ENOMEM; 1200 1201 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, 1202 vid_end, is_member, untagged); 1203 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); 1204 kfree(spvm_pl); 1205 return err; 1206 } 1207 1208 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, 1209 u16 vid_end, bool is_member, bool untagged) 1210 { 1211 u16 vid, vid_e; 1212 int err; 1213 1214 for (vid = vid_begin; vid <= vid_end; 1215 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 1216 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), 1217 vid_end); 1218 1219 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, 1220 is_member, untagged); 1221 if (err) 1222 return err; 1223 } 1224 1225 return 0; 1226 } 1227 1228 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port, 1229 bool flush_default) 1230 { 1231 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp; 1232 1233 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp, 1234 &mlxsw_sp_port->vlans_list, list) { 1235 if (!flush_default && 1236 
mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID) 1237 continue; 1238 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1239 } 1240 } 1241 1242 static void 1243 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1244 { 1245 if (mlxsw_sp_port_vlan->bridge_port) 1246 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 1247 else if (mlxsw_sp_port_vlan->fid) 1248 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 1249 } 1250 1251 struct mlxsw_sp_port_vlan * 1252 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 1253 { 1254 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1255 bool untagged = vid == MLXSW_SP_DEFAULT_VID; 1256 int err; 1257 1258 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1259 if (mlxsw_sp_port_vlan) 1260 return ERR_PTR(-EEXIST); 1261 1262 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged); 1263 if (err) 1264 return ERR_PTR(err); 1265 1266 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL); 1267 if (!mlxsw_sp_port_vlan) { 1268 err = -ENOMEM; 1269 goto err_port_vlan_alloc; 1270 } 1271 1272 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; 1273 mlxsw_sp_port_vlan->vid = vid; 1274 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); 1275 1276 return mlxsw_sp_port_vlan; 1277 1278 err_port_vlan_alloc: 1279 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1280 return ERR_PTR(err); 1281 } 1282 1283 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 1284 { 1285 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 1286 u16 vid = mlxsw_sp_port_vlan->vid; 1287 1288 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan); 1289 list_del(&mlxsw_sp_port_vlan->list); 1290 kfree(mlxsw_sp_port_vlan); 1291 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); 1292 } 1293 1294 static int mlxsw_sp_port_add_vid(struct net_device *dev, 1295 __be16 __always_unused proto, u16 vid) 1296 { 1297 
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

/* .ndo_vlan_rx_kill_vid handler: tear down the {port, VID} entry, if any. */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

/* Look up a matchall TC entry on the port by its TC filter cookie. */
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

/* Offload a matchall mirror action: set up a SPAN session towards
 * act->dev and record its ID in 'mirror' for later teardown.
 */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;

	if (!act->dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	mirror->ingress = ingress;
	span_type = ingress ?
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; 1355 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type, 1356 true, &mirror->span_id); 1357 } 1358 1359 static void 1360 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, 1361 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror) 1362 { 1363 enum mlxsw_sp_span_type span_type; 1364 1365 span_type = mirror->ingress ? 1366 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; 1367 mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id, 1368 span_type, true); 1369 } 1370 1371 static int 1372 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port, 1373 struct tc_cls_matchall_offload *cls, 1374 const struct flow_action_entry *act, 1375 bool ingress) 1376 { 1377 int err; 1378 1379 if (!mlxsw_sp_port->sample) 1380 return -EOPNOTSUPP; 1381 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) { 1382 netdev_err(mlxsw_sp_port->dev, "sample already active\n"); 1383 return -EEXIST; 1384 } 1385 if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) { 1386 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n"); 1387 return -EOPNOTSUPP; 1388 } 1389 1390 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group, 1391 act->sample.psample_group); 1392 mlxsw_sp_port->sample->truncate = act->sample.truncate; 1393 mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size; 1394 mlxsw_sp_port->sample->rate = act->sample.rate; 1395 1396 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate); 1397 if (err) 1398 goto err_port_sample_set; 1399 return 0; 1400 1401 err_port_sample_set: 1402 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); 1403 return err; 1404 } 1405 1406 static void 1407 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port) 1408 { 1409 if (!mlxsw_sp_port->sample) 1410 return; 1411 1412 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1); 1413 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); 1414 } 1415 1416 static int 
mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct tc_cls_matchall_offload *f,
			       bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	int err;

	/* Hardware offload supports exactly one action per matchall filter. */
	if (!flow_offload_has_one_action(&f->rule->action)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	act = &f->rule->action.entries[0];

	/* Only mirred (port mirroring) and sample actions on protocol
	 * "all" can be offloaded.
	 */
	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, act,
							    ingress);
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    act, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

/* Remove a previously offloaded matchall entry identified by f->cookie. */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

/* Dispatch matchall classifier commands (replace/destroy). */
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Dispatch flower classifier commands against the bound ACL block. */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Block callback handling matchall only; flower is served by the
 * per-ACL-block callback, so it is accepted here with no action.
 */
static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
1548 return 0; 1549 default: 1550 return -EOPNOTSUPP; 1551 } 1552 } 1553 1554 static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type, 1555 void *type_data, 1556 void *cb_priv) 1557 { 1558 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1559 cb_priv, true); 1560 } 1561 1562 static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type, 1563 void *type_data, 1564 void *cb_priv) 1565 { 1566 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1567 cb_priv, false); 1568 } 1569 1570 static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type, 1571 void *type_data, void *cb_priv) 1572 { 1573 struct mlxsw_sp_acl_block *acl_block = cb_priv; 1574 1575 switch (type) { 1576 case TC_SETUP_CLSMATCHALL: 1577 return 0; 1578 case TC_SETUP_CLSFLOWER: 1579 if (mlxsw_sp_acl_block_disabled(acl_block)) 1580 return -EOPNOTSUPP; 1581 1582 return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data); 1583 default: 1584 return -EOPNOTSUPP; 1585 } 1586 } 1587 1588 static int 1589 mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, 1590 struct tcf_block *block, bool ingress, 1591 struct netlink_ext_ack *extack) 1592 { 1593 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1594 struct mlxsw_sp_acl_block *acl_block; 1595 struct tcf_block_cb *block_cb; 1596 int err; 1597 1598 block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower, 1599 mlxsw_sp); 1600 if (!block_cb) { 1601 acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net); 1602 if (!acl_block) 1603 return -ENOMEM; 1604 block_cb = __tcf_block_cb_register(block, 1605 mlxsw_sp_setup_tc_block_cb_flower, 1606 mlxsw_sp, acl_block, extack); 1607 if (IS_ERR(block_cb)) { 1608 err = PTR_ERR(block_cb); 1609 goto err_cb_register; 1610 } 1611 } else { 1612 acl_block = tcf_block_cb_priv(block_cb); 1613 } 1614 tcf_block_cb_incref(block_cb); 1615 err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block, 1616 mlxsw_sp_port, ingress); 1617 if (err) 1618 goto 
err_block_bind; 1619 1620 if (ingress) 1621 mlxsw_sp_port->ing_acl_block = acl_block; 1622 else 1623 mlxsw_sp_port->eg_acl_block = acl_block; 1624 1625 return 0; 1626 1627 err_block_bind: 1628 if (!tcf_block_cb_decref(block_cb)) { 1629 __tcf_block_cb_unregister(block, block_cb); 1630 err_cb_register: 1631 mlxsw_sp_acl_block_destroy(acl_block); 1632 } 1633 return err; 1634 } 1635 1636 static void 1637 mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port, 1638 struct tcf_block *block, bool ingress) 1639 { 1640 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1641 struct mlxsw_sp_acl_block *acl_block; 1642 struct tcf_block_cb *block_cb; 1643 int err; 1644 1645 block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower, 1646 mlxsw_sp); 1647 if (!block_cb) 1648 return; 1649 1650 if (ingress) 1651 mlxsw_sp_port->ing_acl_block = NULL; 1652 else 1653 mlxsw_sp_port->eg_acl_block = NULL; 1654 1655 acl_block = tcf_block_cb_priv(block_cb); 1656 err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block, 1657 mlxsw_sp_port, ingress); 1658 if (!err && !tcf_block_cb_decref(block_cb)) { 1659 __tcf_block_cb_unregister(block, block_cb); 1660 mlxsw_sp_acl_block_destroy(acl_block); 1661 } 1662 } 1663 1664 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, 1665 struct tc_block_offload *f) 1666 { 1667 tc_setup_cb_t *cb; 1668 bool ingress; 1669 int err; 1670 1671 if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { 1672 cb = mlxsw_sp_setup_tc_block_cb_matchall_ig; 1673 ingress = true; 1674 } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 1675 cb = mlxsw_sp_setup_tc_block_cb_matchall_eg; 1676 ingress = false; 1677 } else { 1678 return -EOPNOTSUPP; 1679 } 1680 1681 switch (f->command) { 1682 case TC_BLOCK_BIND: 1683 err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port, 1684 mlxsw_sp_port, f->extack); 1685 if (err) 1686 return err; 1687 err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, 1688 f->block, 
ingress, 1689 f->extack); 1690 if (err) { 1691 tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); 1692 return err; 1693 } 1694 return 0; 1695 case TC_BLOCK_UNBIND: 1696 mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port, 1697 f->block, ingress); 1698 tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); 1699 return 0; 1700 default: 1701 return -EOPNOTSUPP; 1702 } 1703 } 1704 1705 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, 1706 void *type_data) 1707 { 1708 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1709 1710 switch (type) { 1711 case TC_SETUP_BLOCK: 1712 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1713 case TC_SETUP_QDISC_RED: 1714 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1715 case TC_SETUP_QDISC_PRIO: 1716 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data); 1717 default: 1718 return -EOPNOTSUPP; 1719 } 1720 } 1721 1722 1723 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) 1724 { 1725 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1726 1727 if (!enable) { 1728 if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) || 1729 mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) || 1730 !list_empty(&mlxsw_sp_port->mall_tc_list)) { 1731 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); 1732 return -EINVAL; 1733 } 1734 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block); 1735 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block); 1736 } else { 1737 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block); 1738 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block); 1739 } 1740 return 0; 1741 } 1742 1743 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable) 1744 { 1745 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1746 char pplr_pl[MLXSW_REG_PPLR_LEN]; 1747 int err; 1748 1749 if (netif_running(dev)) 1750 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1751 
	/* Toggle PHY-level loopback via the PPLR register. */
	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	/* Restore administrative state if the port was taken down above. */
	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Apply 'feature_handler' if 'feature' changed relative to dev->features,
 * and mirror the result into dev->features on success. Returns 0 when the
 * feature did not change or was applied; otherwise the handler's error.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}
/* .ndo_set_features handler. On any failure dev->features is restored to
 * its previous value and -EINVAL is returned.
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

/* .ndo_get_devlink_port handler. */
static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

/* SIOCSHWTSTAMP: copy the config from user space, let the per-ASIC PTP
 * ops apply it, then copy the (possibly adjusted) config back.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	/* Echo the applied configuration back to user space. */
	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* SIOCGHWTSTAMP: report the current hardware timestamping configuration. */
static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* Disable hardware timestamping by applying an all-zero config. */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

/* .ndo_do_ioctl handler; only the HW timestamping ioctls are handled. */
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1890 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1891 .ndo_set_features = mlxsw_sp_set_features, 1892 .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port, 1893 .ndo_do_ioctl = mlxsw_sp_port_ioctl, 1894 }; 1895 1896 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 1897 struct ethtool_drvinfo *drvinfo) 1898 { 1899 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1900 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1901 1902 strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, 1903 sizeof(drvinfo->driver)); 1904 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 1905 sizeof(drvinfo->version)); 1906 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 1907 "%d.%d.%d", 1908 mlxsw_sp->bus_info->fw_rev.major, 1909 mlxsw_sp->bus_info->fw_rev.minor, 1910 mlxsw_sp->bus_info->fw_rev.subminor); 1911 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 1912 sizeof(drvinfo->bus_info)); 1913 } 1914 1915 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, 1916 struct ethtool_pauseparam *pause) 1917 { 1918 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1919 1920 pause->rx_pause = mlxsw_sp_port->link.rx_pause; 1921 pause->tx_pause = mlxsw_sp_port->link.tx_pause; 1922 } 1923 1924 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, 1925 struct ethtool_pauseparam *pause) 1926 { 1927 char pfcc_pl[MLXSW_REG_PFCC_LEN]; 1928 1929 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); 1930 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); 1931 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); 1932 1933 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), 1934 pfcc_pl); 1935 } 1936 1937 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, 1938 struct ethtool_pauseparam *pause) 1939 { 1940 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1941 bool pause_en = pause->tx_pause || pause->rx_pause; 1942 int err; 1943 1944 
if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { 1945 netdev_err(dev, "PFC already enabled on port\n"); 1946 return -EINVAL; 1947 } 1948 1949 if (pause->autoneg) { 1950 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); 1951 return -EINVAL; 1952 } 1953 1954 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1955 if (err) { 1956 netdev_err(dev, "Failed to configure port's headroom\n"); 1957 return err; 1958 } 1959 1960 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); 1961 if (err) { 1962 netdev_err(dev, "Failed to set PAUSE parameters\n"); 1963 goto err_port_pause_configure; 1964 } 1965 1966 mlxsw_sp_port->link.rx_pause = pause->rx_pause; 1967 mlxsw_sp_port->link.tx_pause = pause->tx_pause; 1968 1969 return 0; 1970 1971 err_port_pause_configure: 1972 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1973 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1974 return err; 1975 } 1976 1977 struct mlxsw_sp_port_hw_stats { 1978 char str[ETH_GSTRING_LEN]; 1979 u64 (*getter)(const char *payload); 1980 bool cells_bytes; 1981 }; 1982 1983 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 1984 { 1985 .str = "a_frames_transmitted_ok", 1986 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 1987 }, 1988 { 1989 .str = "a_frames_received_ok", 1990 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 1991 }, 1992 { 1993 .str = "a_frame_check_sequence_errors", 1994 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 1995 }, 1996 { 1997 .str = "a_alignment_errors", 1998 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 1999 }, 2000 { 2001 .str = "a_octets_transmitted_ok", 2002 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 2003 }, 2004 { 2005 .str = "a_octets_received_ok", 2006 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 2007 }, 2008 { 2009 .str = "a_multicast_frames_xmitted_ok", 2010 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 2011 }, 2012 { 2013 .str = 
"a_broadcast_frames_xmitted_ok", 2014 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 2015 }, 2016 { 2017 .str = "a_multicast_frames_received_ok", 2018 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 2019 }, 2020 { 2021 .str = "a_broadcast_frames_received_ok", 2022 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 2023 }, 2024 { 2025 .str = "a_in_range_length_errors", 2026 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 2027 }, 2028 { 2029 .str = "a_out_of_range_length_field", 2030 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 2031 }, 2032 { 2033 .str = "a_frame_too_long_errors", 2034 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 2035 }, 2036 { 2037 .str = "a_symbol_error_during_carrier", 2038 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 2039 }, 2040 { 2041 .str = "a_mac_control_frames_transmitted", 2042 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 2043 }, 2044 { 2045 .str = "a_mac_control_frames_received", 2046 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 2047 }, 2048 { 2049 .str = "a_unsupported_opcodes_received", 2050 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 2051 }, 2052 { 2053 .str = "a_pause_mac_ctrl_frames_received", 2054 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 2055 }, 2056 { 2057 .str = "a_pause_mac_ctrl_frames_xmitted", 2058 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 2059 }, 2060 }; 2061 2062 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 2063 2064 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = { 2065 { 2066 .str = "if_in_discards", 2067 .getter = mlxsw_reg_ppcnt_if_in_discards_get, 2068 }, 2069 { 2070 .str = "if_out_discards", 2071 .getter = mlxsw_reg_ppcnt_if_out_discards_get, 2072 }, 2073 { 2074 .str = "if_out_errors", 2075 .getter = mlxsw_reg_ppcnt_if_out_errors_get, 2076 }, 2077 }; 2078 2079 #define 
MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \ 2080 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats) 2081 2082 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { 2083 { 2084 .str = "ether_stats_undersize_pkts", 2085 .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get, 2086 }, 2087 { 2088 .str = "ether_stats_oversize_pkts", 2089 .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get, 2090 }, 2091 { 2092 .str = "ether_stats_fragments", 2093 .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get, 2094 }, 2095 { 2096 .str = "ether_pkts64octets", 2097 .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, 2098 }, 2099 { 2100 .str = "ether_pkts65to127octets", 2101 .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get, 2102 }, 2103 { 2104 .str = "ether_pkts128to255octets", 2105 .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, 2106 }, 2107 { 2108 .str = "ether_pkts256to511octets", 2109 .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, 2110 }, 2111 { 2112 .str = "ether_pkts512to1023octets", 2113 .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, 2114 }, 2115 { 2116 .str = "ether_pkts1024to1518octets", 2117 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, 2118 }, 2119 { 2120 .str = "ether_pkts1519to2047octets", 2121 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, 2122 }, 2123 { 2124 .str = "ether_pkts2048to4095octets", 2125 .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, 2126 }, 2127 { 2128 .str = "ether_pkts4096to8191octets", 2129 .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get, 2130 }, 2131 { 2132 .str = "ether_pkts8192to10239octets", 2133 .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get, 2134 }, 2135 }; 2136 2137 #define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ 2138 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) 2139 2140 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = { 2141 { 2142 .str = "dot3stats_fcs_errors", 2143 
.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get, 2144 }, 2145 { 2146 .str = "dot3stats_symbol_errors", 2147 .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get, 2148 }, 2149 { 2150 .str = "dot3control_in_unknown_opcodes", 2151 .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get, 2152 }, 2153 { 2154 .str = "dot3in_pause_frames", 2155 .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get, 2156 }, 2157 }; 2158 2159 #define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \ 2160 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats) 2161 2162 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = { 2163 { 2164 .str = "discard_ingress_general", 2165 .getter = mlxsw_reg_ppcnt_ingress_general_get, 2166 }, 2167 { 2168 .str = "discard_ingress_policy_engine", 2169 .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get, 2170 }, 2171 { 2172 .str = "discard_ingress_vlan_membership", 2173 .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get, 2174 }, 2175 { 2176 .str = "discard_ingress_tag_frame_type", 2177 .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get, 2178 }, 2179 { 2180 .str = "discard_egress_vlan_membership", 2181 .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get, 2182 }, 2183 { 2184 .str = "discard_loopback_filter", 2185 .getter = mlxsw_reg_ppcnt_loopback_filter_get, 2186 }, 2187 { 2188 .str = "discard_egress_general", 2189 .getter = mlxsw_reg_ppcnt_egress_general_get, 2190 }, 2191 { 2192 .str = "discard_egress_hoq", 2193 .getter = mlxsw_reg_ppcnt_egress_hoq_get, 2194 }, 2195 { 2196 .str = "discard_egress_policy_engine", 2197 .getter = mlxsw_reg_ppcnt_egress_policy_engine_get, 2198 }, 2199 { 2200 .str = "discard_ingress_tx_link_down", 2201 .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get, 2202 }, 2203 { 2204 .str = "discard_egress_stp_filter", 2205 .getter = mlxsw_reg_ppcnt_egress_stp_filter_get, 2206 }, 2207 { 2208 .str = "discard_egress_sll", 2209 .getter = mlxsw_reg_ppcnt_egress_sll_get, 2210 }, 2211 }; 2212 2213 #define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \ 
2214 ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats) 2215 2216 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 2217 { 2218 .str = "rx_octets_prio", 2219 .getter = mlxsw_reg_ppcnt_rx_octets_get, 2220 }, 2221 { 2222 .str = "rx_frames_prio", 2223 .getter = mlxsw_reg_ppcnt_rx_frames_get, 2224 }, 2225 { 2226 .str = "tx_octets_prio", 2227 .getter = mlxsw_reg_ppcnt_tx_octets_get, 2228 }, 2229 { 2230 .str = "tx_frames_prio", 2231 .getter = mlxsw_reg_ppcnt_tx_frames_get, 2232 }, 2233 { 2234 .str = "rx_pause_prio", 2235 .getter = mlxsw_reg_ppcnt_rx_pause_get, 2236 }, 2237 { 2238 .str = "rx_pause_duration_prio", 2239 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 2240 }, 2241 { 2242 .str = "tx_pause_prio", 2243 .getter = mlxsw_reg_ppcnt_tx_pause_get, 2244 }, 2245 { 2246 .str = "tx_pause_duration_prio", 2247 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 2248 }, 2249 }; 2250 2251 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 2252 2253 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 2254 { 2255 .str = "tc_transmit_queue_tc", 2256 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 2257 .cells_bytes = true, 2258 }, 2259 { 2260 .str = "tc_no_buffer_discard_uc_tc", 2261 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 2262 }, 2263 }; 2264 2265 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 2266 2267 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 2268 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \ 2269 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ 2270 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \ 2271 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \ 2272 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ 2273 IEEE_8021QAZ_MAX_TCS) + \ 2274 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ 2275 TC_MAX_QUEUE)) 2276 2277 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 2278 { 2279 int i; 2280 2281 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 2282 snprintf(*p, ETH_GSTRING_LEN, 
"%.29s_%.1d", 2283 mlxsw_sp_port_hw_prio_stats[i].str, prio); 2284 *p += ETH_GSTRING_LEN; 2285 } 2286 } 2287 2288 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 2289 { 2290 int i; 2291 2292 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 2293 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 2294 mlxsw_sp_port_hw_tc_stats[i].str, tc); 2295 *p += ETH_GSTRING_LEN; 2296 } 2297 } 2298 2299 static void mlxsw_sp_port_get_strings(struct net_device *dev, 2300 u32 stringset, u8 *data) 2301 { 2302 u8 *p = data; 2303 int i; 2304 2305 switch (stringset) { 2306 case ETH_SS_STATS: 2307 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2308 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2309 ETH_GSTRING_LEN); 2310 p += ETH_GSTRING_LEN; 2311 } 2312 2313 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) { 2314 memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str, 2315 ETH_GSTRING_LEN); 2316 p += ETH_GSTRING_LEN; 2317 } 2318 2319 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { 2320 memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, 2321 ETH_GSTRING_LEN); 2322 p += ETH_GSTRING_LEN; 2323 } 2324 2325 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) { 2326 memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str, 2327 ETH_GSTRING_LEN); 2328 p += ETH_GSTRING_LEN; 2329 } 2330 2331 for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) { 2332 memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str, 2333 ETH_GSTRING_LEN); 2334 p += ETH_GSTRING_LEN; 2335 } 2336 2337 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2338 mlxsw_sp_port_get_prio_strings(&p, i); 2339 2340 for (i = 0; i < TC_MAX_QUEUE; i++) 2341 mlxsw_sp_port_get_tc_strings(&p, i); 2342 2343 break; 2344 } 2345 } 2346 2347 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2348 enum ethtool_phys_id_state state) 2349 { 2350 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2351 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2352 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2353 bool active; 2354 2355 switch 
(state) { 2356 case ETHTOOL_ID_ACTIVE: 2357 active = true; 2358 break; 2359 case ETHTOOL_ID_INACTIVE: 2360 active = false; 2361 break; 2362 default: 2363 return -EOPNOTSUPP; 2364 } 2365 2366 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2367 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2368 } 2369 2370 static int 2371 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2372 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2373 { 2374 switch (grp) { 2375 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2376 *p_hw_stats = mlxsw_sp_port_hw_stats; 2377 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2378 break; 2379 case MLXSW_REG_PPCNT_RFC_2863_CNT: 2380 *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats; 2381 *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2382 break; 2383 case MLXSW_REG_PPCNT_RFC_2819_CNT: 2384 *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; 2385 *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2386 break; 2387 case MLXSW_REG_PPCNT_RFC_3635_CNT: 2388 *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats; 2389 *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2390 break; 2391 case MLXSW_REG_PPCNT_DISCARD_CNT: 2392 *p_hw_stats = mlxsw_sp_port_hw_discard_stats; 2393 *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2394 break; 2395 case MLXSW_REG_PPCNT_PRIO_CNT: 2396 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2397 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2398 break; 2399 case MLXSW_REG_PPCNT_TC_CNT: 2400 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2401 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2402 break; 2403 default: 2404 WARN_ON(1); 2405 return -EOPNOTSUPP; 2406 } 2407 return 0; 2408 } 2409 2410 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2411 enum mlxsw_reg_ppcnt_grp grp, int prio, 2412 u64 *data, int data_index) 2413 { 2414 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2415 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2416 struct mlxsw_sp_port_hw_stats *hw_stats; 2417 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2418 int 
i, len; 2419 int err; 2420 2421 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2422 if (err) 2423 return; 2424 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2425 for (i = 0; i < len; i++) { 2426 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2427 if (!hw_stats[i].cells_bytes) 2428 continue; 2429 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2430 data[data_index + i]); 2431 } 2432 } 2433 2434 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2435 struct ethtool_stats *stats, u64 *data) 2436 { 2437 int i, data_index = 0; 2438 2439 /* IEEE 802.3 Counters */ 2440 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2441 data, data_index); 2442 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2443 2444 /* RFC 2863 Counters */ 2445 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0, 2446 data, data_index); 2447 data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2448 2449 /* RFC 2819 Counters */ 2450 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, 2451 data, data_index); 2452 data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2453 2454 /* RFC 3635 Counters */ 2455 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0, 2456 data, data_index); 2457 data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2458 2459 /* Discard Counters */ 2460 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0, 2461 data, data_index); 2462 data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2463 2464 /* Per-Priority Counters */ 2465 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2466 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2467 data, data_index); 2468 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2469 } 2470 2471 /* Per-TC Counters */ 2472 for (i = 0; i < TC_MAX_QUEUE; i++) { 2473 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2474 data, data_index); 2475 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2476 } 2477 } 2478 2479 static int mlxsw_sp_port_get_sset_count(struct 
net_device *dev, int sset) 2480 { 2481 switch (sset) { 2482 case ETH_SS_STATS: 2483 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; 2484 default: 2485 return -EOPNOTSUPP; 2486 } 2487 } 2488 2489 struct mlxsw_sp1_port_link_mode { 2490 enum ethtool_link_mode_bit_indices mask_ethtool; 2491 u32 mask; 2492 u32 speed; 2493 }; 2494 2495 static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = { 2496 { 2497 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2498 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2499 .speed = SPEED_100, 2500 }, 2501 { 2502 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2503 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2504 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2505 .speed = SPEED_1000, 2506 }, 2507 { 2508 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2509 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2510 .speed = SPEED_10000, 2511 }, 2512 { 2513 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 2514 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 2515 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2516 .speed = SPEED_10000, 2517 }, 2518 { 2519 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2520 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2521 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2522 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 2523 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2524 .speed = SPEED_10000, 2525 }, 2526 { 2527 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 2528 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 2529 .speed = SPEED_20000, 2530 }, 2531 { 2532 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 2533 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2534 .speed = SPEED_40000, 2535 }, 2536 { 2537 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 2538 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2539 .speed = SPEED_40000, 2540 }, 2541 { 2542 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 2543 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2544 .speed = SPEED_40000, 2545 }, 2546 { 
2547 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 2548 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2549 .speed = SPEED_40000, 2550 }, 2551 { 2552 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, 2553 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2554 .speed = SPEED_25000, 2555 }, 2556 { 2557 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, 2558 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2559 .speed = SPEED_25000, 2560 }, 2561 { 2562 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2563 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2564 .speed = SPEED_25000, 2565 }, 2566 { 2567 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, 2568 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2569 .speed = SPEED_50000, 2570 }, 2571 { 2572 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 2573 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2574 .speed = SPEED_50000, 2575 }, 2576 { 2577 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2, 2578 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2579 .speed = SPEED_50000, 2580 }, 2581 { 2582 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2583 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, 2584 .speed = SPEED_56000, 2585 }, 2586 { 2587 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2588 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, 2589 .speed = SPEED_56000, 2590 }, 2591 { 2592 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2593 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, 2594 .speed = SPEED_56000, 2595 }, 2596 { 2597 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2598 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, 2599 .speed = SPEED_56000, 2600 }, 2601 { 2602 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, 2603 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2604 .speed = SPEED_100000, 2605 }, 2606 { 2607 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, 2608 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2609 .speed = SPEED_100000, 
2610 }, 2611 { 2612 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, 2613 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2614 .speed = SPEED_100000, 2615 }, 2616 { 2617 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 2618 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2619 .speed = SPEED_100000, 2620 }, 2621 }; 2622 2623 #define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode) 2624 2625 static void 2626 mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 2627 u32 ptys_eth_proto, 2628 struct ethtool_link_ksettings *cmd) 2629 { 2630 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2631 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2632 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2633 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2634 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2635 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2636 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2637 2638 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2639 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2640 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2641 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2642 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2643 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2644 } 2645 2646 static void 2647 mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 2648 unsigned long *mode) 2649 { 2650 int i; 2651 2652 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2653 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) 2654 __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2655 mode); 2656 } 2657 } 2658 2659 static u32 2660 mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 2661 { 2662 int i; 2663 2664 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2665 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) 2666 return mlxsw_sp1_port_link_mode[i].speed; 2667 } 2668 2669 return SPEED_UNKNOWN; 2670 } 2671 2672 static void 2673 
mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 2674 u32 ptys_eth_proto, 2675 struct ethtool_link_ksettings *cmd) 2676 { 2677 cmd->base.speed = SPEED_UNKNOWN; 2678 cmd->base.duplex = DUPLEX_UNKNOWN; 2679 2680 if (!carrier_ok) 2681 return; 2682 2683 cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 2684 if (cmd->base.speed != SPEED_UNKNOWN) 2685 cmd->base.duplex = DUPLEX_FULL; 2686 } 2687 2688 static u32 2689 mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, 2690 const struct ethtool_link_ksettings *cmd) 2691 { 2692 u32 ptys_proto = 0; 2693 int i; 2694 2695 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2696 if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2697 cmd->link_modes.advertising)) 2698 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2699 } 2700 return ptys_proto; 2701 } 2702 2703 static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed) 2704 { 2705 u32 ptys_proto = 0; 2706 int i; 2707 2708 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2709 if (speed == mlxsw_sp1_port_link_mode[i].speed) 2710 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2711 } 2712 return ptys_proto; 2713 } 2714 2715 static u32 2716 mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 2717 { 2718 u32 ptys_proto = 0; 2719 int i; 2720 2721 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2722 if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed) 2723 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2724 } 2725 return ptys_proto; 2726 } 2727 2728 static int 2729 mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2730 u32 *base_speed) 2731 { 2732 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 2733 return 0; 2734 } 2735 2736 static void 2737 mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 2738 u8 local_port, u32 proto_admin, bool autoneg) 2739 { 2740 mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg); 2741 } 2742 2743 static 
void 2744 mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 2745 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 2746 u32 *p_eth_proto_oper) 2747 { 2748 mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin, 2749 p_eth_proto_oper); 2750 } 2751 2752 static const struct mlxsw_sp_port_type_speed_ops 2753 mlxsw_sp1_port_type_speed_ops = { 2754 .from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port, 2755 .from_ptys_link = mlxsw_sp1_from_ptys_link, 2756 .from_ptys_speed = mlxsw_sp1_from_ptys_speed, 2757 .from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex, 2758 .to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link, 2759 .to_ptys_speed = mlxsw_sp1_to_ptys_speed, 2760 .to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed, 2761 .port_speed_base = mlxsw_sp1_port_speed_base, 2762 .reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack, 2763 .reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack, 2764 }; 2765 2766 static const enum ethtool_link_mode_bit_indices 2767 mlxsw_sp2_mask_ethtool_sgmii_100m[] = { 2768 ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2769 }; 2770 2771 #define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \ 2772 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m) 2773 2774 static const enum ethtool_link_mode_bit_indices 2775 mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = { 2776 ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 2777 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2778 }; 2779 2780 #define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \ 2781 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii) 2782 2783 static const enum ethtool_link_mode_bit_indices 2784 mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = { 2785 ETHTOOL_LINK_MODE_2500baseX_Full_BIT, 2786 }; 2787 2788 #define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \ 2789 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii) 2790 2791 static const enum ethtool_link_mode_bit_indices 2792 mlxsw_sp2_mask_ethtool_5gbase_r[] = { 2793 ETHTOOL_LINK_MODE_5000baseT_Full_BIT, 2794 }; 2795 2796 #define 
MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \ 2797 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r) 2798 2799 static const enum ethtool_link_mode_bit_indices 2800 mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = { 2801 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2802 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2803 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 2804 ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 2805 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 2806 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 2807 ETHTOOL_LINK_MODE_10000baseER_Full_BIT, 2808 }; 2809 2810 #define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \ 2811 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g) 2812 2813 static const enum ethtool_link_mode_bit_indices 2814 mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = { 2815 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2816 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2817 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2818 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2819 }; 2820 2821 #define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \ 2822 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g) 2823 2824 static const enum ethtool_link_mode_bit_indices 2825 mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = { 2826 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2827 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2828 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2829 }; 2830 2831 #define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \ 2832 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr) 2833 2834 static const enum ethtool_link_mode_bit_indices 2835 mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = { 2836 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2837 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2838 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2839 }; 2840 2841 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \ 2842 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2) 2843 2844 static const enum ethtool_link_mode_bit_indices 2845 mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = { 
2846 ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, 2847 ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, 2848 ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, 2849 ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, 2850 ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, 2851 }; 2852 2853 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \ 2854 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr) 2855 2856 static const enum ethtool_link_mode_bit_indices 2857 mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = { 2858 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2859 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2860 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2861 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2862 }; 2863 2864 #define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \ 2865 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4) 2866 2867 static const enum ethtool_link_mode_bit_indices 2868 mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = { 2869 ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, 2870 ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, 2871 ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, 2872 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, 2873 ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, 2874 }; 2875 2876 #define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \ 2877 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2) 2878 2879 static const enum ethtool_link_mode_bit_indices 2880 mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { 2881 ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, 2882 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, 2883 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, 2884 ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, 2885 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, 2886 }; 2887 2888 #define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \ 2889 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4) 2890 2891 struct mlxsw_sp2_port_link_mode { 2892 const enum ethtool_link_mode_bit_indices *mask_ethtool; 2893 int m_ethtool_len; 2894 u32 
mask; 2895 u32 speed; 2896 }; 2897 2898 static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { 2899 { 2900 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M, 2901 .mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m, 2902 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN, 2903 .speed = SPEED_100, 2904 }, 2905 { 2906 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII, 2907 .mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii, 2908 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN, 2909 .speed = SPEED_1000, 2910 }, 2911 { 2912 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII, 2913 .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii, 2914 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN, 2915 .speed = SPEED_2500, 2916 }, 2917 { 2918 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R, 2919 .mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r, 2920 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN, 2921 .speed = SPEED_5000, 2922 }, 2923 { 2924 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G, 2925 .mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g, 2926 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN, 2927 .speed = SPEED_10000, 2928 }, 2929 { 2930 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G, 2931 .mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g, 2932 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN, 2933 .speed = SPEED_40000, 2934 }, 2935 { 2936 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR, 2937 .mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr, 2938 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN, 2939 .speed = SPEED_25000, 2940 }, 2941 { 2942 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2, 2943 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2, 2944 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN, 2945 .speed = SPEED_50000, 2946 }, 2947 { 
2948 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR, 2949 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr, 2950 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN, 2951 .speed = SPEED_50000, 2952 }, 2953 { 2954 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4, 2955 .mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4, 2956 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN, 2957 .speed = SPEED_100000, 2958 }, 2959 { 2960 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2, 2961 .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2, 2962 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN, 2963 .speed = SPEED_100000, 2964 }, 2965 { 2966 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, 2967 .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, 2968 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, 2969 .speed = SPEED_200000, 2970 }, 2971 }; 2972 2973 #define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode) 2974 2975 static void 2976 mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 2977 u32 ptys_eth_proto, 2978 struct ethtool_link_ksettings *cmd) 2979 { 2980 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2981 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2982 } 2983 2984 static void 2985 mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 2986 unsigned long *mode) 2987 { 2988 int i; 2989 2990 for (i = 0; i < link_mode->m_ethtool_len; i++) 2991 __set_bit(link_mode->mask_ethtool[i], mode); 2992 } 2993 2994 static void 2995 mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 2996 unsigned long *mode) 2997 { 2998 int i; 2999 3000 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3001 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) 3002 
mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3003 mode); 3004 } 3005 } 3006 3007 static u32 3008 mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 3009 { 3010 int i; 3011 3012 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3013 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) 3014 return mlxsw_sp2_port_link_mode[i].speed; 3015 } 3016 3017 return SPEED_UNKNOWN; 3018 } 3019 3020 static void 3021 mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 3022 u32 ptys_eth_proto, 3023 struct ethtool_link_ksettings *cmd) 3024 { 3025 cmd->base.speed = SPEED_UNKNOWN; 3026 cmd->base.duplex = DUPLEX_UNKNOWN; 3027 3028 if (!carrier_ok) 3029 return; 3030 3031 cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 3032 if (cmd->base.speed != SPEED_UNKNOWN) 3033 cmd->base.duplex = DUPLEX_FULL; 3034 } 3035 3036 static bool 3037 mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3038 const unsigned long *mode) 3039 { 3040 int cnt = 0; 3041 int i; 3042 3043 for (i = 0; i < link_mode->m_ethtool_len; i++) { 3044 if (test_bit(link_mode->mask_ethtool[i], mode)) 3045 cnt++; 3046 } 3047 3048 return cnt == link_mode->m_ethtool_len; 3049 } 3050 3051 static u32 3052 mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, 3053 const struct ethtool_link_ksettings *cmd) 3054 { 3055 u32 ptys_proto = 0; 3056 int i; 3057 3058 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3059 if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3060 cmd->link_modes.advertising)) 3061 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3062 } 3063 return ptys_proto; 3064 } 3065 3066 static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed) 3067 { 3068 u32 ptys_proto = 0; 3069 int i; 3070 3071 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3072 if (speed == mlxsw_sp2_port_link_mode[i].speed) 3073 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3074 } 3075 return ptys_proto; 
3076 } 3077 3078 static u32 3079 mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 3080 { 3081 u32 ptys_proto = 0; 3082 int i; 3083 3084 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3085 if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed) 3086 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3087 } 3088 return ptys_proto; 3089 } 3090 3091 static int 3092 mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3093 u32 *base_speed) 3094 { 3095 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3096 u32 eth_proto_cap; 3097 int err; 3098 3099 /* In Spectrum-2, the speed of 1x can change from port to port, so query 3100 * it from firmware. 3101 */ 3102 mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false); 3103 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3104 if (err) 3105 return err; 3106 mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 3107 3108 if (eth_proto_cap & 3109 MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) { 3110 *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G; 3111 return 0; 3112 } 3113 3114 if (eth_proto_cap & 3115 MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) { 3116 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 3117 return 0; 3118 } 3119 3120 return -EIO; 3121 } 3122 3123 static void 3124 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 3125 u8 local_port, u32 proto_admin, 3126 bool autoneg) 3127 { 3128 mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); 3129 } 3130 3131 static void 3132 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 3133 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 3134 u32 *p_eth_proto_oper) 3135 { 3136 mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, 3137 p_eth_proto_admin, p_eth_proto_oper); 3138 } 3139 3140 static const struct mlxsw_sp_port_type_speed_ops 3141 mlxsw_sp2_port_type_speed_ops = { 3142 .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, 3143 
.from_ptys_link = mlxsw_sp2_from_ptys_link, 3144 .from_ptys_speed = mlxsw_sp2_from_ptys_speed, 3145 .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, 3146 .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, 3147 .to_ptys_speed = mlxsw_sp2_to_ptys_speed, 3148 .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed, 3149 .port_speed_base = mlxsw_sp2_port_speed_base, 3150 .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, 3151 .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, 3152 }; 3153 3154 static void 3155 mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, 3156 struct ethtool_link_ksettings *cmd) 3157 { 3158 const struct mlxsw_sp_port_type_speed_ops *ops; 3159 3160 ops = mlxsw_sp->port_type_speed_ops; 3161 3162 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 3163 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 3164 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 3165 3166 ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); 3167 ops->from_ptys_link(mlxsw_sp, eth_proto_cap, cmd->link_modes.supported); 3168 } 3169 3170 static void 3171 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, 3172 u32 eth_proto_admin, bool autoneg, 3173 struct ethtool_link_ksettings *cmd) 3174 { 3175 const struct mlxsw_sp_port_type_speed_ops *ops; 3176 3177 ops = mlxsw_sp->port_type_speed_ops; 3178 3179 if (!autoneg) 3180 return; 3181 3182 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 3183 ops->from_ptys_link(mlxsw_sp, eth_proto_admin, 3184 cmd->link_modes.advertising); 3185 } 3186 3187 static u8 3188 mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type) 3189 { 3190 switch (connector_type) { 3191 case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: 3192 return PORT_OTHER; 3193 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: 3194 return PORT_NONE; 3195 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: 3196 return PORT_TP; 3197 case 
MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: 3198 return PORT_AUI; 3199 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: 3200 return PORT_BNC; 3201 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: 3202 return PORT_MII; 3203 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: 3204 return PORT_FIBRE; 3205 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: 3206 return PORT_DA; 3207 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: 3208 return PORT_OTHER; 3209 default: 3210 WARN_ON_ONCE(1); 3211 return PORT_OTHER; 3212 } 3213 } 3214 3215 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 3216 struct ethtool_link_ksettings *cmd) 3217 { 3218 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 3219 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3220 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3221 const struct mlxsw_sp_port_type_speed_ops *ops; 3222 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3223 u8 connector_type; 3224 bool autoneg; 3225 int err; 3226 3227 ops = mlxsw_sp->port_type_speed_ops; 3228 3229 autoneg = mlxsw_sp_port->link.autoneg; 3230 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3231 0, false); 3232 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3233 if (err) 3234 return err; 3235 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3236 ð_proto_admin, ð_proto_oper); 3237 3238 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd); 3239 3240 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3241 cmd); 3242 3243 cmd->base.autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; 3244 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3245 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3246 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3247 eth_proto_oper, cmd); 3248 3249 return 0; 3250 } 3251 3252 static int 3253 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3254 const struct ethtool_link_ksettings *cmd) 3255 { 3256 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3257 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3258 const struct mlxsw_sp_port_type_speed_ops *ops; 3259 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3260 u32 eth_proto_cap, eth_proto_new; 3261 bool autoneg; 3262 int err; 3263 3264 ops = mlxsw_sp->port_type_speed_ops; 3265 3266 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3267 0, false); 3268 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3269 if (err) 3270 return err; 3271 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3272 3273 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 3274 if (!autoneg && cmd->base.speed == SPEED_56000) { 3275 netdev_err(dev, "56G not supported with autoneg off\n"); 3276 return -EINVAL; 3277 } 3278 eth_proto_new = autoneg ? 
3279 ops->to_ptys_advert_link(mlxsw_sp, cmd) : 3280 ops->to_ptys_speed(mlxsw_sp, cmd->base.speed); 3281 3282 eth_proto_new = eth_proto_new & eth_proto_cap; 3283 if (!eth_proto_new) { 3284 netdev_err(dev, "No supported speed requested\n"); 3285 return -EINVAL; 3286 } 3287 3288 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3289 eth_proto_new, autoneg); 3290 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3291 if (err) 3292 return err; 3293 3294 mlxsw_sp_port->link.autoneg = autoneg; 3295 3296 if (!netif_running(dev)) 3297 return 0; 3298 3299 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3300 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3301 3302 return 0; 3303 } 3304 3305 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3306 struct ethtool_modinfo *modinfo) 3307 { 3308 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3309 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3310 int err; 3311 3312 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3313 mlxsw_sp_port->mapping.module, 3314 modinfo); 3315 3316 return err; 3317 } 3318 3319 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3320 struct ethtool_eeprom *ee, 3321 u8 *data) 3322 { 3323 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3324 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3325 int err; 3326 3327 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3328 mlxsw_sp_port->mapping.module, ee, 3329 data); 3330 3331 return err; 3332 } 3333 3334 static int 3335 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) 3336 { 3337 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3338 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3339 3340 return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); 3341 } 3342 3343 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 3344 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 3345 .get_link = ethtool_op_get_link, 3346 
.get_pauseparam = mlxsw_sp_port_get_pauseparam, 3347 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 3348 .get_strings = mlxsw_sp_port_get_strings, 3349 .set_phys_id = mlxsw_sp_port_set_phys_id, 3350 .get_ethtool_stats = mlxsw_sp_port_get_stats, 3351 .get_sset_count = mlxsw_sp_port_get_sset_count, 3352 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 3353 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 3354 .get_module_info = mlxsw_sp_get_module_info, 3355 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 3356 .get_ts_info = mlxsw_sp_get_ts_info, 3357 }; 3358 3359 static int 3360 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 3361 { 3362 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3363 const struct mlxsw_sp_port_type_speed_ops *ops; 3364 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3365 u32 eth_proto_admin; 3366 u32 upper_speed; 3367 u32 base_speed; 3368 int err; 3369 3370 ops = mlxsw_sp->port_type_speed_ops; 3371 3372 err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port, 3373 &base_speed); 3374 if (err) 3375 return err; 3376 upper_speed = base_speed * width; 3377 3378 eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed); 3379 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3380 eth_proto_admin, mlxsw_sp_port->link.autoneg); 3381 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3382 } 3383 3384 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 3385 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 3386 bool dwrr, u8 dwrr_weight) 3387 { 3388 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3389 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3390 3391 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3392 next_index); 3393 mlxsw_reg_qeec_de_set(qeec_pl, true); 3394 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 3395 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 3396 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3397 } 
3398 3399 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 3400 enum mlxsw_reg_qeec_hr hr, u8 index, 3401 u8 next_index, u32 maxrate) 3402 { 3403 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3404 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3405 3406 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3407 next_index); 3408 mlxsw_reg_qeec_mase_set(qeec_pl, true); 3409 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 3410 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3411 } 3412 3413 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, 3414 enum mlxsw_reg_qeec_hr hr, u8 index, 3415 u8 next_index, u32 minrate) 3416 { 3417 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3418 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3419 3420 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3421 next_index); 3422 mlxsw_reg_qeec_mise_set(qeec_pl, true); 3423 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); 3424 3425 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3426 } 3427 3428 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 3429 u8 switch_prio, u8 tclass) 3430 { 3431 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3432 char qtct_pl[MLXSW_REG_QTCT_LEN]; 3433 3434 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 3435 tclass); 3436 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 3437 } 3438 3439 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 3440 { 3441 int err, i; 3442 3443 /* Setup the elements hierarcy, so that each TC is linked to 3444 * one subgroup, which are all member in the same group. 
3445 */ 3446 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3447 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 3448 0); 3449 if (err) 3450 return err; 3451 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3452 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3453 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 3454 0, false, 0); 3455 if (err) 3456 return err; 3457 } 3458 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3459 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3460 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 3461 false, 0); 3462 if (err) 3463 return err; 3464 3465 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3466 MLXSW_REG_QEEC_HIERARCY_TC, 3467 i + 8, i, 3468 true, 100); 3469 if (err) 3470 return err; 3471 } 3472 3473 /* Make sure the max shaper is disabled in all hierarchies that support 3474 * it. Note that this disables ptps (PTP shaper), but that is intended 3475 * for the initial configuration. 3476 */ 3477 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3478 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 3479 MLXSW_REG_QEEC_MAS_DIS); 3480 if (err) 3481 return err; 3482 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3483 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3484 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 3485 i, 0, 3486 MLXSW_REG_QEEC_MAS_DIS); 3487 if (err) 3488 return err; 3489 } 3490 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3491 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3492 MLXSW_REG_QEEC_HIERARCY_TC, 3493 i, i, 3494 MLXSW_REG_QEEC_MAS_DIS); 3495 if (err) 3496 return err; 3497 3498 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3499 MLXSW_REG_QEEC_HIERARCY_TC, 3500 i + 8, i, 3501 MLXSW_REG_QEEC_MAS_DIS); 3502 if (err) 3503 return err; 3504 } 3505 3506 /* Configure the min shaper for multicast TCs. */ 3507 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3508 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 3509 MLXSW_REG_QEEC_HIERARCY_TC, 3510 i + 8, i, 3511 MLXSW_REG_QEEC_MIS_MIN); 3512 if (err) 3513 return err; 3514 } 3515 3516 /* Map all priorities to traffic class 0. 
*/ 3517 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3518 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 3519 if (err) 3520 return err; 3521 } 3522 3523 return 0; 3524 } 3525 3526 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 3527 bool enable) 3528 { 3529 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3530 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 3531 3532 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 3533 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 3534 } 3535 3536 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3537 bool split, u8 module, u8 width, u8 lane) 3538 { 3539 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 3540 struct mlxsw_sp_port *mlxsw_sp_port; 3541 struct net_device *dev; 3542 int err; 3543 3544 err = mlxsw_core_port_init(mlxsw_sp->core, local_port, 3545 module + 1, split, lane / width, 3546 mlxsw_sp->base_mac, 3547 sizeof(mlxsw_sp->base_mac)); 3548 if (err) { 3549 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 3550 local_port); 3551 return err; 3552 } 3553 3554 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 3555 if (!dev) { 3556 err = -ENOMEM; 3557 goto err_alloc_etherdev; 3558 } 3559 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 3560 mlxsw_sp_port = netdev_priv(dev); 3561 mlxsw_sp_port->dev = dev; 3562 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3563 mlxsw_sp_port->local_port = local_port; 3564 mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; 3565 mlxsw_sp_port->split = split; 3566 mlxsw_sp_port->mapping.module = module; 3567 mlxsw_sp_port->mapping.width = width; 3568 mlxsw_sp_port->mapping.lane = lane; 3569 mlxsw_sp_port->link.autoneg = 1; 3570 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 3571 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 3572 3573 mlxsw_sp_port->pcpu_stats = 3574 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 3575 if (!mlxsw_sp_port->pcpu_stats) { 3576 err = -ENOMEM; 3577 goto err_alloc_stats; 
3578 } 3579 3580 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 3581 GFP_KERNEL); 3582 if (!mlxsw_sp_port->sample) { 3583 err = -ENOMEM; 3584 goto err_alloc_sample; 3585 } 3586 3587 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 3588 &update_stats_cache); 3589 3590 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 3591 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 3592 3593 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane); 3594 if (err) { 3595 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 3596 mlxsw_sp_port->local_port); 3597 goto err_port_module_map; 3598 } 3599 3600 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 3601 if (err) { 3602 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 3603 mlxsw_sp_port->local_port); 3604 goto err_port_swid_set; 3605 } 3606 3607 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 3608 if (err) { 3609 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 3610 mlxsw_sp_port->local_port); 3611 goto err_dev_addr_init; 3612 } 3613 3614 netif_carrier_off(dev); 3615 3616 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 3617 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 3618 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; 3619 3620 dev->min_mtu = 0; 3621 dev->max_mtu = ETH_MAX_MTU; 3622 3623 /* Each packet needs to have a Tx header (metadata) on top all other 3624 * headers. 
3625 */ 3626 dev->needed_headroom = MLXSW_TXHDR_LEN; 3627 3628 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 3629 if (err) { 3630 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 3631 mlxsw_sp_port->local_port); 3632 goto err_port_system_port_mapping_set; 3633 } 3634 3635 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 3636 if (err) { 3637 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 3638 mlxsw_sp_port->local_port); 3639 goto err_port_speed_by_width_set; 3640 } 3641 3642 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 3643 if (err) { 3644 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 3645 mlxsw_sp_port->local_port); 3646 goto err_port_mtu_set; 3647 } 3648 3649 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3650 if (err) 3651 goto err_port_admin_status_set; 3652 3653 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 3654 if (err) { 3655 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 3656 mlxsw_sp_port->local_port); 3657 goto err_port_buffers_init; 3658 } 3659 3660 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 3661 if (err) { 3662 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 3663 mlxsw_sp_port->local_port); 3664 goto err_port_ets_init; 3665 } 3666 3667 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 3668 if (err) { 3669 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 3670 mlxsw_sp_port->local_port); 3671 goto err_port_tc_mc_mode; 3672 } 3673 3674 /* ETS and buffers must be initialized before DCB. 
*/ 3675 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 3676 if (err) { 3677 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 3678 mlxsw_sp_port->local_port); 3679 goto err_port_dcb_init; 3680 } 3681 3682 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 3683 if (err) { 3684 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 3685 mlxsw_sp_port->local_port); 3686 goto err_port_fids_init; 3687 } 3688 3689 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 3690 if (err) { 3691 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 3692 mlxsw_sp_port->local_port); 3693 goto err_port_qdiscs_init; 3694 } 3695 3696 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 3697 if (err) { 3698 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 3699 mlxsw_sp_port->local_port); 3700 goto err_port_nve_init; 3701 } 3702 3703 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 3704 if (err) { 3705 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", 3706 mlxsw_sp_port->local_port); 3707 goto err_port_pvid_set; 3708 } 3709 3710 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 3711 MLXSW_SP_DEFAULT_VID); 3712 if (IS_ERR(mlxsw_sp_port_vlan)) { 3713 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 3714 mlxsw_sp_port->local_port); 3715 err = PTR_ERR(mlxsw_sp_port_vlan); 3716 goto err_port_vlan_create; 3717 } 3718 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; 3719 3720 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, 3721 mlxsw_sp->ptp_ops->shaper_work); 3722 3723 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 3724 err = register_netdev(dev); 3725 if (err) { 3726 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 3727 mlxsw_sp_port->local_port); 3728 goto err_register_netdev; 3729 } 3730 3731 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 3732 mlxsw_sp_port, dev); 3733 
mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 3734 return 0; 3735 3736 err_register_netdev: 3737 mlxsw_sp->ports[local_port] = NULL; 3738 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 3739 err_port_vlan_create: 3740 err_port_pvid_set: 3741 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3742 err_port_nve_init: 3743 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3744 err_port_qdiscs_init: 3745 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3746 err_port_fids_init: 3747 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3748 err_port_dcb_init: 3749 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3750 err_port_tc_mc_mode: 3751 err_port_ets_init: 3752 err_port_buffers_init: 3753 err_port_admin_status_set: 3754 err_port_mtu_set: 3755 err_port_speed_by_width_set: 3756 err_port_system_port_mapping_set: 3757 err_dev_addr_init: 3758 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3759 err_port_swid_set: 3760 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3761 err_port_module_map: 3762 kfree(mlxsw_sp_port->sample); 3763 err_alloc_sample: 3764 free_percpu(mlxsw_sp_port->pcpu_stats); 3765 err_alloc_stats: 3766 free_netdev(dev); 3767 err_alloc_etherdev: 3768 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3769 return err; 3770 } 3771 3772 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3773 { 3774 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3775 3776 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 3777 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); 3778 mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 3779 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3780 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3781 mlxsw_sp->ports[local_port] = NULL; 3782 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 3783 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3784 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3785 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3786 
mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3787 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3788 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3789 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3790 kfree(mlxsw_sp_port->sample); 3791 free_percpu(mlxsw_sp_port->pcpu_stats); 3792 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3793 free_netdev(mlxsw_sp_port->dev); 3794 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3795 } 3796 3797 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3798 { 3799 return mlxsw_sp->ports[local_port] != NULL; 3800 } 3801 3802 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 3803 { 3804 int i; 3805 3806 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 3807 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3808 mlxsw_sp_port_remove(mlxsw_sp, i); 3809 kfree(mlxsw_sp->port_to_module); 3810 kfree(mlxsw_sp->ports); 3811 } 3812 3813 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3814 { 3815 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3816 u8 module, width, lane; 3817 size_t alloc_size; 3818 int i; 3819 int err; 3820 3821 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 3822 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 3823 if (!mlxsw_sp->ports) 3824 return -ENOMEM; 3825 3826 mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int), 3827 GFP_KERNEL); 3828 if (!mlxsw_sp->port_to_module) { 3829 err = -ENOMEM; 3830 goto err_port_to_module_alloc; 3831 } 3832 3833 for (i = 1; i < max_ports; i++) { 3834 /* Mark as invalid */ 3835 mlxsw_sp->port_to_module[i] = -1; 3836 3837 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 3838 &width, &lane); 3839 if (err) 3840 goto err_port_module_info_get; 3841 if (!width) 3842 continue; 3843 mlxsw_sp->port_to_module[i] = module; 3844 err = mlxsw_sp_port_create(mlxsw_sp, i, false, 3845 module, width, lane); 3846 if (err) 3847 goto err_port_create; 3848 } 3849 return 0; 3850 3851 
err_port_create: 3852 err_port_module_info_get: 3853 for (i--; i >= 1; i--) 3854 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3855 mlxsw_sp_port_remove(mlxsw_sp, i); 3856 kfree(mlxsw_sp->port_to_module); 3857 err_port_to_module_alloc: 3858 kfree(mlxsw_sp->ports); 3859 return err; 3860 } 3861 3862 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 3863 { 3864 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 3865 3866 return local_port - offset; 3867 } 3868 3869 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 3870 u8 module, unsigned int count, u8 offset) 3871 { 3872 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 3873 int err, i; 3874 3875 for (i = 0; i < count; i++) { 3876 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset, 3877 true, module, width, i * width); 3878 if (err) 3879 goto err_port_create; 3880 } 3881 3882 return 0; 3883 3884 err_port_create: 3885 for (i--; i >= 0; i--) 3886 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 3887 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 3888 return err; 3889 } 3890 3891 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 3892 u8 base_port, unsigned int count) 3893 { 3894 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 3895 int i; 3896 3897 /* Split by four means we need to re-create two ports, otherwise 3898 * only one. 
3899 */ 3900 count = count / 2; 3901 3902 for (i = 0; i < count; i++) { 3903 local_port = base_port + i * 2; 3904 if (mlxsw_sp->port_to_module[local_port] < 0) 3905 continue; 3906 module = mlxsw_sp->port_to_module[local_port]; 3907 3908 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 3909 width, 0); 3910 } 3911 } 3912 3913 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 3914 unsigned int count, 3915 struct netlink_ext_ack *extack) 3916 { 3917 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3918 u8 local_ports_in_1x, local_ports_in_2x, offset; 3919 struct mlxsw_sp_port *mlxsw_sp_port; 3920 u8 module, cur_width, base_port; 3921 int i; 3922 int err; 3923 3924 if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) || 3925 !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X)) 3926 return -EIO; 3927 3928 local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X); 3929 local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X); 3930 3931 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3932 if (!mlxsw_sp_port) { 3933 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3934 local_port); 3935 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 3936 return -EINVAL; 3937 } 3938 3939 module = mlxsw_sp_port->mapping.module; 3940 cur_width = mlxsw_sp_port->mapping.width; 3941 3942 if (count != 2 && count != 4) { 3943 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 3944 NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports"); 3945 return -EINVAL; 3946 } 3947 3948 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 3949 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 3950 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further"); 3951 return -EINVAL; 3952 } 3953 3954 /* Make sure we have enough slave (even) ports for the split. 
*/ 3955 if (count == 2) { 3956 offset = local_ports_in_2x; 3957 base_port = local_port; 3958 if (mlxsw_sp->ports[base_port + local_ports_in_2x]) { 3959 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3960 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 3961 return -EINVAL; 3962 } 3963 } else { 3964 offset = local_ports_in_1x; 3965 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3966 if (mlxsw_sp->ports[base_port + 1] || 3967 mlxsw_sp->ports[base_port + 3]) { 3968 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3969 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 3970 return -EINVAL; 3971 } 3972 } 3973 3974 for (i = 0; i < count; i++) 3975 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 3976 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 3977 3978 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count, 3979 offset); 3980 if (err) { 3981 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 3982 goto err_port_split_create; 3983 } 3984 3985 return 0; 3986 3987 err_port_split_create: 3988 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3989 return err; 3990 } 3991 3992 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 3993 struct netlink_ext_ack *extack) 3994 { 3995 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3996 u8 local_ports_in_1x, local_ports_in_2x, offset; 3997 struct mlxsw_sp_port *mlxsw_sp_port; 3998 u8 cur_width, base_port; 3999 unsigned int count; 4000 int i; 4001 4002 if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) || 4003 !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X)) 4004 return -EIO; 4005 4006 local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X); 4007 local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X); 4008 4009 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4010 if (!mlxsw_sp_port) { 4011 dev_err(mlxsw_sp->bus_info->dev, "Port number 
\"%d\" does not exist\n", 4012 local_port); 4013 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 4014 return -EINVAL; 4015 } 4016 4017 if (!mlxsw_sp_port->split) { 4018 netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); 4019 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 4020 return -EINVAL; 4021 } 4022 4023 cur_width = mlxsw_sp_port->mapping.width; 4024 count = cur_width == 1 ? 4 : 2; 4025 4026 if (count == 2) 4027 offset = local_ports_in_2x; 4028 else 4029 offset = local_ports_in_1x; 4030 4031 base_port = mlxsw_sp_cluster_base_port_get(local_port); 4032 4033 /* Determine which ports to remove. */ 4034 if (count == 2 && local_port >= base_port + 2) 4035 base_port = base_port + 2; 4036 4037 for (i = 0; i < count; i++) 4038 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4039 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4040 4041 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 4042 4043 return 0; 4044 } 4045 4046 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 4047 char *pude_pl, void *priv) 4048 { 4049 struct mlxsw_sp *mlxsw_sp = priv; 4050 struct mlxsw_sp_port *mlxsw_sp_port; 4051 enum mlxsw_reg_pude_oper_status status; 4052 u8 local_port; 4053 4054 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 4055 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4056 if (!mlxsw_sp_port) 4057 return; 4058 4059 status = mlxsw_reg_pude_oper_status_get(pude_pl); 4060 if (status == MLXSW_PORT_OPER_STATUS_UP) { 4061 netdev_info(mlxsw_sp_port->dev, "link up\n"); 4062 netif_carrier_on(mlxsw_sp_port->dev); 4063 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 4064 } else { 4065 netdev_info(mlxsw_sp_port->dev, "link down\n"); 4066 netif_carrier_off(mlxsw_sp_port->dev); 4067 } 4068 } 4069 4070 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 4071 char *mtpptr_pl, bool ingress) 4072 { 4073 u8 local_port; 4074 u8 num_rec; 4075 int i; 4076 4077 local_port = 
mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 4078 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 4079 for (i = 0; i < num_rec; i++) { 4080 u8 domain_number; 4081 u8 message_type; 4082 u16 sequence_id; 4083 u64 timestamp; 4084 4085 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 4086 &domain_number, &sequence_id, 4087 ×tamp); 4088 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 4089 message_type, domain_number, 4090 sequence_id, timestamp); 4091 } 4092 } 4093 4094 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 4095 char *mtpptr_pl, void *priv) 4096 { 4097 struct mlxsw_sp *mlxsw_sp = priv; 4098 4099 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 4100 } 4101 4102 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 4103 char *mtpptr_pl, void *priv) 4104 { 4105 struct mlxsw_sp *mlxsw_sp = priv; 4106 4107 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 4108 } 4109 4110 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 4111 u8 local_port, void *priv) 4112 { 4113 struct mlxsw_sp *mlxsw_sp = priv; 4114 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4115 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 4116 4117 if (unlikely(!mlxsw_sp_port)) { 4118 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 4119 local_port); 4120 return; 4121 } 4122 4123 skb->dev = mlxsw_sp_port->dev; 4124 4125 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 4126 u64_stats_update_begin(&pcpu_stats->syncp); 4127 pcpu_stats->rx_packets++; 4128 pcpu_stats->rx_bytes += skb->len; 4129 u64_stats_update_end(&pcpu_stats->syncp); 4130 4131 skb->protocol = eth_type_trans(skb, skb->dev); 4132 netif_receive_skb(skb); 4133 } 4134 4135 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 4136 void *priv) 4137 { 4138 skb->offload_fwd_mark = 1; 4139 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 
4140 } 4141 4142 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 4143 u8 local_port, void *priv) 4144 { 4145 skb->offload_l3_fwd_mark = 1; 4146 skb->offload_fwd_mark = 1; 4147 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4148 } 4149 4150 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 4151 void *priv) 4152 { 4153 struct mlxsw_sp *mlxsw_sp = priv; 4154 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4155 struct psample_group *psample_group; 4156 u32 size; 4157 4158 if (unlikely(!mlxsw_sp_port)) { 4159 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 4160 local_port); 4161 goto out; 4162 } 4163 if (unlikely(!mlxsw_sp_port->sample)) { 4164 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 4165 local_port); 4166 goto out; 4167 } 4168 4169 size = mlxsw_sp_port->sample->truncate ? 4170 mlxsw_sp_port->sample->trunc_size : skb->len; 4171 4172 rcu_read_lock(); 4173 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 4174 if (!psample_group) 4175 goto out_unlock; 4176 psample_sample_packet(psample_group, skb, size, 4177 mlxsw_sp_port->dev->ifindex, 0, 4178 mlxsw_sp_port->sample->rate); 4179 out_unlock: 4180 rcu_read_unlock(); 4181 out: 4182 consume_skb(skb); 4183 } 4184 4185 static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port, 4186 void *priv) 4187 { 4188 struct mlxsw_sp *mlxsw_sp = priv; 4189 4190 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 4191 } 4192 4193 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4194 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 4195 _is_ctrl, SP_##_trap_group, DISCARD) 4196 4197 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4198 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 4199 _is_ctrl, SP_##_trap_group, DISCARD) 4200 
#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)	\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

/* Trap/event listeners common to all Spectrum ASICs. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
		  false, SP_LLDP, DISCARD),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
	/* PTP traps */
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU,
		  false, SP_PTP0, DISCARD),
	MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false),
};

/* Listeners specific to Spectrum-1 (PTP timestamp FIFO events). */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

/* Configure a policer (rate limiter) for each CPU-bound trap group.
 * is_bytes stays false, so rate/burst_size count packets, not bytes.
 * Groups without a case in the switch are left unconfigured.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			rate = 24 * 1024;
			burst_size = 12;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			rate = 19 * 1024;
			burst_size = 12;
			break;
		default:
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Map each trap group to a priority and traffic class, and bind it to the
 * policer of the same index configured by mlxsw_sp_cpu_policers_set().
 * The EVENT group gets no policer; unknown groups are skipped.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* A real policer id must be within the device's range. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Register an array of trap listeners; on failure, unregister the ones
 * already registered before returning the error.
 */
static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_listener listeners[],
				   size_t listeners_count)
{
	int i;
	int err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &listeners[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
	return err;
}

/* Unregister an array of listeners previously registered by
 * mlxsw_sp_traps_register().
 */
static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_listener listeners[],
				      size_t listeners_count)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
}

/* Configure CPU policers and trap groups, then register the common
 * listeners followed by the ASIC-specific ones (mlxsw_sp->listeners).
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	return err;
}

/* Unregister listeners in reverse order of mlxsw_sp_traps_init(). */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure the LAG hash fields (SLCR register) and allocate the per-LAG
 * upper-device table. The hash seed is derived from the base MAC.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

/* Free the per-LAG table allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Direct EMADs to the default trap group with no policer attached. */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

/* Spectrum-1 PTP implementation. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
};

/* Spectrum-2 PTP implementation. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
};

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

/* Common driver init shared by Spectrum-1/-2. Per-ASIC wrappers
 * (mlxsw_sp[12]_init) bind the ops tables before calling here. On any
 * failure, already-initialized subsystems are torn down in reverse order.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Bind Spectrum-1 specific operations and listeners, then run the
 * common init.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Bind Spectrum-2 specific operations, then run the common init.
 * Note: no req_rev/fw_filename and no extra listener array here, unlike
 * Spectrum-1.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Driver teardown: undo mlxsw_sp_init() in reverse order. */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

/* Device configuration profile for Spectrum-1 (includes KVD sizing). */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Device configuration profile for Spectrum-2 (no KVD sizing fields). */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static void
mlxsw_sp_resource_size_params_prepare(struct
mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* KVD total is fixed; each partition may take what the other two
	 * minimums leave over, in granularity-sized entry units.
	 */
	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the KVD devlink resource tree (KVD -> linear/hash-double/
 * hash-single), splitting the non-linear remainder between the two hash
 * partitions by the ratio from the Spectrum-1 config profile.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_sp1_resources_kvd_register(mlxsw_core);
}

/* Spectrum-2 exposes no KVD resources via devlink. */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	return 0;
}

/* Resolve the linear/hash-double/hash-single KVD partition sizes, taking
 * user-provided sizes from devlink when available and falling back to the
 * profile defaults and parts ratio otherwise.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal.
*/
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* devlink 'fw_load_policy' validation: accept only 'driver' or 'flash'. */
static int
mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
					       union devlink_param_value val,
					       struct netlink_ext_ack *extack)
{
	if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
	    (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

/* devlink parameters common to all Spectrum ASICs. */
static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};

/* Register the common devlink parameters and set the fw_load_policy
 * default to 'driver'.
 */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}

static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp_devlink_params,
				  ARRAY_SIZE(mlxsw_sp_devlink_params));
}

/* devlink get handler for the ACL region rehash interval parameter. */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

/* devlink set handler for the ACL region rehash interval parameter. */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

/* devlink parameters available on Spectrum-2 only. */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};

/* Register the common parameters plus the Spectrum-2 only ones. */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = mlxsw_sp_params_register(mlxsw_core);
	if (err)
		return err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		goto err_devlink_params_register;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;

err_devlink_params_register:
	mlxsw_sp_params_unregister(mlxsw_core);
	return err;
}

static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
	mlxsw_sp_params_unregister(mlxsw_core);
}

/* Strip the TX header and notify the ASIC-specific PTP code that this
 * skb was transmitted.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	/* Strip the device Tx header before handing the skb to PTP code. */
	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}

/* Core driver description for Spectrum-1 (SP1) devices. */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update			= mlxsw_sp_flash_update,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.params_register		= mlxsw_sp_params_register,
	.params_unregister		= mlxsw_sp_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.res_query_enabled		= true,
};

/* Core driver description for Spectrum-2 (SP2) devices. Note: unlike
 * SP1, no .kvd_sizes_get and SP2-specific init/resources/params ops.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		=
mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update			= mlxsw_sp_flash_update,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
};

/* Return true if @dev is one of this driver's physical port netdevs. */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

/* Lower-device walk callback: report (via @data) the first lower device
 * that is an mlxsw_sp port; returning 1 stops the walk.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

/* Find the mlxsw_sp port at or below @dev, or NULL if there is none.
 * NOTE(review): uses the non-RCU lower-dev walk — presumably requires
 * RTNL; the _rcu variant below exists for lockless contexts.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Resolve the mlxsw_sp instance at or below @dev, or NULL if none. */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ?
mlxsw_sp_port->mlxsw_sp : NULL;
}

/* RCU-safe variant of mlxsw_sp_port_dev_lower_find(). */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Like mlxsw_sp_port_dev_lower_find_rcu(), but additionally takes a
 * reference on the found port's netdev. Pair with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

/* Make @mlxsw_sp_port leave the bridge @lag_dev itself is a port of and
 * the bridges of all bridged uppers of @lag_dev.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

/* Create LAG @lag_id in the device via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Destroy LAG @lag_id in the device via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Add the port as a collector of LAG @lag_id at @port_index (SLCOR). */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Remove the port from LAG @lag_id's collector (SLCOR). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable collection on the port for LAG @lag_id (SLCOR). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Disable collection on the port for LAG @lag_id (SLCOR). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Map @lag_dev to a device LAG index: reuse the index already bound to
 * it, otherwise hand out the first free one. Returns -EBUSY when all
 * MAX_LAG indexes are taken.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if
(lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

/* Validate that @lag_dev can be offloaded: a device LAG index must be
 * available and the LAG must use hash-based Tx. Failure reason is
 * reported via @extack.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

/* Find a free member slot in LAG @lag_id; -EBUSY when the LAG is full. */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

/* Join @mlxsw_sp_port to the LAG represented by @lag_dev; the device
 * LAG is created when the first member joins.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id,
port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;

err_col_port_add:
	/* Destroy the LAG only if this failed join was its creator. */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

/* Undo mlxsw_sp_port_lag_join(): detach the port from its LAG and
 * destroy the device LAG when the last member leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}

/* Add the port to LAG @lag_id's distributor (SLDR). */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl,
lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Remove the port from LAG @lag_id's distributor (SLDR). */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Enable collection and distribution on the port's LAG. Rolls back
 * collection if enabling distribution fails.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Disable distribution and collection on the port's LAG. Re-adds the
 * port to the distributor if disabling collection fails.
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Reflect the bonding driver's tx_enabled lower-state into the device. */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

/* Set the STP state of every VLAN on the port to forwarding (enable)
 * or discarding (disable) via the SPMS register.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp =
mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	/* Unlike the small SLDR/SLCOR payloads, SPMS is heap-allocated. */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Prepare the port for OVS: enable virtual-port mode, set STP state to
 * forwarding and make the port member in VLANs 1..4094 with learning
 * disabled. Each step is rolled back on failure.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning only for the VIDs already disabled above. */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

/* Undo mlxsw_sp_port_ovs_join() in reverse order. */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

static bool
mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	/* Count VxLAN lowers of the bridge; true when more than one. */
	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

/* Check that no two VxLAN devices under @br_dev map to the same VLAN. */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		/* Already-seen VLAN means two VxLANs share it: invalid. */
		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

/* Validate the offload constraints of a bridge holding VxLAN devices;
 * reports the violated constraint via @extack.
 */
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

/* Handle PRECHANGEUPPER/CHANGEUPPER notifications for a physical port:
 * validate the requested upper and join/leave it accordingly.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG, bridge, OVS and macvlan uppers are
		 * supported on top of a port netdev.
		 */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* Only a VLAN unlinked from a bridged upper needs
			 * handling here.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

/* Handle CHANGELOWERSTATE for a port that is a LAG member: propagate
 * the bonding lower-state change into the device.
 */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

/* Dispatch a netdev event on a physical port to the proper handler. */
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

/* Replicate a LAG netdev event to every mlxsw_sp member port. */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle PRECHANGEUPPER/CHANGEUPPER for a VLAN device on top of a
 * physical port: only bridge and macvlan uppers are supported.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* PRECHANGEUPPER should have rejected everything
			 * else; reaching here is a driver logic error.
			 */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

/* Replicate a VLAN-on-LAG netdev event to every mlxsw_sp member port. */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle events for a VLAN device on top of a bridge: only macvlan
 * uppers (on top of a router interface) are supported.
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct
netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Nothing to do for bridges without mlxsw_sp ports below them. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Dispatch an event on a VLAN device according to its real device
 * (port, LAG or bridge).
 */
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

/* Handle PRECHANGEUPPER/CHANGEUPPER events on a bridge device itself:
 * only VLAN and macvlan uppers are supported.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* On unlink, tear down router state tied to the upper. */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Reject any upper on a macvlan; VRF uppers are filtered out earlier
 * by mlxsw_sp_is_vrf_event() and handled elsewhere.
 */
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

/* True when a (PRE)CHANGEUPPER event involves an L3 master (VRF). */
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

/* Handle netdev events on a VxLAN device: join/leave its bridge in the
 * device on enslavement, interface up and interface down.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info
*cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Ignore bridges with no mlxsw_sp ports below them. */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

/* Main netdevice notifier of the driver: maintains SPAN state and
 * dispatches the event by device type.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	/* Invalidate any SPAN entry mirroring to a disappearing netdev. */
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* NOTE(review): the VxLAN case deliberately does not use
	 * "else if" — the device may also match a case below, and err
	 * can then be overwritten. Confirm this is the intended order.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

/* Validators for IPv4/IPv6 address additions on our netdevs. */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

/* PCI IDs served by the Spectrum-1 driver. */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX,
PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 6260 {0, }, 6261 }; 6262 6263 static struct pci_driver mlxsw_sp1_pci_driver = { 6264 .name = mlxsw_sp1_driver_name, 6265 .id_table = mlxsw_sp1_pci_id_table, 6266 }; 6267 6268 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 6269 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 6270 {0, }, 6271 }; 6272 6273 static struct pci_driver mlxsw_sp2_pci_driver = { 6274 .name = mlxsw_sp2_driver_name, 6275 .id_table = mlxsw_sp2_pci_id_table, 6276 }; 6277 6278 static int __init mlxsw_sp_module_init(void) 6279 { 6280 int err; 6281 6282 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6283 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6284 6285 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 6286 if (err) 6287 goto err_sp1_core_driver_register; 6288 6289 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 6290 if (err) 6291 goto err_sp2_core_driver_register; 6292 6293 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 6294 if (err) 6295 goto err_sp1_pci_driver_register; 6296 6297 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 6298 if (err) 6299 goto err_sp2_pci_driver_register; 6300 6301 return 0; 6302 6303 err_sp2_pci_driver_register: 6304 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6305 err_sp1_pci_driver_register: 6306 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6307 err_sp2_core_driver_register: 6308 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6309 err_sp1_core_driver_register: 6310 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6311 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6312 return err; 6313 } 6314 6315 static void __exit mlxsw_sp_module_exit(void) 6316 { 6317 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6318 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6319 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6320 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6321 
	/* Validator notifiers were registered first in module init, so they
	 * are removed last here.
	 */
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Expose both PCI ID tables for module autoloading via modalias. */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
/* Declare the SP1 firmware image so it is bundled/requested by userspace. */
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);