1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/jhash.h> 25 #include <net/switchdev.h> 26 #include <net/pkt_cls.h> 27 #include <net/tc_act/tc_mirred.h> 28 #include <net/netevent.h> 29 #include <net/tc_act/tc_sample.h> 30 #include <net/addrconf.h> 31 32 #include "spectrum.h" 33 #include "pci.h" 34 #include "core.h" 35 #include "core_env.h" 36 #include "reg.h" 37 #include "port.h" 38 #include "trap.h" 39 #include "txheader.h" 40 #include "spectrum_cnt.h" 41 #include "spectrum_dpipe.h" 42 #include "spectrum_acl_flex_actions.h" 43 #include "spectrum_span.h" 44 #include "spectrum_ptp.h" 45 #include "../mlxfw/mlxfw.h" 46 47 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) 48 49 #define MLXSW_SP1_FWREV_MAJOR 13 50 #define MLXSW_SP1_FWREV_MINOR 2000 51 #define MLXSW_SP1_FWREV_SUBMINOR 1122 52 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 53 54 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 55 .major = MLXSW_SP1_FWREV_MAJOR, 56 .minor = MLXSW_SP1_FWREV_MINOR, 57 .subminor = MLXSW_SP1_FWREV_SUBMINOR, 58 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 59 }; 60 61 #define MLXSW_SP1_FW_FILENAME \ 62 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 63 "." __stringify(MLXSW_SP1_FWREV_MINOR) \ 64 "." 
__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" 65 66 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 67 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 68 static const char mlxsw_sp_driver_version[] = "1.0"; 69 70 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 71 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 72 }; 73 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 74 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 75 }; 76 77 /* tx_hdr_version 78 * Tx header version. 79 * Must be set to 1. 80 */ 81 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 82 83 /* tx_hdr_ctl 84 * Packet control type. 85 * 0 - Ethernet control (e.g. EMADs, LACP) 86 * 1 - Ethernet data 87 */ 88 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 89 90 /* tx_hdr_proto 91 * Packet protocol type. Must be set to 1 (Ethernet). 92 */ 93 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 94 95 /* tx_hdr_rx_is_router 96 * Packet is sent from the router. Valid for data packets only. 97 */ 98 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 99 100 /* tx_hdr_fid_valid 101 * Indicates if the 'fid' field is valid and should be used for 102 * forwarding lookup. Valid for data packets only. 103 */ 104 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 105 106 /* tx_hdr_swid 107 * Switch partition ID. Must be set to 0. 108 */ 109 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 110 111 /* tx_hdr_control_tclass 112 * Indicates if the packet should use the control TClass and not one 113 * of the data TClasses. 114 */ 115 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 116 117 /* tx_hdr_etclass 118 * Egress TClass to be used on the egress device on the egress port. 119 */ 120 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 121 122 /* tx_hdr_port_mid 123 * Destination local port for unicast packets. 124 * Destination multicast ID for multicast packets. 
125 * 126 * Control packets are directed to a specific egress port, while data 127 * packets are transmitted through the CPU port (0) into the switch partition, 128 * where forwarding rules are applied. 129 */ 130 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 131 132 /* tx_hdr_fid 133 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 134 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 135 * Valid for data packets only. 136 */ 137 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 138 139 /* tx_hdr_type 140 * 0 - Data packets 141 * 6 - Control packets 142 */ 143 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 144 145 struct mlxsw_sp_mlxfw_dev { 146 struct mlxfw_dev mlxfw_dev; 147 struct mlxsw_sp *mlxsw_sp; 148 }; 149 150 struct mlxsw_sp_ptp_ops { 151 struct mlxsw_sp_ptp_clock * 152 (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev); 153 void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock); 154 155 struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp); 156 void (*fini)(struct mlxsw_sp_ptp_state *ptp_state); 157 158 /* Notify a driver that a packet that might be PTP was received. Driver 159 * is responsible for freeing the passed-in SKB. 160 */ 161 void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 162 u8 local_port); 163 164 /* Notify a driver that a timestamped packet was transmitted. Driver 165 * is responsible for freeing the passed-in SKB. 
166 */ 167 void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 168 u8 local_port); 169 170 int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, 171 struct hwtstamp_config *config); 172 int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port, 173 struct hwtstamp_config *config); 174 void (*shaper_work)(struct work_struct *work); 175 int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, 176 struct ethtool_ts_info *info); 177 }; 178 179 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 180 u16 component_index, u32 *p_max_size, 181 u8 *p_align_bits, u16 *p_max_write_size) 182 { 183 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 184 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 185 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 186 char mcqi_pl[MLXSW_REG_MCQI_LEN]; 187 int err; 188 189 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 190 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 191 if (err) 192 return err; 193 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 194 p_max_write_size); 195 196 *p_align_bits = max_t(u8, *p_align_bits, 2); 197 *p_max_write_size = min_t(u16, *p_max_write_size, 198 MLXSW_REG_MCDA_MAX_DATA_LEN); 199 return 0; 200 } 201 202 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 203 { 204 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 205 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 206 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 207 char mcc_pl[MLXSW_REG_MCC_LEN]; 208 u8 control_state; 209 int err; 210 211 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 212 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 213 if (err) 214 return err; 215 216 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 217 if (control_state != MLXFW_FSM_STATE_IDLE) 218 return -EBUSY; 219 220 mlxsw_reg_mcc_pack(mcc_pl, 221 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 222 0, *fwhandle, 0); 223 return 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 224 } 225 226 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 227 u32 fwhandle, u16 component_index, 228 u32 component_size) 229 { 230 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 231 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 232 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 233 char mcc_pl[MLXSW_REG_MCC_LEN]; 234 235 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 236 component_index, fwhandle, component_size); 237 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 238 } 239 240 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 241 u32 fwhandle, u8 *data, u16 size, 242 u32 offset) 243 { 244 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 245 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 246 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 247 char mcda_pl[MLXSW_REG_MCDA_LEN]; 248 249 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 250 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 251 } 252 253 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 254 u32 fwhandle, u16 component_index) 255 { 256 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 257 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 258 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 259 char mcc_pl[MLXSW_REG_MCC_LEN]; 260 261 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 262 component_index, fwhandle, 0); 263 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 264 } 265 266 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 267 { 268 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 269 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 270 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 271 char mcc_pl[MLXSW_REG_MCC_LEN]; 272 273 mlxsw_reg_mcc_pack(mcc_pl, 
MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 274 fwhandle, 0); 275 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 276 } 277 278 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 279 enum mlxfw_fsm_state *fsm_state, 280 enum mlxfw_fsm_state_err *fsm_state_err) 281 { 282 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 283 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 284 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 285 char mcc_pl[MLXSW_REG_MCC_LEN]; 286 u8 control_state; 287 u8 error_code; 288 int err; 289 290 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 291 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 292 if (err) 293 return err; 294 295 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 296 *fsm_state = control_state; 297 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 298 MLXFW_FSM_STATE_ERR_MAX); 299 return 0; 300 } 301 302 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 303 { 304 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 305 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 306 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 307 char mcc_pl[MLXSW_REG_MCC_LEN]; 308 309 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 310 fwhandle, 0); 311 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 312 } 313 314 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 315 { 316 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 317 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 318 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 319 char mcc_pl[MLXSW_REG_MCC_LEN]; 320 321 mlxsw_reg_mcc_pack(mcc_pl, 322 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 323 fwhandle, 0); 324 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 325 } 326 327 static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev, 328 const char *msg, const char 
*comp_name, 329 u32 done_bytes, u32 total_bytes) 330 { 331 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 332 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 333 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 334 335 devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core), 336 msg, comp_name, 337 done_bytes, total_bytes); 338 } 339 340 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 341 .component_query = mlxsw_sp_component_query, 342 .fsm_lock = mlxsw_sp_fsm_lock, 343 .fsm_component_update = mlxsw_sp_fsm_component_update, 344 .fsm_block_download = mlxsw_sp_fsm_block_download, 345 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 346 .fsm_activate = mlxsw_sp_fsm_activate, 347 .fsm_query_state = mlxsw_sp_fsm_query_state, 348 .fsm_cancel = mlxsw_sp_fsm_cancel, 349 .fsm_release = mlxsw_sp_fsm_release, 350 .status_notify = mlxsw_sp_status_notify, 351 }; 352 353 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 354 const struct firmware *firmware, 355 struct netlink_ext_ack *extack) 356 { 357 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 358 .mlxfw_dev = { 359 .ops = &mlxsw_sp_mlxfw_dev_ops, 360 .psid = mlxsw_sp->bus_info->psid, 361 .psid_size = strlen(mlxsw_sp->bus_info->psid), 362 }, 363 .mlxsw_sp = mlxsw_sp 364 }; 365 int err; 366 367 mlxsw_core_fw_flash_start(mlxsw_sp->core); 368 devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core)); 369 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, 370 firmware, extack); 371 devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core)); 372 mlxsw_core_fw_flash_end(mlxsw_sp->core); 373 374 return err; 375 } 376 377 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 378 { 379 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 380 const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 381 const char *fw_filename = mlxsw_sp->fw_filename; 382 union devlink_param_value value; 383 const struct firmware *firmware; 384 
int err; 385 386 /* Don't check if driver does not require it */ 387 if (!req_rev || !fw_filename) 388 return 0; 389 390 /* Don't check if devlink 'fw_load_policy' param is 'flash' */ 391 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), 392 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 393 &value); 394 if (err) 395 return err; 396 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) 397 return 0; 398 399 /* Validate driver & FW are compatible */ 400 if (rev->major != req_rev->major) { 401 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 402 rev->major, req_rev->major); 403 return -EINVAL; 404 } 405 if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == 406 MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && 407 (rev->minor > req_rev->minor || 408 (rev->minor == req_rev->minor && 409 rev->subminor >= req_rev->subminor))) 410 return 0; 411 412 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", 413 rev->major, rev->minor, rev->subminor); 414 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 415 fw_filename); 416 417 err = request_firmware_direct(&firmware, fw_filename, 418 mlxsw_sp->bus_info->dev); 419 if (err) { 420 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 421 fw_filename); 422 return err; 423 } 424 425 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL); 426 release_firmware(firmware); 427 if (err) 428 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 429 430 /* On FW flash success, tell the caller FW reset is needed 431 * if current FW supports it. 432 */ 433 if (rev->minor >= req_rev->can_reset_minor) 434 return err ? 
err : -EAGAIN; 435 else 436 return 0; 437 } 438 439 static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core, 440 const char *file_name, const char *component, 441 struct netlink_ext_ack *extack) 442 { 443 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 444 const struct firmware *firmware; 445 int err; 446 447 if (component) 448 return -EOPNOTSUPP; 449 450 err = request_firmware_direct(&firmware, file_name, 451 mlxsw_sp->bus_info->dev); 452 if (err) 453 return err; 454 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack); 455 release_firmware(firmware); 456 457 return err; 458 } 459 460 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 461 unsigned int counter_index, u64 *packets, 462 u64 *bytes) 463 { 464 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 465 int err; 466 467 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 468 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 469 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 470 if (err) 471 return err; 472 if (packets) 473 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 474 if (bytes) 475 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 476 return 0; 477 } 478 479 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 480 unsigned int counter_index) 481 { 482 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 483 484 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 485 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 486 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 487 } 488 489 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 490 unsigned int *p_counter_index) 491 { 492 int err; 493 494 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 495 p_counter_index); 496 if (err) 497 return err; 498 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 499 if (err) 500 goto err_counter_clear; 501 return 0; 502 503 err_counter_clear: 504 mlxsw_sp_counter_free(mlxsw_sp, 
MLXSW_SP_COUNTER_SUB_POOL_FLOW, 505 *p_counter_index); 506 return err; 507 } 508 509 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 510 unsigned int counter_index) 511 { 512 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 513 counter_index); 514 } 515 516 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 517 const struct mlxsw_tx_info *tx_info) 518 { 519 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 520 521 memset(txhdr, 0, MLXSW_TXHDR_LEN); 522 523 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 524 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 525 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 526 mlxsw_tx_hdr_swid_set(txhdr, 0); 527 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 528 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 529 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 530 } 531 532 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 533 { 534 switch (state) { 535 case BR_STATE_FORWARDING: 536 return MLXSW_REG_SPMS_STATE_FORWARDING; 537 case BR_STATE_LEARNING: 538 return MLXSW_REG_SPMS_STATE_LEARNING; 539 case BR_STATE_LISTENING: /* fall-through */ 540 case BR_STATE_DISABLED: /* fall-through */ 541 case BR_STATE_BLOCKING: 542 return MLXSW_REG_SPMS_STATE_DISCARDING; 543 default: 544 BUG(); 545 } 546 } 547 548 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 549 u8 state) 550 { 551 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 552 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 553 char *spms_pl; 554 int err; 555 556 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 557 if (!spms_pl) 558 return -ENOMEM; 559 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 560 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 561 562 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 563 kfree(spms_pl); 564 return err; 565 } 566 567 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 568 { 569 char 
spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 570 int err; 571 572 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 573 if (err) 574 return err; 575 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 576 return 0; 577 } 578 579 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, 580 bool enable, u32 rate) 581 { 582 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 583 char mpsc_pl[MLXSW_REG_MPSC_LEN]; 584 585 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); 586 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl); 587 } 588 589 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 590 bool is_up) 591 { 592 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 593 char paos_pl[MLXSW_REG_PAOS_LEN]; 594 595 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 596 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 597 MLXSW_PORT_ADMIN_STATUS_DOWN); 598 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 599 } 600 601 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 602 unsigned char *addr) 603 { 604 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 605 char ppad_pl[MLXSW_REG_PPAD_LEN]; 606 607 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 608 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 609 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 610 } 611 612 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 613 { 614 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 615 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 616 617 ether_addr_copy(addr, mlxsw_sp->base_mac); 618 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 619 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 620 } 621 622 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 623 { 624 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 625 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 626 int 
max_mtu; 627 int err; 628 629 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 630 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 631 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 632 if (err) 633 return err; 634 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 635 636 if (mtu > max_mtu) 637 return -EINVAL; 638 639 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 640 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 641 } 642 643 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 644 { 645 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 646 char pspa_pl[MLXSW_REG_PSPA_LEN]; 647 648 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 649 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 650 } 651 652 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 653 { 654 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 655 char svpe_pl[MLXSW_REG_SVPE_LEN]; 656 657 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 658 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 659 } 660 661 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 662 bool learn_enable) 663 { 664 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 665 char *spvmlr_pl; 666 int err; 667 668 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 669 if (!spvmlr_pl) 670 return -ENOMEM; 671 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 672 learn_enable); 673 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 674 kfree(spvmlr_pl); 675 return err; 676 } 677 678 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 679 u16 vid) 680 { 681 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 682 char spvid_pl[MLXSW_REG_SPVID_LEN]; 683 684 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 685 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), 
spvid_pl); 686 } 687 688 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 689 bool allow) 690 { 691 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 692 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 693 694 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 695 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 696 } 697 698 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 699 { 700 int err; 701 702 if (!vid) { 703 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 704 if (err) 705 return err; 706 } else { 707 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 708 if (err) 709 return err; 710 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 711 if (err) 712 goto err_port_allow_untagged_set; 713 } 714 715 mlxsw_sp_port->pvid = vid; 716 return 0; 717 718 err_port_allow_untagged_set: 719 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 720 return err; 721 } 722 723 static int 724 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 725 { 726 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 727 char sspr_pl[MLXSW_REG_SSPR_LEN]; 728 729 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 730 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 731 } 732 733 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, 734 u8 local_port, u8 *p_module, 735 u8 *p_width, u8 *p_lane) 736 { 737 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 738 int err; 739 740 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 741 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 742 if (err) 743 return err; 744 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 745 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); 746 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 747 return 0; 748 } 749 750 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port, 751 u8 module, u8 width, u8 lane) 752 { 753 struct mlxsw_sp *mlxsw_sp 
= mlxsw_sp_port->mlxsw_sp; 754 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 755 int i; 756 757 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 758 mlxsw_reg_pmlp_width_set(pmlp_pl, width); 759 for (i = 0; i < width; i++) { 760 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module); 761 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */ 762 } 763 764 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 765 } 766 767 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 768 { 769 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 770 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 771 772 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 773 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 774 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 775 } 776 777 static int mlxsw_sp_port_open(struct net_device *dev) 778 { 779 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 780 int err; 781 782 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 783 if (err) 784 return err; 785 netif_start_queue(dev); 786 return 0; 787 } 788 789 static int mlxsw_sp_port_stop(struct net_device *dev) 790 { 791 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 792 793 netif_stop_queue(dev); 794 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 795 } 796 797 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 798 struct net_device *dev) 799 { 800 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 801 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 802 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 803 const struct mlxsw_tx_info tx_info = { 804 .local_port = mlxsw_sp_port->local_port, 805 .is_emad = false, 806 }; 807 u64 len; 808 int err; 809 810 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); 811 812 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 813 return NETDEV_TX_BUSY; 814 815 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { 816 struct sk_buff *skb_orig = skb; 817 818 skb = 
skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); 819 if (!skb) { 820 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 821 dev_kfree_skb_any(skb_orig); 822 return NETDEV_TX_OK; 823 } 824 dev_consume_skb_any(skb_orig); 825 } 826 827 if (eth_skb_pad(skb)) { 828 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 829 return NETDEV_TX_OK; 830 } 831 832 mlxsw_sp_txhdr_construct(skb, &tx_info); 833 /* TX header is consumed by HW on the way so we shouldn't count its 834 * bytes as being sent. 835 */ 836 len = skb->len - MLXSW_TXHDR_LEN; 837 838 /* Due to a race we might fail here because of a full queue. In that 839 * unlikely case we simply drop the packet. 840 */ 841 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 842 843 if (!err) { 844 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 845 u64_stats_update_begin(&pcpu_stats->syncp); 846 pcpu_stats->tx_packets++; 847 pcpu_stats->tx_bytes += len; 848 u64_stats_update_end(&pcpu_stats->syncp); 849 } else { 850 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 851 dev_kfree_skb_any(skb); 852 } 853 return NETDEV_TX_OK; 854 } 855 856 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 857 { 858 } 859 860 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 861 { 862 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 863 struct sockaddr *addr = p; 864 int err; 865 866 if (!is_valid_ether_addr(addr->sa_data)) 867 return -EADDRNOTAVAIL; 868 869 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 870 if (err) 871 return err; 872 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 873 return 0; 874 } 875 876 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 877 int mtu) 878 { 879 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 880 } 881 882 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 883 884 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 885 u16 delay) 886 { 887 delay = 
mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 888 BITS_PER_BYTE)); 889 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 890 mtu); 891 } 892 893 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 894 * Assumes 100m cable and maximum MTU. 895 */ 896 #define MLXSW_SP_PAUSE_DELAY 58752 897 898 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 899 u16 delay, bool pfc, bool pause) 900 { 901 if (pfc) 902 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 903 else if (pause) 904 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 905 else 906 return 0; 907 } 908 909 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 910 bool lossy) 911 { 912 if (lossy) 913 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 914 else 915 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 916 thres); 917 } 918 919 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 920 u8 *prio_tc, bool pause_en, 921 struct ieee_pfc *my_pfc) 922 { 923 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 924 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 925 u16 delay = !!my_pfc ? 
my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	u32 taken_headroom_cells = 0;
	u32 max_headroom_cells;
	int i, j, err;

	max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);

	/* Read the current per-PG buffer configuration first; only the
	 * PGs that are in use are rewritten below.
	 */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		u16 thres_cells;
		u16 delay_cells;
		u16 total_cells;
		bool lossy;

		/* Only configure a buffer if at least one priority is
		 * mapped to it; PFC state is taken from the first such
		 * priority found.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		/* Buffer is lossy unless PFC or global pause is enabled
		 * for it.
		 */
		lossy = !(pfc || pause_en);
		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
							pfc, pause_en);
		total_cells = thres_cells + delay_cells;

		/* Refuse configurations whose cumulative headroom exceeds
		 * what the port can provide.
		 */
		taken_headroom_cells += total_cells;
		if (taken_headroom_cells > max_headroom_cells)
			return -ENOBUFS;

		mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
				     thres_cells, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

/* Configure the port's headroom for the given MTU, using the DCB
 * priority-to-TC mapping and PFC configuration when ETS is enabled,
 * and a default (all priorities to TC 0, no PFC) mapping otherwise.
 */
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	/* Headroom and SPAN buffers must be able to accommodate the new
	 * MTU before it is committed to the device.
	 */
	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	/* Roll back to the previous MTU (dev->mtu is still unchanged). */
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* Sum the per-CPU software counters of packets that went through the
 * CPU (trapped traffic), for IFLA_OFFLOAD_XSTATS_CPU_HIT reporting.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection.
		 */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

/* Query one counter group of the PPCNT register for this port. */
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
					int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

/* Fill standard netdev stats from the IEEE 802.3 PPCNT counter group. */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

/* Fill extended counters: ECN marks, per-TC WRED drops, backlog and
 * tail drops, and per-priority TX counters. Queries that fail are
 * skipped, leaving the previously cached value in place.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

/* Periodic worker that refreshes the cached HW statistics while the
 * port's carrier is up, then re-arms itself.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program VLAN membership for a contiguous VID range via one SPVM
 * register write. The payload is heap-allocated (note: presumably too
 * large for the stack given MLXSW_REG_SPVM_LEN — confirm).
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	/* Split the range into chunks of at most
	 * MLXSW_REG_SPVM_REC_MAX_COUNT records per register write.
	 */
	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Destroy all VLAN entries of the port; the default VID is kept unless
 * flush_default is set.
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

/* Detach the {port, VID} entry from its current user: either a bridge
 * port or the router (FID); at most one can be set.
 */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

/* Create a {port, VID} entry and program the VLAN membership in HW.
 * Returns the new entry or an ERR_PTR; -EEXIST if the VID is already
 * configured on the port.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	/* Only the default VID egresses untagged. */
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	/* Undo the HW VLAN membership programmed above. */
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

/* Look up a matchall TC entry on the port by its filter cookie. */
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

/* Offload a matchall mirred action as a SPAN mirroring session. */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;

	if (!act->dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
					true, &mirror->span_id);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
				 span_type, true);
}

/* Offload a matchall sample action. Only one sampler per port is
 * supported. Note: the 'cls' parameter is currently unused.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	/* Publish the psample group before enabling sampling in HW so
	 * that the trap handler sees a valid group.
	 */
	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   act->sample.psample_group);
	mlxsw_sp_port->sample->truncate = act->sample.truncate;
	mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
	mlxsw_sp_port->sample->rate = act->sample.rate;

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int
mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct tc_cls_matchall_offload *f,
			       bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	int err;

	/* Only single-action matchall filters can be offloaded. */
	if (!flow_offload_has_one_action(&f->rule->action)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	act = &f->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, act,
							    ingress);
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    act, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Dispatch a flower classifier command to the spectrum_flower
 * implementation, operating on the shared ACL block.
 */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case FLOW_CLS_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case FLOW_CLS_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		/* Flower is handled by the per-block flower callback. */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}

static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}

static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		/* Matchall is handled by the per-port matchall callback. */
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* Release callback invoked when the flower block callback is freed. */
static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	mlxsw_sp_acl_block_destroy(acl_block);
}

static LIST_HEAD(mlxsw_sp_block_cb_list);

/* Bind the port to the shared flower ACL block, creating the block and
 * its flow_block callback on first use. The callback is refcounted
 * across all ports bound to the same block.
 */
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb) {
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(f->net,
					       mlxsw_sp_setup_tc_block_cb_flower,
					       mlxsw_sp, acl_block,
					       mlxsw_sp_tc_block_flower_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_acl_block_destroy(acl_block);
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
		register_block = true;
	} else {
		acl_block = flow_block_cb_priv(block_cb);
	}
	flow_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	/* Only register the callback on first use of this block. */
	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
	}

	return 0;

err_block_bind:
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
err_cb_register:
	return err;
}

static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = flow_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	/* Remove the callback once the last user is gone. */
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

/* Bind / unbind both the per-port matchall callback and the shared
 * flower callback for a clsact ingress or egress block.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	tc_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress =
true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	f->driver_block_list = &mlxsw_sp_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, mlxsw_sp_port,
					  &mlxsw_sp_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(f->net, cb, mlxsw_sp_port,
					       mlxsw_sp_port, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);
		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
							  ingress);
		if (err) {
			flow_block_cb_free(block_cb);
			return err;
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
						      f, ingress);
		block_cb = flow_block_cb_lookup(f, cb, mlxsw_sp_port);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* ndo_setup_tc entry point: dispatch by offload type. */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}


/* NETIF_F_HW_TC handler: refuse to disable TC offload while offloaded
 * flower or matchall rules are installed on the port.
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
	} else {
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
	}
	return 0;
}

/* NETIF_F_LOOPBACK handler: toggle loopback via the PPLR register,
 * taking the port administratively down around the change when it is
 * running.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Invoke the handler for a single feature bit if its requested state
 * differs from the current one, and update dev->features on success.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	/* Restore the previous feature set if any handler failed. */
	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

/* SIOCSHWTSTAMP: hand the user's hwtstamp config to the PTP backend
 * and copy the possibly-adjusted config back to user space.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* Reset hardware timestamping to the all-zero (disabled) config. */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

/* ndo_do_ioctl: only hardware timestamping ioctls are supported. */
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

/* Program the global pause configuration via the PFCC register. */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	/* Global pause and PFC are mutually exclusive. */
	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	/* Headroom must be re-sized for lossless operation before the
	 * pause setting is committed to the device.
	 */
	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	/* Restore the headroom matching the still-active pause state. */
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* Ethtool string/getter pair describing one PPCNT counter.
 * cells_bytes — NOTE(review): presumably marks counters reported in
 * buffer cells that need conversion to bytes; confirm against users of
 * this struct outside this chunk.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

/* IEEE 802.3 counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter =
mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

/* RFC 2863 interface counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
	{
		.str = "if_in_discards",
		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
	},
	{
		.str = "if_out_discards",
		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
	},
	{
		.str = "if_out_errors",
		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)

/* RFC 2819 (RMON) counter group, including packet size histogram. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
	{
		.str = "ether_stats_undersize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
	},
	{
		.str = "ether_stats_oversize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
	},
	{
		.str = "ether_stats_fragments",
		.getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
	},
	{
		.str = "ether_pkts64octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
	},
	{
		.str = "ether_pkts65to127octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
	},
	{
		.str = "ether_pkts128to255octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
	},
	{
		.str = "ether_pkts256to511octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
	},
	{
		.str = "ether_pkts512to1023octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
	},
	{
		.str = "ether_pkts1024to1518octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
	},
	{
		.str = "ether_pkts1519to2047octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
	},
	{
		.str = "ether_pkts2048to4095octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
	},
	{
		.str = "ether_pkts4096to8191octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)

/* RFC 3635 (Ethernet-like MIB) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
	{
		.str = "dot3stats_fcs_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
	},
	{
		.str = "dot3stats_symbol_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
	},
	{
		.str = "dot3control_in_unknown_opcodes",
		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
	},
	{
		.str = "dot3in_pause_frames",
		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)

/* Per-reason discard counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
	{
		.str = "discard_ingress_general",
		.getter = mlxsw_reg_ppcnt_ingress_general_get,
	},
	{
		.str = "discard_ingress_policy_engine",
		.getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
	},
	{
		.str = "discard_ingress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
	},
	{
		.str = "discard_ingress_tag_frame_type",
		.getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
	},
	{
		.str = "discard_egress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
	},
	{
		.str = "discard_loopback_filter",
		.getter = mlxsw_reg_ppcnt_loopback_filter_get,
	},
	{
		.str = "discard_egress_general",
		.getter = mlxsw_reg_ppcnt_egress_general_get,
	},
	{
		.str = "discard_egress_hoq",
		.getter = mlxsw_reg_ppcnt_egress_hoq_get,
	},
	{
		.str = "discard_egress_policy_engine",
		.getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
	},
	{
		.str = "discard_ingress_tx_link_down",
		.getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
	},
	{
		.str = "discard_egress_stp_filter",
		.getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
	},
	{
		.str = "discard_egress_sll",
		.getter = mlxsw_reg_ppcnt_egress_sll_get,
	},
};

#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)

/* Per-priority counter group (array continues past this chunk). */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
2273 { 2274 .str = "tx_pause_duration_prio", 2275 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 2276 }, 2277 }; 2278 2279 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 2280 2281 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 2282 { 2283 .str = "tc_transmit_queue_tc", 2284 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 2285 .cells_bytes = true, 2286 }, 2287 { 2288 .str = "tc_no_buffer_discard_uc_tc", 2289 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 2290 }, 2291 }; 2292 2293 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 2294 2295 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 2296 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \ 2297 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ 2298 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \ 2299 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \ 2300 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ 2301 IEEE_8021QAZ_MAX_TCS) + \ 2302 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ 2303 TC_MAX_QUEUE)) 2304 2305 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 2306 { 2307 int i; 2308 2309 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 2310 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 2311 mlxsw_sp_port_hw_prio_stats[i].str, prio); 2312 *p += ETH_GSTRING_LEN; 2313 } 2314 } 2315 2316 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 2317 { 2318 int i; 2319 2320 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 2321 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 2322 mlxsw_sp_port_hw_tc_stats[i].str, tc); 2323 *p += ETH_GSTRING_LEN; 2324 } 2325 } 2326 2327 static void mlxsw_sp_port_get_strings(struct net_device *dev, 2328 u32 stringset, u8 *data) 2329 { 2330 u8 *p = data; 2331 int i; 2332 2333 switch (stringset) { 2334 case ETH_SS_STATS: 2335 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2336 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2337 ETH_GSTRING_LEN); 2338 p += ETH_GSTRING_LEN; 2339 } 2340 2341 for (i = 0; i < 
MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) { 2342 memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str, 2343 ETH_GSTRING_LEN); 2344 p += ETH_GSTRING_LEN; 2345 } 2346 2347 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { 2348 memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, 2349 ETH_GSTRING_LEN); 2350 p += ETH_GSTRING_LEN; 2351 } 2352 2353 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) { 2354 memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str, 2355 ETH_GSTRING_LEN); 2356 p += ETH_GSTRING_LEN; 2357 } 2358 2359 for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) { 2360 memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str, 2361 ETH_GSTRING_LEN); 2362 p += ETH_GSTRING_LEN; 2363 } 2364 2365 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2366 mlxsw_sp_port_get_prio_strings(&p, i); 2367 2368 for (i = 0; i < TC_MAX_QUEUE; i++) 2369 mlxsw_sp_port_get_tc_strings(&p, i); 2370 2371 break; 2372 } 2373 } 2374 2375 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2376 enum ethtool_phys_id_state state) 2377 { 2378 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2379 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2380 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2381 bool active; 2382 2383 switch (state) { 2384 case ETHTOOL_ID_ACTIVE: 2385 active = true; 2386 break; 2387 case ETHTOOL_ID_INACTIVE: 2388 active = false; 2389 break; 2390 default: 2391 return -EOPNOTSUPP; 2392 } 2393 2394 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2395 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2396 } 2397 2398 static int 2399 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2400 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2401 { 2402 switch (grp) { 2403 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2404 *p_hw_stats = mlxsw_sp_port_hw_stats; 2405 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2406 break; 2407 case MLXSW_REG_PPCNT_RFC_2863_CNT: 2408 *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats; 2409 *p_len = 
MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2410 break; 2411 case MLXSW_REG_PPCNT_RFC_2819_CNT: 2412 *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; 2413 *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2414 break; 2415 case MLXSW_REG_PPCNT_RFC_3635_CNT: 2416 *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats; 2417 *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2418 break; 2419 case MLXSW_REG_PPCNT_DISCARD_CNT: 2420 *p_hw_stats = mlxsw_sp_port_hw_discard_stats; 2421 *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2422 break; 2423 case MLXSW_REG_PPCNT_PRIO_CNT: 2424 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2425 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2426 break; 2427 case MLXSW_REG_PPCNT_TC_CNT: 2428 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2429 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2430 break; 2431 default: 2432 WARN_ON(1); 2433 return -EOPNOTSUPP; 2434 } 2435 return 0; 2436 } 2437 2438 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2439 enum mlxsw_reg_ppcnt_grp grp, int prio, 2440 u64 *data, int data_index) 2441 { 2442 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2443 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2444 struct mlxsw_sp_port_hw_stats *hw_stats; 2445 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2446 int i, len; 2447 int err; 2448 2449 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2450 if (err) 2451 return; 2452 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2453 for (i = 0; i < len; i++) { 2454 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2455 if (!hw_stats[i].cells_bytes) 2456 continue; 2457 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2458 data[data_index + i]); 2459 } 2460 } 2461 2462 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2463 struct ethtool_stats *stats, u64 *data) 2464 { 2465 int i, data_index = 0; 2466 2467 /* IEEE 802.3 Counters */ 2468 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2469 data, data_index); 2470 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2471 
2472 /* RFC 2863 Counters */ 2473 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0, 2474 data, data_index); 2475 data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; 2476 2477 /* RFC 2819 Counters */ 2478 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, 2479 data, data_index); 2480 data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2481 2482 /* RFC 3635 Counters */ 2483 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0, 2484 data, data_index); 2485 data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; 2486 2487 /* Discard Counters */ 2488 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0, 2489 data, data_index); 2490 data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; 2491 2492 /* Per-Priority Counters */ 2493 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2494 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2495 data, data_index); 2496 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2497 } 2498 2499 /* Per-TC Counters */ 2500 for (i = 0; i < TC_MAX_QUEUE; i++) { 2501 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2502 data, data_index); 2503 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2504 } 2505 } 2506 2507 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2508 { 2509 switch (sset) { 2510 case ETH_SS_STATS: 2511 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; 2512 default: 2513 return -EOPNOTSUPP; 2514 } 2515 } 2516 2517 struct mlxsw_sp1_port_link_mode { 2518 enum ethtool_link_mode_bit_indices mask_ethtool; 2519 u32 mask; 2520 u32 speed; 2521 }; 2522 2523 static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = { 2524 { 2525 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2526 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2527 .speed = SPEED_100, 2528 }, 2529 { 2530 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2531 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2532 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2533 .speed = SPEED_1000, 2534 }, 2535 { 2536 .mask = 
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2537 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2538 .speed = SPEED_10000, 2539 }, 2540 { 2541 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 2542 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 2543 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2544 .speed = SPEED_10000, 2545 }, 2546 { 2547 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2548 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2549 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2550 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 2551 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2552 .speed = SPEED_10000, 2553 }, 2554 { 2555 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 2556 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 2557 .speed = SPEED_20000, 2558 }, 2559 { 2560 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 2561 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2562 .speed = SPEED_40000, 2563 }, 2564 { 2565 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 2566 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2567 .speed = SPEED_40000, 2568 }, 2569 { 2570 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 2571 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2572 .speed = SPEED_40000, 2573 }, 2574 { 2575 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 2576 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2577 .speed = SPEED_40000, 2578 }, 2579 { 2580 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, 2581 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2582 .speed = SPEED_25000, 2583 }, 2584 { 2585 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, 2586 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2587 .speed = SPEED_25000, 2588 }, 2589 { 2590 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2591 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2592 .speed = SPEED_25000, 2593 }, 2594 { 2595 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, 2596 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2597 .speed = SPEED_50000, 
2598 }, 2599 { 2600 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 2601 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2602 .speed = SPEED_50000, 2603 }, 2604 { 2605 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2, 2606 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2607 .speed = SPEED_50000, 2608 }, 2609 { 2610 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2611 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, 2612 .speed = SPEED_56000, 2613 }, 2614 { 2615 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2616 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, 2617 .speed = SPEED_56000, 2618 }, 2619 { 2620 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2621 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, 2622 .speed = SPEED_56000, 2623 }, 2624 { 2625 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 2626 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, 2627 .speed = SPEED_56000, 2628 }, 2629 { 2630 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, 2631 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2632 .speed = SPEED_100000, 2633 }, 2634 { 2635 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, 2636 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2637 .speed = SPEED_100000, 2638 }, 2639 { 2640 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, 2641 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2642 .speed = SPEED_100000, 2643 }, 2644 { 2645 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 2646 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2647 .speed = SPEED_100000, 2648 }, 2649 }; 2650 2651 #define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode) 2652 2653 static void 2654 mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 2655 u32 ptys_eth_proto, 2656 struct ethtool_link_ksettings *cmd) 2657 { 2658 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2659 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2660 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2661 
MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2662 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2663 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2664 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2665 2666 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2667 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2668 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2669 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2670 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2671 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2672 } 2673 2674 static void 2675 mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 2676 unsigned long *mode) 2677 { 2678 int i; 2679 2680 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2681 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) 2682 __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2683 mode); 2684 } 2685 } 2686 2687 static u32 2688 mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 2689 { 2690 int i; 2691 2692 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2693 if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) 2694 return mlxsw_sp1_port_link_mode[i].speed; 2695 } 2696 2697 return SPEED_UNKNOWN; 2698 } 2699 2700 static void 2701 mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 2702 u32 ptys_eth_proto, 2703 struct ethtool_link_ksettings *cmd) 2704 { 2705 cmd->base.speed = SPEED_UNKNOWN; 2706 cmd->base.duplex = DUPLEX_UNKNOWN; 2707 2708 if (!carrier_ok) 2709 return; 2710 2711 cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 2712 if (cmd->base.speed != SPEED_UNKNOWN) 2713 cmd->base.duplex = DUPLEX_FULL; 2714 } 2715 2716 static u32 2717 mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, 2718 const struct ethtool_link_ksettings *cmd) 2719 { 2720 u32 ptys_proto = 0; 2721 int i; 2722 2723 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2724 if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, 2725 cmd->link_modes.advertising)) 2726 
ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2727 } 2728 return ptys_proto; 2729 } 2730 2731 static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed) 2732 { 2733 u32 ptys_proto = 0; 2734 int i; 2735 2736 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2737 if (speed == mlxsw_sp1_port_link_mode[i].speed) 2738 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2739 } 2740 return ptys_proto; 2741 } 2742 2743 static u32 2744 mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 2745 { 2746 u32 ptys_proto = 0; 2747 int i; 2748 2749 for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { 2750 if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed) 2751 ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; 2752 } 2753 return ptys_proto; 2754 } 2755 2756 static int 2757 mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 2758 u32 *base_speed) 2759 { 2760 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 2761 return 0; 2762 } 2763 2764 static void 2765 mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 2766 u8 local_port, u32 proto_admin, bool autoneg) 2767 { 2768 mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg); 2769 } 2770 2771 static void 2772 mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 2773 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 2774 u32 *p_eth_proto_oper) 2775 { 2776 mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin, 2777 p_eth_proto_oper); 2778 } 2779 2780 static const struct mlxsw_sp_port_type_speed_ops 2781 mlxsw_sp1_port_type_speed_ops = { 2782 .from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port, 2783 .from_ptys_link = mlxsw_sp1_from_ptys_link, 2784 .from_ptys_speed = mlxsw_sp1_from_ptys_speed, 2785 .from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex, 2786 .to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link, 2787 .to_ptys_speed = mlxsw_sp1_to_ptys_speed, 2788 .to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed, 2789 
.port_speed_base = mlxsw_sp1_port_speed_base, 2790 .reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack, 2791 .reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack, 2792 }; 2793 2794 static const enum ethtool_link_mode_bit_indices 2795 mlxsw_sp2_mask_ethtool_sgmii_100m[] = { 2796 ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2797 }; 2798 2799 #define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \ 2800 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m) 2801 2802 static const enum ethtool_link_mode_bit_indices 2803 mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = { 2804 ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 2805 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2806 }; 2807 2808 #define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \ 2809 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii) 2810 2811 static const enum ethtool_link_mode_bit_indices 2812 mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = { 2813 ETHTOOL_LINK_MODE_2500baseX_Full_BIT, 2814 }; 2815 2816 #define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \ 2817 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii) 2818 2819 static const enum ethtool_link_mode_bit_indices 2820 mlxsw_sp2_mask_ethtool_5gbase_r[] = { 2821 ETHTOOL_LINK_MODE_5000baseT_Full_BIT, 2822 }; 2823 2824 #define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \ 2825 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r) 2826 2827 static const enum ethtool_link_mode_bit_indices 2828 mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = { 2829 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2830 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2831 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 2832 ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 2833 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 2834 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 2835 ETHTOOL_LINK_MODE_10000baseER_Full_BIT, 2836 }; 2837 2838 #define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \ 2839 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g) 2840 2841 static const enum ethtool_link_mode_bit_indices 2842 mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = { 2843 
ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2844 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2845 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2846 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2847 }; 2848 2849 #define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \ 2850 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g) 2851 2852 static const enum ethtool_link_mode_bit_indices 2853 mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = { 2854 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2855 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2856 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2857 }; 2858 2859 #define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \ 2860 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr) 2861 2862 static const enum ethtool_link_mode_bit_indices 2863 mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = { 2864 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2865 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2866 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2867 }; 2868 2869 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \ 2870 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2) 2871 2872 static const enum ethtool_link_mode_bit_indices 2873 mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = { 2874 ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, 2875 ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, 2876 ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, 2877 ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, 2878 ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, 2879 }; 2880 2881 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \ 2882 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr) 2883 2884 static const enum ethtool_link_mode_bit_indices 2885 mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = { 2886 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2887 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2888 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2889 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2890 }; 2891 2892 #define 
MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \ 2893 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4) 2894 2895 static const enum ethtool_link_mode_bit_indices 2896 mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = { 2897 ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, 2898 ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, 2899 ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, 2900 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, 2901 ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, 2902 }; 2903 2904 #define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \ 2905 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2) 2906 2907 static const enum ethtool_link_mode_bit_indices 2908 mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { 2909 ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, 2910 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, 2911 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, 2912 ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, 2913 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, 2914 }; 2915 2916 #define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \ 2917 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4) 2918 2919 struct mlxsw_sp2_port_link_mode { 2920 const enum ethtool_link_mode_bit_indices *mask_ethtool; 2921 int m_ethtool_len; 2922 u32 mask; 2923 u32 speed; 2924 }; 2925 2926 static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { 2927 { 2928 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M, 2929 .mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m, 2930 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN, 2931 .speed = SPEED_100, 2932 }, 2933 { 2934 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII, 2935 .mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii, 2936 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN, 2937 .speed = SPEED_1000, 2938 }, 2939 { 2940 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII, 2941 .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii, 2942 .m_ethtool_len = 
MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN, 2943 .speed = SPEED_2500, 2944 }, 2945 { 2946 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R, 2947 .mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r, 2948 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN, 2949 .speed = SPEED_5000, 2950 }, 2951 { 2952 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G, 2953 .mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g, 2954 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN, 2955 .speed = SPEED_10000, 2956 }, 2957 { 2958 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G, 2959 .mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g, 2960 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN, 2961 .speed = SPEED_40000, 2962 }, 2963 { 2964 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR, 2965 .mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr, 2966 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN, 2967 .speed = SPEED_25000, 2968 }, 2969 { 2970 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2, 2971 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2, 2972 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN, 2973 .speed = SPEED_50000, 2974 }, 2975 { 2976 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR, 2977 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr, 2978 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN, 2979 .speed = SPEED_50000, 2980 }, 2981 { 2982 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4, 2983 .mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4, 2984 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN, 2985 .speed = SPEED_100000, 2986 }, 2987 { 2988 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2, 2989 .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2, 2990 .m_ethtool_len = 
MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN, 2991 .speed = SPEED_100000, 2992 }, 2993 { 2994 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, 2995 .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, 2996 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, 2997 .speed = SPEED_200000, 2998 }, 2999 }; 3000 3001 #define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode) 3002 3003 static void 3004 mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 3005 u32 ptys_eth_proto, 3006 struct ethtool_link_ksettings *cmd) 3007 { 3008 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 3009 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 3010 } 3011 3012 static void 3013 mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3014 unsigned long *mode) 3015 { 3016 int i; 3017 3018 for (i = 0; i < link_mode->m_ethtool_len; i++) 3019 __set_bit(link_mode->mask_ethtool[i], mode); 3020 } 3021 3022 static void 3023 mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 3024 unsigned long *mode) 3025 { 3026 int i; 3027 3028 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3029 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) 3030 mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3031 mode); 3032 } 3033 } 3034 3035 static u32 3036 mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 3037 { 3038 int i; 3039 3040 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3041 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) 3042 return mlxsw_sp2_port_link_mode[i].speed; 3043 } 3044 3045 return SPEED_UNKNOWN; 3046 } 3047 3048 static void 3049 mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 3050 u32 ptys_eth_proto, 3051 struct ethtool_link_ksettings *cmd) 3052 { 3053 cmd->base.speed = SPEED_UNKNOWN; 3054 cmd->base.duplex = DUPLEX_UNKNOWN; 3055 3056 if (!carrier_ok) 3057 return; 3058 
3059 cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 3060 if (cmd->base.speed != SPEED_UNKNOWN) 3061 cmd->base.duplex = DUPLEX_FULL; 3062 } 3063 3064 static bool 3065 mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3066 const unsigned long *mode) 3067 { 3068 int cnt = 0; 3069 int i; 3070 3071 for (i = 0; i < link_mode->m_ethtool_len; i++) { 3072 if (test_bit(link_mode->mask_ethtool[i], mode)) 3073 cnt++; 3074 } 3075 3076 return cnt == link_mode->m_ethtool_len; 3077 } 3078 3079 static u32 3080 mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, 3081 const struct ethtool_link_ksettings *cmd) 3082 { 3083 u32 ptys_proto = 0; 3084 int i; 3085 3086 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3087 if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3088 cmd->link_modes.advertising)) 3089 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3090 } 3091 return ptys_proto; 3092 } 3093 3094 static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed) 3095 { 3096 u32 ptys_proto = 0; 3097 int i; 3098 3099 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3100 if (speed == mlxsw_sp2_port_link_mode[i].speed) 3101 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3102 } 3103 return ptys_proto; 3104 } 3105 3106 static u32 3107 mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 3108 { 3109 u32 ptys_proto = 0; 3110 int i; 3111 3112 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3113 if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed) 3114 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3115 } 3116 return ptys_proto; 3117 } 3118 3119 static int 3120 mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3121 u32 *base_speed) 3122 { 3123 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3124 u32 eth_proto_cap; 3125 int err; 3126 3127 /* In Spectrum-2, the speed of 1x can change from port to port, so query 3128 * it from firmware. 
3129 */ 3130 mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false); 3131 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3132 if (err) 3133 return err; 3134 mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 3135 3136 if (eth_proto_cap & 3137 MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) { 3138 *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G; 3139 return 0; 3140 } 3141 3142 if (eth_proto_cap & 3143 MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) { 3144 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 3145 return 0; 3146 } 3147 3148 return -EIO; 3149 } 3150 3151 static void 3152 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 3153 u8 local_port, u32 proto_admin, 3154 bool autoneg) 3155 { 3156 mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); 3157 } 3158 3159 static void 3160 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 3161 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 3162 u32 *p_eth_proto_oper) 3163 { 3164 mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, 3165 p_eth_proto_admin, p_eth_proto_oper); 3166 } 3167 3168 static const struct mlxsw_sp_port_type_speed_ops 3169 mlxsw_sp2_port_type_speed_ops = { 3170 .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, 3171 .from_ptys_link = mlxsw_sp2_from_ptys_link, 3172 .from_ptys_speed = mlxsw_sp2_from_ptys_speed, 3173 .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, 3174 .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, 3175 .to_ptys_speed = mlxsw_sp2_to_ptys_speed, 3176 .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed, 3177 .port_speed_base = mlxsw_sp2_port_speed_base, 3178 .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, 3179 .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, 3180 }; 3181 3182 static void 3183 mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, 3184 struct ethtool_link_ksettings *cmd) 3185 { 3186 const struct 
mlxsw_sp_port_type_speed_ops *ops; 3187 3188 ops = mlxsw_sp->port_type_speed_ops; 3189 3190 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 3191 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 3192 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 3193 3194 ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); 3195 ops->from_ptys_link(mlxsw_sp, eth_proto_cap, cmd->link_modes.supported); 3196 } 3197 3198 static void 3199 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, 3200 u32 eth_proto_admin, bool autoneg, 3201 struct ethtool_link_ksettings *cmd) 3202 { 3203 const struct mlxsw_sp_port_type_speed_ops *ops; 3204 3205 ops = mlxsw_sp->port_type_speed_ops; 3206 3207 if (!autoneg) 3208 return; 3209 3210 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 3211 ops->from_ptys_link(mlxsw_sp, eth_proto_admin, 3212 cmd->link_modes.advertising); 3213 } 3214 3215 static u8 3216 mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type) 3217 { 3218 switch (connector_type) { 3219 case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: 3220 return PORT_OTHER; 3221 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: 3222 return PORT_NONE; 3223 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: 3224 return PORT_TP; 3225 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: 3226 return PORT_AUI; 3227 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: 3228 return PORT_BNC; 3229 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: 3230 return PORT_MII; 3231 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: 3232 return PORT_FIBRE; 3233 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: 3234 return PORT_DA; 3235 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: 3236 return PORT_OTHER; 3237 default: 3238 WARN_ON_ONCE(1); 3239 return PORT_OTHER; 3240 } 3241 } 3242 3243 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 3244 struct ethtool_link_ksettings *cmd) 3245 { 3246 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 
3247 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3248 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3249 const struct mlxsw_sp_port_type_speed_ops *ops; 3250 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3251 u8 connector_type; 3252 bool autoneg; 3253 int err; 3254 3255 ops = mlxsw_sp->port_type_speed_ops; 3256 3257 autoneg = mlxsw_sp_port->link.autoneg; 3258 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3259 0, false); 3260 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3261 if (err) 3262 return err; 3263 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3264 ð_proto_admin, ð_proto_oper); 3265 3266 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd); 3267 3268 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3269 cmd); 3270 3271 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 3272 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3273 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3274 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3275 eth_proto_oper, cmd); 3276 3277 return 0; 3278 } 3279 3280 static int 3281 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3282 const struct ethtool_link_ksettings *cmd) 3283 { 3284 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3285 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3286 const struct mlxsw_sp_port_type_speed_ops *ops; 3287 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3288 u32 eth_proto_cap, eth_proto_new; 3289 bool autoneg; 3290 int err; 3291 3292 ops = mlxsw_sp->port_type_speed_ops; 3293 3294 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3295 0, false); 3296 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3297 if (err) 3298 return err; 3299 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3300 3301 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 3302 if (!autoneg && cmd->base.speed == SPEED_56000) { 3303 
netdev_err(dev, "56G not supported with autoneg off\n"); 3304 return -EINVAL; 3305 } 3306 eth_proto_new = autoneg ? 3307 ops->to_ptys_advert_link(mlxsw_sp, cmd) : 3308 ops->to_ptys_speed(mlxsw_sp, cmd->base.speed); 3309 3310 eth_proto_new = eth_proto_new & eth_proto_cap; 3311 if (!eth_proto_new) { 3312 netdev_err(dev, "No supported speed requested\n"); 3313 return -EINVAL; 3314 } 3315 3316 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3317 eth_proto_new, autoneg); 3318 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3319 if (err) 3320 return err; 3321 3322 mlxsw_sp_port->link.autoneg = autoneg; 3323 3324 if (!netif_running(dev)) 3325 return 0; 3326 3327 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3328 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3329 3330 return 0; 3331 } 3332 3333 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3334 struct ethtool_modinfo *modinfo) 3335 { 3336 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3337 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3338 int err; 3339 3340 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3341 mlxsw_sp_port->mapping.module, 3342 modinfo); 3343 3344 return err; 3345 } 3346 3347 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3348 struct ethtool_eeprom *ee, 3349 u8 *data) 3350 { 3351 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3352 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3353 int err; 3354 3355 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3356 mlxsw_sp_port->mapping.module, ee, 3357 data); 3358 3359 return err; 3360 } 3361 3362 static int 3363 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) 3364 { 3365 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3366 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3367 3368 return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); 3369 } 3370 3371 static const struct ethtool_ops 
mlxsw_sp_port_ethtool_ops = { 3372 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 3373 .get_link = ethtool_op_get_link, 3374 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 3375 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 3376 .get_strings = mlxsw_sp_port_get_strings, 3377 .set_phys_id = mlxsw_sp_port_set_phys_id, 3378 .get_ethtool_stats = mlxsw_sp_port_get_stats, 3379 .get_sset_count = mlxsw_sp_port_get_sset_count, 3380 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 3381 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 3382 .get_module_info = mlxsw_sp_get_module_info, 3383 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 3384 .get_ts_info = mlxsw_sp_get_ts_info, 3385 }; 3386 3387 static int 3388 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 3389 { 3390 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3391 const struct mlxsw_sp_port_type_speed_ops *ops; 3392 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3393 u32 eth_proto_admin; 3394 u32 upper_speed; 3395 u32 base_speed; 3396 int err; 3397 3398 ops = mlxsw_sp->port_type_speed_ops; 3399 3400 err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port, 3401 &base_speed); 3402 if (err) 3403 return err; 3404 upper_speed = base_speed * width; 3405 3406 eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed); 3407 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3408 eth_proto_admin, mlxsw_sp_port->link.autoneg); 3409 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3410 } 3411 3412 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 3413 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 3414 bool dwrr, u8 dwrr_weight) 3415 { 3416 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3417 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3418 3419 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3420 next_index); 3421 mlxsw_reg_qeec_de_set(qeec_pl, true); 3422 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 3423 
mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 3424 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3425 } 3426 3427 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 3428 enum mlxsw_reg_qeec_hr hr, u8 index, 3429 u8 next_index, u32 maxrate) 3430 { 3431 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3432 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3433 3434 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3435 next_index); 3436 mlxsw_reg_qeec_mase_set(qeec_pl, true); 3437 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 3438 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3439 } 3440 3441 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, 3442 enum mlxsw_reg_qeec_hr hr, u8 index, 3443 u8 next_index, u32 minrate) 3444 { 3445 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3446 char qeec_pl[MLXSW_REG_QEEC_LEN]; 3447 3448 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 3449 next_index); 3450 mlxsw_reg_qeec_mise_set(qeec_pl, true); 3451 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); 3452 3453 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 3454 } 3455 3456 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 3457 u8 switch_prio, u8 tclass) 3458 { 3459 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3460 char qtct_pl[MLXSW_REG_QTCT_LEN]; 3461 3462 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 3463 tclass); 3464 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 3465 } 3466 3467 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 3468 { 3469 int err, i; 3470 3471 /* Setup the elements hierarcy, so that each TC is linked to 3472 * one subgroup, which are all member in the same group. 
3473 */ 3474 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3475 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 3476 0); 3477 if (err) 3478 return err; 3479 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3480 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3481 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 3482 0, false, 0); 3483 if (err) 3484 return err; 3485 } 3486 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3487 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3488 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 3489 false, 0); 3490 if (err) 3491 return err; 3492 3493 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 3494 MLXSW_REG_QEEC_HIERARCY_TC, 3495 i + 8, i, 3496 true, 100); 3497 if (err) 3498 return err; 3499 } 3500 3501 /* Make sure the max shaper is disabled in all hierarchies that support 3502 * it. Note that this disables ptps (PTP shaper), but that is intended 3503 * for the initial configuration. 3504 */ 3505 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3506 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 3507 MLXSW_REG_QEEC_MAS_DIS); 3508 if (err) 3509 return err; 3510 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3511 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3512 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 3513 i, 0, 3514 MLXSW_REG_QEEC_MAS_DIS); 3515 if (err) 3516 return err; 3517 } 3518 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3519 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3520 MLXSW_REG_QEEC_HIERARCY_TC, 3521 i, i, 3522 MLXSW_REG_QEEC_MAS_DIS); 3523 if (err) 3524 return err; 3525 3526 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 3527 MLXSW_REG_QEEC_HIERARCY_TC, 3528 i + 8, i, 3529 MLXSW_REG_QEEC_MAS_DIS); 3530 if (err) 3531 return err; 3532 } 3533 3534 /* Configure the min shaper for multicast TCs. */ 3535 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3536 err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, 3537 MLXSW_REG_QEEC_HIERARCY_TC, 3538 i + 8, i, 3539 MLXSW_REG_QEEC_MIS_MIN); 3540 if (err) 3541 return err; 3542 } 3543 3544 /* Map all priorities to traffic class 0. 
*/ 3545 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 3546 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 3547 if (err) 3548 return err; 3549 } 3550 3551 return 0; 3552 } 3553 3554 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 3555 bool enable) 3556 { 3557 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3558 char qtctm_pl[MLXSW_REG_QTCTM_LEN]; 3559 3560 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); 3561 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); 3562 } 3563 3564 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3565 bool split, u8 module, u8 width, u8 lane) 3566 { 3567 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 3568 struct mlxsw_sp_port *mlxsw_sp_port; 3569 struct net_device *dev; 3570 int err; 3571 3572 err = mlxsw_core_port_init(mlxsw_sp->core, local_port, 3573 module + 1, split, lane / width, 3574 mlxsw_sp->base_mac, 3575 sizeof(mlxsw_sp->base_mac)); 3576 if (err) { 3577 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 3578 local_port); 3579 return err; 3580 } 3581 3582 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 3583 if (!dev) { 3584 err = -ENOMEM; 3585 goto err_alloc_etherdev; 3586 } 3587 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); 3588 mlxsw_sp_port = netdev_priv(dev); 3589 mlxsw_sp_port->dev = dev; 3590 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 3591 mlxsw_sp_port->local_port = local_port; 3592 mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; 3593 mlxsw_sp_port->split = split; 3594 mlxsw_sp_port->mapping.module = module; 3595 mlxsw_sp_port->mapping.width = width; 3596 mlxsw_sp_port->mapping.lane = lane; 3597 mlxsw_sp_port->link.autoneg = 1; 3598 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); 3599 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); 3600 3601 mlxsw_sp_port->pcpu_stats = 3602 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 3603 if (!mlxsw_sp_port->pcpu_stats) { 3604 err = -ENOMEM; 3605 goto err_alloc_stats; 
3606 } 3607 3608 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), 3609 GFP_KERNEL); 3610 if (!mlxsw_sp_port->sample) { 3611 err = -ENOMEM; 3612 goto err_alloc_sample; 3613 } 3614 3615 INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, 3616 &update_stats_cache); 3617 3618 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 3619 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 3620 3621 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane); 3622 if (err) { 3623 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", 3624 mlxsw_sp_port->local_port); 3625 goto err_port_module_map; 3626 } 3627 3628 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 3629 if (err) { 3630 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 3631 mlxsw_sp_port->local_port); 3632 goto err_port_swid_set; 3633 } 3634 3635 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 3636 if (err) { 3637 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 3638 mlxsw_sp_port->local_port); 3639 goto err_dev_addr_init; 3640 } 3641 3642 netif_carrier_off(dev); 3643 3644 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 3645 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 3646 dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; 3647 3648 dev->min_mtu = 0; 3649 dev->max_mtu = ETH_MAX_MTU; 3650 3651 /* Each packet needs to have a Tx header (metadata) on top all other 3652 * headers. 
3653 */ 3654 dev->needed_headroom = MLXSW_TXHDR_LEN; 3655 3656 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 3657 if (err) { 3658 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 3659 mlxsw_sp_port->local_port); 3660 goto err_port_system_port_mapping_set; 3661 } 3662 3663 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 3664 if (err) { 3665 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 3666 mlxsw_sp_port->local_port); 3667 goto err_port_speed_by_width_set; 3668 } 3669 3670 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 3671 if (err) { 3672 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 3673 mlxsw_sp_port->local_port); 3674 goto err_port_mtu_set; 3675 } 3676 3677 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3678 if (err) 3679 goto err_port_admin_status_set; 3680 3681 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 3682 if (err) { 3683 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 3684 mlxsw_sp_port->local_port); 3685 goto err_port_buffers_init; 3686 } 3687 3688 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 3689 if (err) { 3690 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 3691 mlxsw_sp_port->local_port); 3692 goto err_port_ets_init; 3693 } 3694 3695 err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); 3696 if (err) { 3697 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", 3698 mlxsw_sp_port->local_port); 3699 goto err_port_tc_mc_mode; 3700 } 3701 3702 /* ETS and buffers must be initialized before DCB. 
*/ 3703 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 3704 if (err) { 3705 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 3706 mlxsw_sp_port->local_port); 3707 goto err_port_dcb_init; 3708 } 3709 3710 err = mlxsw_sp_port_fids_init(mlxsw_sp_port); 3711 if (err) { 3712 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n", 3713 mlxsw_sp_port->local_port); 3714 goto err_port_fids_init; 3715 } 3716 3717 err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); 3718 if (err) { 3719 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", 3720 mlxsw_sp_port->local_port); 3721 goto err_port_qdiscs_init; 3722 } 3723 3724 err = mlxsw_sp_port_nve_init(mlxsw_sp_port); 3725 if (err) { 3726 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", 3727 mlxsw_sp_port->local_port); 3728 goto err_port_nve_init; 3729 } 3730 3731 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 3732 if (err) { 3733 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", 3734 mlxsw_sp_port->local_port); 3735 goto err_port_pvid_set; 3736 } 3737 3738 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 3739 MLXSW_SP_DEFAULT_VID); 3740 if (IS_ERR(mlxsw_sp_port_vlan)) { 3741 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", 3742 mlxsw_sp_port->local_port); 3743 err = PTR_ERR(mlxsw_sp_port_vlan); 3744 goto err_port_vlan_create; 3745 } 3746 mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; 3747 3748 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, 3749 mlxsw_sp->ptp_ops->shaper_work); 3750 3751 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 3752 err = register_netdev(dev); 3753 if (err) { 3754 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 3755 mlxsw_sp_port->local_port); 3756 goto err_register_netdev; 3757 } 3758 3759 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 3760 mlxsw_sp_port, dev); 3761 
mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 3762 return 0; 3763 3764 err_register_netdev: 3765 mlxsw_sp->ports[local_port] = NULL; 3766 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 3767 err_port_vlan_create: 3768 err_port_pvid_set: 3769 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3770 err_port_nve_init: 3771 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3772 err_port_qdiscs_init: 3773 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3774 err_port_fids_init: 3775 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3776 err_port_dcb_init: 3777 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3778 err_port_tc_mc_mode: 3779 err_port_ets_init: 3780 err_port_buffers_init: 3781 err_port_admin_status_set: 3782 err_port_mtu_set: 3783 err_port_speed_by_width_set: 3784 err_port_system_port_mapping_set: 3785 err_dev_addr_init: 3786 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3787 err_port_swid_set: 3788 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3789 err_port_module_map: 3790 kfree(mlxsw_sp_port->sample); 3791 err_alloc_sample: 3792 free_percpu(mlxsw_sp_port->pcpu_stats); 3793 err_alloc_stats: 3794 free_netdev(dev); 3795 err_alloc_etherdev: 3796 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3797 return err; 3798 } 3799 3800 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3801 { 3802 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3803 3804 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 3805 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); 3806 mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 3807 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 3808 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 3809 mlxsw_sp->ports[local_port] = NULL; 3810 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 3811 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 3812 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 3813 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 3814 
mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 3815 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 3816 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3817 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3818 kfree(mlxsw_sp_port->sample); 3819 free_percpu(mlxsw_sp_port->pcpu_stats); 3820 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3821 free_netdev(mlxsw_sp_port->dev); 3822 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3823 } 3824 3825 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3826 { 3827 return mlxsw_sp->ports[local_port] != NULL; 3828 } 3829 3830 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 3831 { 3832 int i; 3833 3834 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 3835 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3836 mlxsw_sp_port_remove(mlxsw_sp, i); 3837 kfree(mlxsw_sp->port_to_module); 3838 kfree(mlxsw_sp->ports); 3839 } 3840 3841 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3842 { 3843 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3844 u8 module, width, lane; 3845 size_t alloc_size; 3846 int i; 3847 int err; 3848 3849 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 3850 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 3851 if (!mlxsw_sp->ports) 3852 return -ENOMEM; 3853 3854 mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int), 3855 GFP_KERNEL); 3856 if (!mlxsw_sp->port_to_module) { 3857 err = -ENOMEM; 3858 goto err_port_to_module_alloc; 3859 } 3860 3861 for (i = 1; i < max_ports; i++) { 3862 /* Mark as invalid */ 3863 mlxsw_sp->port_to_module[i] = -1; 3864 3865 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 3866 &width, &lane); 3867 if (err) 3868 goto err_port_module_info_get; 3869 if (!width) 3870 continue; 3871 mlxsw_sp->port_to_module[i] = module; 3872 err = mlxsw_sp_port_create(mlxsw_sp, i, false, 3873 module, width, lane); 3874 if (err) 3875 goto err_port_create; 3876 } 3877 return 0; 3878 3879 
err_port_create: 3880 err_port_module_info_get: 3881 for (i--; i >= 1; i--) 3882 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3883 mlxsw_sp_port_remove(mlxsw_sp, i); 3884 kfree(mlxsw_sp->port_to_module); 3885 err_port_to_module_alloc: 3886 kfree(mlxsw_sp->ports); 3887 return err; 3888 } 3889 3890 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 3891 { 3892 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 3893 3894 return local_port - offset; 3895 } 3896 3897 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 3898 u8 module, unsigned int count, u8 offset) 3899 { 3900 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 3901 int err, i; 3902 3903 for (i = 0; i < count; i++) { 3904 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset, 3905 true, module, width, i * width); 3906 if (err) 3907 goto err_port_create; 3908 } 3909 3910 return 0; 3911 3912 err_port_create: 3913 for (i--; i >= 0; i--) 3914 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 3915 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 3916 return err; 3917 } 3918 3919 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 3920 u8 base_port, unsigned int count) 3921 { 3922 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 3923 int i; 3924 3925 /* Split by four means we need to re-create two ports, otherwise 3926 * only one. 
3927 */ 3928 count = count / 2; 3929 3930 for (i = 0; i < count; i++) { 3931 local_port = base_port + i * 2; 3932 if (mlxsw_sp->port_to_module[local_port] < 0) 3933 continue; 3934 module = mlxsw_sp->port_to_module[local_port]; 3935 3936 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 3937 width, 0); 3938 } 3939 } 3940 3941 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 3942 unsigned int count, 3943 struct netlink_ext_ack *extack) 3944 { 3945 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3946 u8 local_ports_in_1x, local_ports_in_2x, offset; 3947 struct mlxsw_sp_port *mlxsw_sp_port; 3948 u8 module, cur_width, base_port; 3949 int i; 3950 int err; 3951 3952 if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) || 3953 !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X)) 3954 return -EIO; 3955 3956 local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X); 3957 local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X); 3958 3959 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3960 if (!mlxsw_sp_port) { 3961 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3962 local_port); 3963 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 3964 return -EINVAL; 3965 } 3966 3967 module = mlxsw_sp_port->mapping.module; 3968 cur_width = mlxsw_sp_port->mapping.width; 3969 3970 if (count != 2 && count != 4) { 3971 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 3972 NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports"); 3973 return -EINVAL; 3974 } 3975 3976 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 3977 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 3978 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further"); 3979 return -EINVAL; 3980 } 3981 3982 /* Make sure we have enough slave (even) ports for the split. 
*/ 3983 if (count == 2) { 3984 offset = local_ports_in_2x; 3985 base_port = local_port; 3986 if (mlxsw_sp->ports[base_port + local_ports_in_2x]) { 3987 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3988 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 3989 return -EINVAL; 3990 } 3991 } else { 3992 offset = local_ports_in_1x; 3993 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3994 if (mlxsw_sp->ports[base_port + 1] || 3995 mlxsw_sp->ports[base_port + 3]) { 3996 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3997 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 3998 return -EINVAL; 3999 } 4000 } 4001 4002 for (i = 0; i < count; i++) 4003 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 4004 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 4005 4006 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count, 4007 offset); 4008 if (err) { 4009 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 4010 goto err_port_split_create; 4011 } 4012 4013 return 0; 4014 4015 err_port_split_create: 4016 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 4017 return err; 4018 } 4019 4020 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 4021 struct netlink_ext_ack *extack) 4022 { 4023 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 4024 u8 local_ports_in_1x, local_ports_in_2x, offset; 4025 struct mlxsw_sp_port *mlxsw_sp_port; 4026 u8 cur_width, base_port; 4027 unsigned int count; 4028 int i; 4029 4030 if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) || 4031 !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X)) 4032 return -EIO; 4033 4034 local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X); 4035 local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X); 4036 4037 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4038 if (!mlxsw_sp_port) { 4039 dev_err(mlxsw_sp->bus_info->dev, "Port number 
\"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	/* A 1-lane port implies a 4-way split, otherwise a 2-way split. */
	count = cur_width == 1 ? 4 : 2;

	if (count == 2)
		offset = local_ports_in_2x;
	else
		offset = local_ports_in_1x;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	/* Re-create the original (unsplit) ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}

/* Handle a PUDE (Port Up/Down Event): reflect the port's new operational
 * status in the netdevice carrier state.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		/* Kick the PTP shaper work immediately on link up. */
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

/* Drain all timestamp records from an MTPPTR event payload and hand each
 * record to the SP1 PTP code, for either the ingress or egress FIFO.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u8 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}

/* Ingress-FIFO variant of the MTPPTR event handler. */
static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
}

/* Egress-FIFO variant of the MTPPTR event handler. */
static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
}

/* Base RX handler for trapped packets: account the packet in the port's
 * per-CPU statistics and inject it into the stack on the port netdevice.
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

/* As above, but flag the packet as already forwarded by hardware. */
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* As above, but flag the packet as already L2- and L3-forwarded by
 * hardware.
 */
static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

/* RX handler for sampled packets: deliver to the psample module (possibly
 * truncated) and consume the skb on every path.
 */
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	size = mlxsw_sp_port->sample->truncate ?
	       mlxsw_sp_port->sample->trunc_size : skb->len;

	/* psample_group is RCU-protected; it may be torn down concurrently. */
	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}

/* RX handler for PTP packets: defer to the per-ASIC PTP implementation. */
static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port,
				     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}

#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

/* Traps common to all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
		  false, SP_LLDP, DISCARD),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
	/* PTP traps */
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU,
		  false, SP_PTP0, DISCARD),
	MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false),
};

/* Traps specific to Spectrum-1 (registered in addition to the table above). */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

/* Configure a rate/burst policer (QPCR) for each trap group that is policed;
 * groups not listed in the switch keep their default and are skipped.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			rate = 24 * 1024;
			burst_size = 12;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			rate = 19 * 1024;
			burst_size = 12;
			break;
		default:
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Bind each trap group (HTGT) to a policer, a priority and a traffic class.
 * Policer IDs mirror the group index, except events which are unpoliced.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Register an array of trap listeners; on failure, unregister the ones
 * already registered so the caller sees all-or-nothing semantics.
 */
static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_listener listeners[],
				   size_t listeners_count)
{
	int i;
	int err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &listeners[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_listener listeners[],
				      size_t listeners_count)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
}

/* Set up CPU policers and trap groups, then register the common listener
 * table and any per-generation extra listeners.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* Unregister in reverse order of registration. */
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure the LAG hash (SLCR) with a per-device seed derived from the base
 * MAC, and allocate the LAG tracking array.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Minimal trap-group setup needed before full init: route EMADs to the CPU. */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

/* Per-generation PTP dispatch tables. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
};

static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
};

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct
mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Unwind strictly in reverse order of the init sequence above. */
err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 init: install the SP1-specific operation tables, firmware
 * requirement and extra listeners, then run the common init.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Spectrum-2 init: install the SP2-specific operation tables, then run the
 * common init. No firmware requirement or extra listeners are set here.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Tear down in strict reverse order of mlxsw_sp_init(). */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Spectrum-2 profile: same as SP1 but without the KVD split, which is not
 * configured on this generation.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static void
mlxsw_sp_resource_size_params_prepare(struct
mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* The KVD total is fixed; each partition may grow up to what remains
	 * after the other partitions' minimums are reserved.
	 */
	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the KVD devlink resource tree for Spectrum-1: the total KVD with
 * linear, hash-double and hash-single children sized from the SP1 profile.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if  (err)
		return err;

	/* Split the remaining hash area by the double/single parts ratio,
	 * rounded down to the KVD granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_sp1_resources_kvd_register(mlxsw_core);
}

/* Spectrum-2 has no devlink-managed KVD partitioning. */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	return 0;
}

/* Resolve the linear/double/single KVD partition sizes, preferring any
 * user-provided devlink values and falling back to the profile ratio.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* devlink validation callback: fw_load_policy only accepts 'driver' or
 * 'flash'.
 */
static int
mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
					       union devlink_param_value val,
					       struct netlink_ext_ack *extack)
{
	if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
	    (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};

/* Register the common devlink params and seed fw_load_policy to 'driver'. */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}

static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp_devlink_params,
				  ARRAY_SIZE(mlxsw_sp_devlink_params));
}

static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};

/* Spectrum-2: register the common params plus the ACL region rehash
 * interval, unwinding the common params on failure.
 */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = mlxsw_sp_params_register(mlxsw_core);
	if (err)
		return err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		goto err_devlink_params_register;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;

err_devlink_params_register:
	mlxsw_sp_params_unregister(mlxsw_core);
	return err;
}

static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
	mlxsw_sp_params_unregister(mlxsw_core);
}

static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)

{ 5207 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5208 5209 skb_pull(skb, MLXSW_TXHDR_LEN); 5210 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 5211 } 5212 5213 static struct mlxsw_driver mlxsw_sp1_driver = { 5214 .kind = mlxsw_sp1_driver_name, 5215 .priv_size = sizeof(struct mlxsw_sp), 5216 .init = mlxsw_sp1_init, 5217 .fini = mlxsw_sp_fini, 5218 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5219 .port_split = mlxsw_sp_port_split, 5220 .port_unsplit = mlxsw_sp_port_unsplit, 5221 .sb_pool_get = mlxsw_sp_sb_pool_get, 5222 .sb_pool_set = mlxsw_sp_sb_pool_set, 5223 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5224 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5225 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5226 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5227 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5228 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5229 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5230 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5231 .flash_update = mlxsw_sp_flash_update, 5232 .txhdr_construct = mlxsw_sp_txhdr_construct, 5233 .resources_register = mlxsw_sp1_resources_register, 5234 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 5235 .params_register = mlxsw_sp_params_register, 5236 .params_unregister = mlxsw_sp_params_unregister, 5237 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5238 .txhdr_len = MLXSW_TXHDR_LEN, 5239 .profile = &mlxsw_sp1_config_profile, 5240 .res_query_enabled = true, 5241 }; 5242 5243 static struct mlxsw_driver mlxsw_sp2_driver = { 5244 .kind = mlxsw_sp2_driver_name, 5245 .priv_size = sizeof(struct mlxsw_sp), 5246 .init = mlxsw_sp2_init, 5247 .fini = mlxsw_sp_fini, 5248 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5249 .port_split = mlxsw_sp_port_split, 5250 .port_unsplit = mlxsw_sp_port_unsplit, 5251 .sb_pool_get = mlxsw_sp_sb_pool_get, 5252 .sb_pool_set = mlxsw_sp_sb_pool_set, 5253 .sb_port_pool_get = 
mlxsw_sp_sb_port_pool_get, 5254 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5255 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5256 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5257 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5258 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5259 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5260 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5261 .flash_update = mlxsw_sp_flash_update, 5262 .txhdr_construct = mlxsw_sp_txhdr_construct, 5263 .resources_register = mlxsw_sp2_resources_register, 5264 .params_register = mlxsw_sp2_params_register, 5265 .params_unregister = mlxsw_sp2_params_unregister, 5266 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5267 .txhdr_len = MLXSW_TXHDR_LEN, 5268 .profile = &mlxsw_sp2_config_profile, 5269 .res_query_enabled = true, 5270 }; 5271 5272 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 5273 { 5274 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 5275 } 5276 5277 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) 5278 { 5279 struct mlxsw_sp_port **p_mlxsw_sp_port = data; 5280 int ret = 0; 5281 5282 if (mlxsw_sp_port_dev_check(lower_dev)) { 5283 *p_mlxsw_sp_port = netdev_priv(lower_dev); 5284 ret = 1; 5285 } 5286 5287 return ret; 5288 } 5289 5290 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 5291 { 5292 struct mlxsw_sp_port *mlxsw_sp_port; 5293 5294 if (mlxsw_sp_port_dev_check(dev)) 5295 return netdev_priv(dev); 5296 5297 mlxsw_sp_port = NULL; 5298 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); 5299 5300 return mlxsw_sp_port; 5301 } 5302 5303 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 5304 { 5305 struct mlxsw_sp_port *mlxsw_sp_port; 5306 5307 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 5308 return mlxsw_sp_port ? 
mlxsw_sp_port->mlxsw_sp : NULL; 5309 } 5310 5311 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 5312 { 5313 struct mlxsw_sp_port *mlxsw_sp_port; 5314 5315 if (mlxsw_sp_port_dev_check(dev)) 5316 return netdev_priv(dev); 5317 5318 mlxsw_sp_port = NULL; 5319 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 5320 &mlxsw_sp_port); 5321 5322 return mlxsw_sp_port; 5323 } 5324 5325 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 5326 { 5327 struct mlxsw_sp_port *mlxsw_sp_port; 5328 5329 rcu_read_lock(); 5330 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 5331 if (mlxsw_sp_port) 5332 dev_hold(mlxsw_sp_port->dev); 5333 rcu_read_unlock(); 5334 return mlxsw_sp_port; 5335 } 5336 5337 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 5338 { 5339 dev_put(mlxsw_sp_port->dev); 5340 } 5341 5342 static void 5343 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 5344 struct net_device *lag_dev) 5345 { 5346 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 5347 struct net_device *upper_dev; 5348 struct list_head *iter; 5349 5350 if (netif_is_bridge_port(lag_dev)) 5351 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 5352 5353 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 5354 if (!netif_is_bridge_port(upper_dev)) 5355 continue; 5356 br_dev = netdev_master_upper_dev_get(upper_dev); 5357 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 5358 } 5359 } 5360 5361 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5362 { 5363 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5364 5365 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 5366 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5367 } 5368 5369 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5370 { 5371 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5372 5373 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 5374 return 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5375 } 5376 5377 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 5378 u16 lag_id, u8 port_index) 5379 { 5380 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5381 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5382 5383 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 5384 lag_id, port_index); 5385 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5386 } 5387 5388 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 5389 u16 lag_id) 5390 { 5391 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5392 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5393 5394 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 5395 lag_id); 5396 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5397 } 5398 5399 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 5400 u16 lag_id) 5401 { 5402 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5403 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5404 5405 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 5406 lag_id); 5407 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5408 } 5409 5410 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 5411 u16 lag_id) 5412 { 5413 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5414 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5415 5416 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 5417 lag_id); 5418 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5419 } 5420 5421 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 5422 struct net_device *lag_dev, 5423 u16 *p_lag_id) 5424 { 5425 struct mlxsw_sp_upper *lag; 5426 int free_lag_id = -1; 5427 u64 max_lag; 5428 int i; 5429 5430 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); 5431 for (i = 0; i < max_lag; i++) { 5432 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 5433 if 
(lag->ref_count) { 5434 if (lag->dev == lag_dev) { 5435 *p_lag_id = i; 5436 return 0; 5437 } 5438 } else if (free_lag_id < 0) { 5439 free_lag_id = i; 5440 } 5441 } 5442 if (free_lag_id < 0) 5443 return -EBUSY; 5444 *p_lag_id = free_lag_id; 5445 return 0; 5446 } 5447 5448 static bool 5449 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 5450 struct net_device *lag_dev, 5451 struct netdev_lag_upper_info *lag_upper_info, 5452 struct netlink_ext_ack *extack) 5453 { 5454 u16 lag_id; 5455 5456 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 5457 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 5458 return false; 5459 } 5460 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 5461 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 5462 return false; 5463 } 5464 return true; 5465 } 5466 5467 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 5468 u16 lag_id, u8 *p_port_index) 5469 { 5470 u64 max_lag_members; 5471 int i; 5472 5473 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 5474 MAX_LAG_MEMBERS); 5475 for (i = 0; i < max_lag_members; i++) { 5476 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 5477 *p_port_index = i; 5478 return 0; 5479 } 5480 } 5481 return -EBUSY; 5482 } 5483 5484 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 5485 struct net_device *lag_dev) 5486 { 5487 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5488 struct mlxsw_sp_upper *lag; 5489 u16 lag_id; 5490 u8 port_index; 5491 int err; 5492 5493 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 5494 if (err) 5495 return err; 5496 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5497 if (!lag->ref_count) { 5498 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 5499 if (err) 5500 return err; 5501 lag->dev = lag_dev; 5502 } 5503 5504 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 5505 if (err) 5506 return err; 5507 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, 
port_index); 5508 if (err) 5509 goto err_col_port_add; 5510 5511 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 5512 mlxsw_sp_port->local_port); 5513 mlxsw_sp_port->lag_id = lag_id; 5514 mlxsw_sp_port->lagged = 1; 5515 lag->ref_count++; 5516 5517 /* Port is no longer usable as a router interface */ 5518 if (mlxsw_sp_port->default_vlan->fid) 5519 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 5520 5521 return 0; 5522 5523 err_col_port_add: 5524 if (!lag->ref_count) 5525 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 5526 return err; 5527 } 5528 5529 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 5530 struct net_device *lag_dev) 5531 { 5532 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5533 u16 lag_id = mlxsw_sp_port->lag_id; 5534 struct mlxsw_sp_upper *lag; 5535 5536 if (!mlxsw_sp_port->lagged) 5537 return; 5538 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5539 WARN_ON(lag->ref_count == 0); 5540 5541 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 5542 5543 /* Any VLANs configured on the port are no longer valid */ 5544 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 5545 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 5546 /* Make the LAG and its directly linked uppers leave bridges they 5547 * are memeber in 5548 */ 5549 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 5550 5551 if (lag->ref_count == 1) 5552 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 5553 5554 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 5555 mlxsw_sp_port->local_port); 5556 mlxsw_sp_port->lagged = 0; 5557 lag->ref_count--; 5558 5559 /* Make sure untagged frames are allowed to ingress */ 5560 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 5561 } 5562 5563 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 5564 u16 lag_id) 5565 { 5566 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5567 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5568 5569 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, 
lag_id, 5570 mlxsw_sp_port->local_port); 5571 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5572 } 5573 5574 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 5575 u16 lag_id) 5576 { 5577 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5578 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5579 5580 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 5581 mlxsw_sp_port->local_port); 5582 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5583 } 5584 5585 static int 5586 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 5587 { 5588 int err; 5589 5590 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 5591 mlxsw_sp_port->lag_id); 5592 if (err) 5593 return err; 5594 5595 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5596 if (err) 5597 goto err_dist_port_add; 5598 5599 return 0; 5600 5601 err_dist_port_add: 5602 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5603 return err; 5604 } 5605 5606 static int 5607 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 5608 { 5609 int err; 5610 5611 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 5612 mlxsw_sp_port->lag_id); 5613 if (err) 5614 return err; 5615 5616 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 5617 mlxsw_sp_port->lag_id); 5618 if (err) 5619 goto err_col_port_disable; 5620 5621 return 0; 5622 5623 err_col_port_disable: 5624 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5625 return err; 5626 } 5627 5628 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 5629 struct netdev_lag_lower_state_info *info) 5630 { 5631 if (info->tx_enabled) 5632 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 5633 else 5634 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 5635 } 5636 5637 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 5638 bool enable) 5639 { 5640 struct mlxsw_sp *mlxsw_sp = 
mlxsw_sp_port->mlxsw_sp; 5641 enum mlxsw_reg_spms_state spms_state; 5642 char *spms_pl; 5643 u16 vid; 5644 int err; 5645 5646 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : 5647 MLXSW_REG_SPMS_STATE_DISCARDING; 5648 5649 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 5650 if (!spms_pl) 5651 return -ENOMEM; 5652 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 5653 5654 for (vid = 0; vid < VLAN_N_VID; vid++) 5655 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 5656 5657 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 5658 kfree(spms_pl); 5659 return err; 5660 } 5661 5662 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 5663 { 5664 u16 vid = 1; 5665 int err; 5666 5667 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 5668 if (err) 5669 return err; 5670 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 5671 if (err) 5672 goto err_port_stp_set; 5673 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 5674 true, false); 5675 if (err) 5676 goto err_port_vlan_set; 5677 5678 for (; vid <= VLAN_N_VID - 1; vid++) { 5679 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 5680 vid, false); 5681 if (err) 5682 goto err_vid_learning_set; 5683 } 5684 5685 return 0; 5686 5687 err_vid_learning_set: 5688 for (vid--; vid >= 1; vid--) 5689 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 5690 err_port_vlan_set: 5691 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 5692 err_port_stp_set: 5693 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 5694 return err; 5695 } 5696 5697 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 5698 { 5699 u16 vid; 5700 5701 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 5702 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 5703 vid, true); 5704 5705 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 5706 false, false); 5707 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 5708 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 5709 } 5710 5711 static bool 
mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 5712 { 5713 unsigned int num_vxlans = 0; 5714 struct net_device *dev; 5715 struct list_head *iter; 5716 5717 netdev_for_each_lower_dev(br_dev, dev, iter) { 5718 if (netif_is_vxlan(dev)) 5719 num_vxlans++; 5720 } 5721 5722 return num_vxlans > 1; 5723 } 5724 5725 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 5726 { 5727 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 5728 struct net_device *dev; 5729 struct list_head *iter; 5730 5731 netdev_for_each_lower_dev(br_dev, dev, iter) { 5732 u16 pvid; 5733 int err; 5734 5735 if (!netif_is_vxlan(dev)) 5736 continue; 5737 5738 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 5739 if (err || !pvid) 5740 continue; 5741 5742 if (test_and_set_bit(pvid, vlans)) 5743 return false; 5744 } 5745 5746 return true; 5747 } 5748 5749 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 5750 struct netlink_ext_ack *extack) 5751 { 5752 if (br_multicast_enabled(br_dev)) { 5753 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 5754 return false; 5755 } 5756 5757 if (!br_vlan_enabled(br_dev) && 5758 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 5759 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 5760 return false; 5761 } 5762 5763 if (br_vlan_enabled(br_dev) && 5764 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 5765 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 5766 return false; 5767 } 5768 5769 return true; 5770 } 5771 5772 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 5773 struct net_device *dev, 5774 unsigned long event, void *ptr) 5775 { 5776 struct netdev_notifier_changeupper_info *info; 5777 struct mlxsw_sp_port *mlxsw_sp_port; 5778 struct netlink_ext_ack *extack; 5779 struct net_device *upper_dev; 5780 struct mlxsw_sp *mlxsw_sp; 5781 int err = 0; 5782 
5783 mlxsw_sp_port = netdev_priv(dev); 5784 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5785 info = ptr; 5786 extack = netdev_notifier_info_to_extack(&info->info); 5787 5788 switch (event) { 5789 case NETDEV_PRECHANGEUPPER: 5790 upper_dev = info->upper_dev; 5791 if (!is_vlan_dev(upper_dev) && 5792 !netif_is_lag_master(upper_dev) && 5793 !netif_is_bridge_master(upper_dev) && 5794 !netif_is_ovs_master(upper_dev) && 5795 !netif_is_macvlan(upper_dev)) { 5796 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5797 return -EINVAL; 5798 } 5799 if (!info->linking) 5800 break; 5801 if (netif_is_bridge_master(upper_dev) && 5802 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 5803 mlxsw_sp_bridge_has_vxlan(upper_dev) && 5804 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5805 return -EOPNOTSUPP; 5806 if (netdev_has_any_upper_dev(upper_dev) && 5807 (!netif_is_bridge_master(upper_dev) || 5808 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 5809 upper_dev))) { 5810 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 5811 return -EINVAL; 5812 } 5813 if (netif_is_lag_master(upper_dev) && 5814 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 5815 info->upper_info, extack)) 5816 return -EINVAL; 5817 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 5818 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 5819 return -EINVAL; 5820 } 5821 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 5822 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 5823 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 5824 return -EINVAL; 5825 } 5826 if (netif_is_macvlan(upper_dev) && 5827 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) { 5828 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 5829 return -EOPNOTSUPP; 5830 } 5831 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 5832 NL_SET_ERR_MSG_MOD(extack, "Master device 
is an OVS master and this device has a VLAN"); 5833 return -EINVAL; 5834 } 5835 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 5836 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 5837 return -EINVAL; 5838 } 5839 break; 5840 case NETDEV_CHANGEUPPER: 5841 upper_dev = info->upper_dev; 5842 if (netif_is_bridge_master(upper_dev)) { 5843 if (info->linking) 5844 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 5845 lower_dev, 5846 upper_dev, 5847 extack); 5848 else 5849 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 5850 lower_dev, 5851 upper_dev); 5852 } else if (netif_is_lag_master(upper_dev)) { 5853 if (info->linking) { 5854 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 5855 upper_dev); 5856 } else { 5857 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 5858 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 5859 upper_dev); 5860 } 5861 } else if (netif_is_ovs_master(upper_dev)) { 5862 if (info->linking) 5863 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 5864 else 5865 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 5866 } else if (netif_is_macvlan(upper_dev)) { 5867 if (!info->linking) 5868 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5869 } else if (is_vlan_dev(upper_dev)) { 5870 struct net_device *br_dev; 5871 5872 if (!netif_is_bridge_port(upper_dev)) 5873 break; 5874 if (info->linking) 5875 break; 5876 br_dev = netdev_master_upper_dev_get(upper_dev); 5877 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 5878 br_dev); 5879 } 5880 break; 5881 } 5882 5883 return err; 5884 } 5885 5886 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 5887 unsigned long event, void *ptr) 5888 { 5889 struct netdev_notifier_changelowerstate_info *info; 5890 struct mlxsw_sp_port *mlxsw_sp_port; 5891 int err; 5892 5893 mlxsw_sp_port = netdev_priv(dev); 5894 info = ptr; 5895 5896 switch (event) { 5897 case NETDEV_CHANGELOWERSTATE: 5898 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 5899 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 5900 
info->lower_state_info); 5901 if (err) 5902 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 5903 } 5904 break; 5905 } 5906 5907 return 0; 5908 } 5909 5910 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 5911 struct net_device *port_dev, 5912 unsigned long event, void *ptr) 5913 { 5914 switch (event) { 5915 case NETDEV_PRECHANGEUPPER: 5916 case NETDEV_CHANGEUPPER: 5917 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 5918 event, ptr); 5919 case NETDEV_CHANGELOWERSTATE: 5920 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 5921 ptr); 5922 } 5923 5924 return 0; 5925 } 5926 5927 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 5928 unsigned long event, void *ptr) 5929 { 5930 struct net_device *dev; 5931 struct list_head *iter; 5932 int ret; 5933 5934 netdev_for_each_lower_dev(lag_dev, dev, iter) { 5935 if (mlxsw_sp_port_dev_check(dev)) { 5936 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 5937 ptr); 5938 if (ret) 5939 return ret; 5940 } 5941 } 5942 5943 return 0; 5944 } 5945 5946 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 5947 struct net_device *dev, 5948 unsigned long event, void *ptr, 5949 u16 vid) 5950 { 5951 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 5952 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5953 struct netdev_notifier_changeupper_info *info = ptr; 5954 struct netlink_ext_ack *extack; 5955 struct net_device *upper_dev; 5956 int err = 0; 5957 5958 extack = netdev_notifier_info_to_extack(&info->info); 5959 5960 switch (event) { 5961 case NETDEV_PRECHANGEUPPER: 5962 upper_dev = info->upper_dev; 5963 if (!netif_is_bridge_master(upper_dev) && 5964 !netif_is_macvlan(upper_dev)) { 5965 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5966 return -EINVAL; 5967 } 5968 if (!info->linking) 5969 break; 5970 if (netif_is_bridge_master(upper_dev) && 5971 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 
upper_dev) && 5972 mlxsw_sp_bridge_has_vxlan(upper_dev) && 5973 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5974 return -EOPNOTSUPP; 5975 if (netdev_has_any_upper_dev(upper_dev) && 5976 (!netif_is_bridge_master(upper_dev) || 5977 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 5978 upper_dev))) { 5979 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 5980 return -EINVAL; 5981 } 5982 if (netif_is_macvlan(upper_dev) && 5983 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { 5984 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 5985 return -EOPNOTSUPP; 5986 } 5987 break; 5988 case NETDEV_CHANGEUPPER: 5989 upper_dev = info->upper_dev; 5990 if (netif_is_bridge_master(upper_dev)) { 5991 if (info->linking) 5992 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 5993 vlan_dev, 5994 upper_dev, 5995 extack); 5996 else 5997 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 5998 vlan_dev, 5999 upper_dev); 6000 } else if (netif_is_macvlan(upper_dev)) { 6001 if (!info->linking) 6002 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6003 } else { 6004 err = -EINVAL; 6005 WARN_ON(1); 6006 } 6007 break; 6008 } 6009 6010 return err; 6011 } 6012 6013 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 6014 struct net_device *lag_dev, 6015 unsigned long event, 6016 void *ptr, u16 vid) 6017 { 6018 struct net_device *dev; 6019 struct list_head *iter; 6020 int ret; 6021 6022 netdev_for_each_lower_dev(lag_dev, dev, iter) { 6023 if (mlxsw_sp_port_dev_check(dev)) { 6024 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 6025 event, ptr, 6026 vid); 6027 if (ret) 6028 return ret; 6029 } 6030 } 6031 6032 return 0; 6033 } 6034 6035 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, 6036 struct net_device *br_dev, 6037 unsigned long event, void *ptr, 6038 u16 vid) 6039 { 6040 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 6041 struct 
netdev_notifier_changeupper_info *info = ptr; 6042 struct netlink_ext_ack *extack; 6043 struct net_device *upper_dev; 6044 6045 if (!mlxsw_sp) 6046 return 0; 6047 6048 extack = netdev_notifier_info_to_extack(&info->info); 6049 6050 switch (event) { 6051 case NETDEV_PRECHANGEUPPER: 6052 upper_dev = info->upper_dev; 6053 if (!netif_is_macvlan(upper_dev)) { 6054 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6055 return -EOPNOTSUPP; 6056 } 6057 if (!info->linking) 6058 break; 6059 if (netif_is_macvlan(upper_dev) && 6060 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { 6061 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6062 return -EOPNOTSUPP; 6063 } 6064 break; 6065 case NETDEV_CHANGEUPPER: 6066 upper_dev = info->upper_dev; 6067 if (info->linking) 6068 break; 6069 if (netif_is_macvlan(upper_dev)) 6070 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6071 break; 6072 } 6073 6074 return 0; 6075 } 6076 6077 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 6078 unsigned long event, void *ptr) 6079 { 6080 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 6081 u16 vid = vlan_dev_vlan_id(vlan_dev); 6082 6083 if (mlxsw_sp_port_dev_check(real_dev)) 6084 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 6085 event, ptr, vid); 6086 else if (netif_is_lag_master(real_dev)) 6087 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 6088 real_dev, event, 6089 ptr, vid); 6090 else if (netif_is_bridge_master(real_dev)) 6091 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, 6092 event, ptr, vid); 6093 6094 return 0; 6095 } 6096 6097 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 6098 unsigned long event, void *ptr) 6099 { 6100 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); 6101 struct netdev_notifier_changeupper_info *info = ptr; 6102 struct netlink_ext_ack *extack; 6103 struct net_device *upper_dev; 6104 6105 if (!mlxsw_sp) 6106 return 0; 6107 6108 
extack = netdev_notifier_info_to_extack(&info->info); 6109 6110 switch (event) { 6111 case NETDEV_PRECHANGEUPPER: 6112 upper_dev = info->upper_dev; 6113 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { 6114 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6115 return -EOPNOTSUPP; 6116 } 6117 if (!info->linking) 6118 break; 6119 if (netif_is_macvlan(upper_dev) && 6120 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) { 6121 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6122 return -EOPNOTSUPP; 6123 } 6124 break; 6125 case NETDEV_CHANGEUPPER: 6126 upper_dev = info->upper_dev; 6127 if (info->linking) 6128 break; 6129 if (is_vlan_dev(upper_dev)) 6130 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 6131 if (netif_is_macvlan(upper_dev)) 6132 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6133 break; 6134 } 6135 6136 return 0; 6137 } 6138 6139 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 6140 unsigned long event, void *ptr) 6141 { 6142 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 6143 struct netdev_notifier_changeupper_info *info = ptr; 6144 struct netlink_ext_ack *extack; 6145 6146 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 6147 return 0; 6148 6149 extack = netdev_notifier_info_to_extack(&info->info); 6150 6151 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ 6152 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6153 6154 return -EOPNOTSUPP; 6155 } 6156 6157 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) 6158 { 6159 struct netdev_notifier_changeupper_info *info = ptr; 6160 6161 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) 6162 return false; 6163 return netif_is_l3_master(info->upper_dev); 6164 } 6165 6166 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 6167 struct net_device *dev, 6168 unsigned long event, void *ptr) 6169 { 6170 struct netdev_notifier_changeupper_info 
*cu_info; 6171 struct netdev_notifier_info *info = ptr; 6172 struct netlink_ext_ack *extack; 6173 struct net_device *upper_dev; 6174 6175 extack = netdev_notifier_info_to_extack(info); 6176 6177 switch (event) { 6178 case NETDEV_CHANGEUPPER: 6179 cu_info = container_of(info, 6180 struct netdev_notifier_changeupper_info, 6181 info); 6182 upper_dev = cu_info->upper_dev; 6183 if (!netif_is_bridge_master(upper_dev)) 6184 return 0; 6185 if (!mlxsw_sp_lower_get(upper_dev)) 6186 return 0; 6187 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 6188 return -EOPNOTSUPP; 6189 if (cu_info->linking) { 6190 if (!netif_running(dev)) 6191 return 0; 6192 /* When the bridge is VLAN-aware, the VNI of the VxLAN 6193 * device needs to be mapped to a VLAN, but at this 6194 * point no VLANs are configured on the VxLAN device 6195 */ 6196 if (br_vlan_enabled(upper_dev)) 6197 return 0; 6198 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 6199 dev, 0, extack); 6200 } else { 6201 /* VLANs were already flushed, which triggered the 6202 * necessary cleanup 6203 */ 6204 if (br_vlan_enabled(upper_dev)) 6205 return 0; 6206 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6207 } 6208 break; 6209 case NETDEV_PRE_UP: 6210 upper_dev = netdev_master_upper_dev_get(dev); 6211 if (!upper_dev) 6212 return 0; 6213 if (!netif_is_bridge_master(upper_dev)) 6214 return 0; 6215 if (!mlxsw_sp_lower_get(upper_dev)) 6216 return 0; 6217 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 6218 extack); 6219 case NETDEV_DOWN: 6220 upper_dev = netdev_master_upper_dev_get(dev); 6221 if (!upper_dev) 6222 return 0; 6223 if (!netif_is_bridge_master(upper_dev)) 6224 return 0; 6225 if (!mlxsw_sp_lower_get(upper_dev)) 6226 return 0; 6227 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6228 break; 6229 } 6230 6231 return 0; 6232 } 6233 6234 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 6235 unsigned long event, void *ptr) 6236 { 6237 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	/* Invalidate any mirroring (SPAN) entry bound to a netdev that is
	 * being unregistered, then let the SPAN code re-resolve bindings.
	 */
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* NOTE(review): deliberately 'if', not 'else if' — the VxLAN handler
	 * runs in addition to the dispatch chain below; confirm against
	 * mainline before changing.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

/* Validators for IPv4/IPv6 address addition; registered at module init */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

/* PCI IDs served by the Spectrum-1 driver (continued in next chunk) */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX,
PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 6288 {0, }, 6289 }; 6290 6291 static struct pci_driver mlxsw_sp1_pci_driver = { 6292 .name = mlxsw_sp1_driver_name, 6293 .id_table = mlxsw_sp1_pci_id_table, 6294 }; 6295 6296 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 6297 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 6298 {0, }, 6299 }; 6300 6301 static struct pci_driver mlxsw_sp2_pci_driver = { 6302 .name = mlxsw_sp2_driver_name, 6303 .id_table = mlxsw_sp2_pci_id_table, 6304 }; 6305 6306 static int __init mlxsw_sp_module_init(void) 6307 { 6308 int err; 6309 6310 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6311 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6312 6313 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 6314 if (err) 6315 goto err_sp1_core_driver_register; 6316 6317 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 6318 if (err) 6319 goto err_sp2_core_driver_register; 6320 6321 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 6322 if (err) 6323 goto err_sp1_pci_driver_register; 6324 6325 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 6326 if (err) 6327 goto err_sp2_pci_driver_register; 6328 6329 return 0; 6330 6331 err_sp2_pci_driver_register: 6332 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6333 err_sp1_pci_driver_register: 6334 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6335 err_sp2_core_driver_register: 6336 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6337 err_sp1_core_driver_register: 6338 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6339 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6340 return err; 6341 } 6342 6343 static void __exit mlxsw_sp_module_exit(void) 6344 { 6345 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6346 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6347 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6348 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6349 
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Expose both PCI ID tables for module autoloading via modalias */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
/* Firmware image requested for Spectrum-1 (filename built in the header) */
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);