1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ 3 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/types.h> 7 #include <linux/pci.h> 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/ethtool.h> 11 #include <linux/slab.h> 12 #include <linux/device.h> 13 #include <linux/skbuff.h> 14 #include <linux/if_vlan.h> 15 #include <linux/if_bridge.h> 16 #include <linux/workqueue.h> 17 #include <linux/jiffies.h> 18 #include <linux/bitops.h> 19 #include <linux/list.h> 20 #include <linux/notifier.h> 21 #include <linux/dcbnl.h> 22 #include <linux/inetdevice.h> 23 #include <linux/netlink.h> 24 #include <linux/jhash.h> 25 #include <net/switchdev.h> 26 #include <net/pkt_cls.h> 27 #include <net/tc_act/tc_mirred.h> 28 #include <net/netevent.h> 29 #include <net/tc_act/tc_sample.h> 30 #include <net/addrconf.h> 31 32 #include "spectrum.h" 33 #include "pci.h" 34 #include "core.h" 35 #include "core_env.h" 36 #include "reg.h" 37 #include "port.h" 38 #include "trap.h" 39 #include "txheader.h" 40 #include "spectrum_cnt.h" 41 #include "spectrum_dpipe.h" 42 #include "spectrum_acl_flex_actions.h" 43 #include "spectrum_span.h" 44 #include "spectrum_ptp.h" 45 #include "../mlxfw/mlxfw.h" 46 47 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) 48 49 #define MLXSW_SP1_FWREV_MAJOR 13 50 #define MLXSW_SP1_FWREV_MINOR 2000 51 #define MLXSW_SP1_FWREV_SUBMINOR 1122 52 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 53 54 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 55 .major = MLXSW_SP1_FWREV_MAJOR, 56 .minor = MLXSW_SP1_FWREV_MINOR, 57 .subminor = MLXSW_SP1_FWREV_SUBMINOR, 58 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, 59 }; 60 61 #define MLXSW_SP1_FW_FILENAME \ 62 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ 63 "." __stringify(MLXSW_SP1_FWREV_MINOR) \ 64 "." 
__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" 65 66 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; 67 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; 68 static const char mlxsw_sp_driver_version[] = "1.0"; 69 70 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 71 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 72 }; 73 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { 74 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 75 }; 76 77 /* tx_hdr_version 78 * Tx header version. 79 * Must be set to 1. 80 */ 81 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 82 83 /* tx_hdr_ctl 84 * Packet control type. 85 * 0 - Ethernet control (e.g. EMADs, LACP) 86 * 1 - Ethernet data 87 */ 88 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 89 90 /* tx_hdr_proto 91 * Packet protocol type. Must be set to 1 (Ethernet). 92 */ 93 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 94 95 /* tx_hdr_rx_is_router 96 * Packet is sent from the router. Valid for data packets only. 97 */ 98 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 99 100 /* tx_hdr_fid_valid 101 * Indicates if the 'fid' field is valid and should be used for 102 * forwarding lookup. Valid for data packets only. 103 */ 104 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 105 106 /* tx_hdr_swid 107 * Switch partition ID. Must be set to 0. 108 */ 109 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 110 111 /* tx_hdr_control_tclass 112 * Indicates if the packet should use the control TClass and not one 113 * of the data TClasses. 114 */ 115 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 116 117 /* tx_hdr_etclass 118 * Egress TClass to be used on the egress device on the egress port. 119 */ 120 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 121 122 /* tx_hdr_port_mid 123 * Destination local port for unicast packets. 124 * Destination multicast ID for multicast packets. 
125 * 126 * Control packets are directed to a specific egress port, while data 127 * packets are transmitted through the CPU port (0) into the switch partition, 128 * where forwarding rules are applied. 129 */ 130 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 131 132 /* tx_hdr_fid 133 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 134 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 135 * Valid for data packets only. 136 */ 137 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 138 139 /* tx_hdr_type 140 * 0 - Data packets 141 * 6 - Control packets 142 */ 143 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 144 145 struct mlxsw_sp_mlxfw_dev { 146 struct mlxfw_dev mlxfw_dev; 147 struct mlxsw_sp *mlxsw_sp; 148 }; 149 150 struct mlxsw_sp_ptp_ops { 151 struct mlxsw_sp_ptp_clock * 152 (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev); 153 void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock); 154 155 struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp); 156 void (*fini)(struct mlxsw_sp_ptp_state *ptp_state); 157 158 /* Notify a driver that a packet that might be PTP was received. Driver 159 * is responsible for freeing the passed-in SKB. 160 */ 161 void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 162 u8 local_port); 163 164 /* Notify a driver that a timestamped packet was transmitted. Driver 165 * is responsible for freeing the passed-in SKB. 
166 */ 167 void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 168 u8 local_port); 169 170 int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, 171 struct hwtstamp_config *config); 172 int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port, 173 struct hwtstamp_config *config); 174 void (*shaper_work)(struct work_struct *work); 175 int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, 176 struct ethtool_ts_info *info); 177 }; 178 179 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 180 u16 component_index, u32 *p_max_size, 181 u8 *p_align_bits, u16 *p_max_write_size) 182 { 183 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 184 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 185 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 186 char mcqi_pl[MLXSW_REG_MCQI_LEN]; 187 int err; 188 189 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 190 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 191 if (err) 192 return err; 193 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 194 p_max_write_size); 195 196 *p_align_bits = max_t(u8, *p_align_bits, 2); 197 *p_max_write_size = min_t(u16, *p_max_write_size, 198 MLXSW_REG_MCDA_MAX_DATA_LEN); 199 return 0; 200 } 201 202 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 203 { 204 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 205 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 206 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 207 char mcc_pl[MLXSW_REG_MCC_LEN]; 208 u8 control_state; 209 int err; 210 211 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 212 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 213 if (err) 214 return err; 215 216 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 217 if (control_state != MLXFW_FSM_STATE_IDLE) 218 return -EBUSY; 219 220 mlxsw_reg_mcc_pack(mcc_pl, 221 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 222 0, *fwhandle, 0); 223 return 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 224 } 225 226 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 227 u32 fwhandle, u16 component_index, 228 u32 component_size) 229 { 230 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 231 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 232 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 233 char mcc_pl[MLXSW_REG_MCC_LEN]; 234 235 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 236 component_index, fwhandle, component_size); 237 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 238 } 239 240 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 241 u32 fwhandle, u8 *data, u16 size, 242 u32 offset) 243 { 244 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 245 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 246 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 247 char mcda_pl[MLXSW_REG_MCDA_LEN]; 248 249 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 250 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 251 } 252 253 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 254 u32 fwhandle, u16 component_index) 255 { 256 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 257 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 258 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 259 char mcc_pl[MLXSW_REG_MCC_LEN]; 260 261 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 262 component_index, fwhandle, 0); 263 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 264 } 265 266 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 267 { 268 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 269 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 270 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 271 char mcc_pl[MLXSW_REG_MCC_LEN]; 272 273 mlxsw_reg_mcc_pack(mcc_pl, 
MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 274 fwhandle, 0); 275 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 276 } 277 278 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 279 enum mlxfw_fsm_state *fsm_state, 280 enum mlxfw_fsm_state_err *fsm_state_err) 281 { 282 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 283 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 284 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 285 char mcc_pl[MLXSW_REG_MCC_LEN]; 286 u8 control_state; 287 u8 error_code; 288 int err; 289 290 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 291 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 292 if (err) 293 return err; 294 295 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 296 *fsm_state = control_state; 297 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 298 MLXFW_FSM_STATE_ERR_MAX); 299 return 0; 300 } 301 302 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 303 { 304 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 305 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 306 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 307 char mcc_pl[MLXSW_REG_MCC_LEN]; 308 309 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 310 fwhandle, 0); 311 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 312 } 313 314 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 315 { 316 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 317 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 318 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 319 char mcc_pl[MLXSW_REG_MCC_LEN]; 320 321 mlxsw_reg_mcc_pack(mcc_pl, 322 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 323 fwhandle, 0); 324 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 325 } 326 327 static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev, 328 const char *msg, const char 
*comp_name, 329 u32 done_bytes, u32 total_bytes) 330 { 331 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 332 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 333 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 334 335 devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core), 336 msg, comp_name, 337 done_bytes, total_bytes); 338 } 339 340 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 341 .component_query = mlxsw_sp_component_query, 342 .fsm_lock = mlxsw_sp_fsm_lock, 343 .fsm_component_update = mlxsw_sp_fsm_component_update, 344 .fsm_block_download = mlxsw_sp_fsm_block_download, 345 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 346 .fsm_activate = mlxsw_sp_fsm_activate, 347 .fsm_query_state = mlxsw_sp_fsm_query_state, 348 .fsm_cancel = mlxsw_sp_fsm_cancel, 349 .fsm_release = mlxsw_sp_fsm_release, 350 .status_notify = mlxsw_sp_status_notify, 351 }; 352 353 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 354 const struct firmware *firmware, 355 struct netlink_ext_ack *extack) 356 { 357 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 358 .mlxfw_dev = { 359 .ops = &mlxsw_sp_mlxfw_dev_ops, 360 .psid = mlxsw_sp->bus_info->psid, 361 .psid_size = strlen(mlxsw_sp->bus_info->psid), 362 }, 363 .mlxsw_sp = mlxsw_sp 364 }; 365 int err; 366 367 mlxsw_core_fw_flash_start(mlxsw_sp->core); 368 devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core)); 369 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, 370 firmware, extack); 371 devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core)); 372 mlxsw_core_fw_flash_end(mlxsw_sp->core); 373 374 return err; 375 } 376 377 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 378 { 379 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 380 const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 381 const char *fw_filename = mlxsw_sp->fw_filename; 382 union devlink_param_value value; 383 const struct firmware *firmware; 384 
int err; 385 386 /* Don't check if driver does not require it */ 387 if (!req_rev || !fw_filename) 388 return 0; 389 390 /* Don't check if devlink 'fw_load_policy' param is 'flash' */ 391 err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), 392 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 393 &value); 394 if (err) 395 return err; 396 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) 397 return 0; 398 399 /* Validate driver & FW are compatible */ 400 if (rev->major != req_rev->major) { 401 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 402 rev->major, req_rev->major); 403 return -EINVAL; 404 } 405 if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == 406 MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && 407 (rev->minor > req_rev->minor || 408 (rev->minor == req_rev->minor && 409 rev->subminor >= req_rev->subminor))) 410 return 0; 411 412 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", 413 rev->major, rev->minor, rev->subminor); 414 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 415 fw_filename); 416 417 err = request_firmware_direct(&firmware, fw_filename, 418 mlxsw_sp->bus_info->dev); 419 if (err) { 420 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 421 fw_filename); 422 return err; 423 } 424 425 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL); 426 release_firmware(firmware); 427 if (err) 428 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 429 430 /* On FW flash success, tell the caller FW reset is needed 431 * if current FW supports it. 432 */ 433 if (rev->minor >= req_rev->can_reset_minor) 434 return err ? 
err : -EAGAIN; 435 else 436 return 0; 437 } 438 439 static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core, 440 const char *file_name, const char *component, 441 struct netlink_ext_ack *extack) 442 { 443 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 444 const struct firmware *firmware; 445 int err; 446 447 if (component) 448 return -EOPNOTSUPP; 449 450 err = request_firmware_direct(&firmware, file_name, 451 mlxsw_sp->bus_info->dev); 452 if (err) 453 return err; 454 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack); 455 release_firmware(firmware); 456 457 return err; 458 } 459 460 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 461 unsigned int counter_index, u64 *packets, 462 u64 *bytes) 463 { 464 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 465 int err; 466 467 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 468 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 469 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 470 if (err) 471 return err; 472 if (packets) 473 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 474 if (bytes) 475 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 476 return 0; 477 } 478 479 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 480 unsigned int counter_index) 481 { 482 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 483 484 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 485 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 486 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 487 } 488 489 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 490 unsigned int *p_counter_index) 491 { 492 int err; 493 494 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 495 p_counter_index); 496 if (err) 497 return err; 498 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 499 if (err) 500 goto err_counter_clear; 501 return 0; 502 503 err_counter_clear: 504 mlxsw_sp_counter_free(mlxsw_sp, 
MLXSW_SP_COUNTER_SUB_POOL_FLOW, 505 *p_counter_index); 506 return err; 507 } 508 509 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 510 unsigned int counter_index) 511 { 512 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 513 counter_index); 514 } 515 516 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 517 const struct mlxsw_tx_info *tx_info) 518 { 519 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 520 521 memset(txhdr, 0, MLXSW_TXHDR_LEN); 522 523 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 524 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 525 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 526 mlxsw_tx_hdr_swid_set(txhdr, 0); 527 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 528 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 529 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 530 } 531 532 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 533 { 534 switch (state) { 535 case BR_STATE_FORWARDING: 536 return MLXSW_REG_SPMS_STATE_FORWARDING; 537 case BR_STATE_LEARNING: 538 return MLXSW_REG_SPMS_STATE_LEARNING; 539 case BR_STATE_LISTENING: /* fall-through */ 540 case BR_STATE_DISABLED: /* fall-through */ 541 case BR_STATE_BLOCKING: 542 return MLXSW_REG_SPMS_STATE_DISCARDING; 543 default: 544 BUG(); 545 } 546 } 547 548 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 549 u8 state) 550 { 551 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 552 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 553 char *spms_pl; 554 int err; 555 556 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 557 if (!spms_pl) 558 return -ENOMEM; 559 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 560 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 561 562 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 563 kfree(spms_pl); 564 return err; 565 } 566 567 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 568 { 569 char 
spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 570 int err; 571 572 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 573 if (err) 574 return err; 575 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 576 return 0; 577 } 578 579 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, 580 bool enable, u32 rate) 581 { 582 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 583 char mpsc_pl[MLXSW_REG_MPSC_LEN]; 584 585 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); 586 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl); 587 } 588 589 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 590 bool is_up) 591 { 592 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 593 char paos_pl[MLXSW_REG_PAOS_LEN]; 594 595 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 596 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 597 MLXSW_PORT_ADMIN_STATUS_DOWN); 598 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 599 } 600 601 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 602 unsigned char *addr) 603 { 604 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 605 char ppad_pl[MLXSW_REG_PPAD_LEN]; 606 607 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 608 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 609 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 610 } 611 612 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 613 { 614 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 615 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 616 617 ether_addr_copy(addr, mlxsw_sp->base_mac); 618 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 619 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 620 } 621 622 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 623 { 624 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 625 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 626 int 
max_mtu; 627 int err; 628 629 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 630 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 631 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 632 if (err) 633 return err; 634 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 635 636 if (mtu > max_mtu) 637 return -EINVAL; 638 639 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 640 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 641 } 642 643 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 644 { 645 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 646 char pspa_pl[MLXSW_REG_PSPA_LEN]; 647 648 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 649 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 650 } 651 652 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 653 { 654 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 655 char svpe_pl[MLXSW_REG_SVPE_LEN]; 656 657 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 658 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 659 } 660 661 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 662 bool learn_enable) 663 { 664 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 665 char *spvmlr_pl; 666 int err; 667 668 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 669 if (!spvmlr_pl) 670 return -ENOMEM; 671 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 672 learn_enable); 673 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 674 kfree(spvmlr_pl); 675 return err; 676 } 677 678 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 679 u16 vid) 680 { 681 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 682 char spvid_pl[MLXSW_REG_SPVID_LEN]; 683 684 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 685 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), 
spvid_pl); 686 } 687 688 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 689 bool allow) 690 { 691 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 692 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 693 694 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 695 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 696 } 697 698 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 699 { 700 int err; 701 702 if (!vid) { 703 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 704 if (err) 705 return err; 706 } else { 707 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 708 if (err) 709 return err; 710 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 711 if (err) 712 goto err_port_allow_untagged_set; 713 } 714 715 mlxsw_sp_port->pvid = vid; 716 return 0; 717 718 err_port_allow_untagged_set: 719 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 720 return err; 721 } 722 723 static int 724 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 725 { 726 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 727 char sspr_pl[MLXSW_REG_SSPR_LEN]; 728 729 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 730 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 731 } 732 733 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, 734 u8 local_port, u8 *p_module, 735 u8 *p_width, u8 *p_lane) 736 { 737 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 738 int err; 739 740 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 741 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 742 if (err) 743 return err; 744 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 745 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); 746 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 747 return 0; 748 } 749 750 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port, 751 u8 module, u8 width, u8 lane) 752 { 753 struct mlxsw_sp *mlxsw_sp 
= mlxsw_sp_port->mlxsw_sp; 754 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 755 int i; 756 757 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 758 mlxsw_reg_pmlp_width_set(pmlp_pl, width); 759 for (i = 0; i < width; i++) { 760 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module); 761 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */ 762 } 763 764 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 765 } 766 767 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 768 { 769 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 770 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 771 772 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 773 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 774 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 775 } 776 777 static int mlxsw_sp_port_open(struct net_device *dev) 778 { 779 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 780 int err; 781 782 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 783 if (err) 784 return err; 785 netif_start_queue(dev); 786 return 0; 787 } 788 789 static int mlxsw_sp_port_stop(struct net_device *dev) 790 { 791 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 792 793 netif_stop_queue(dev); 794 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 795 } 796 797 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 798 struct net_device *dev) 799 { 800 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 801 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 802 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 803 const struct mlxsw_tx_info tx_info = { 804 .local_port = mlxsw_sp_port->local_port, 805 .is_emad = false, 806 }; 807 u64 len; 808 int err; 809 810 memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); 811 812 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 813 return NETDEV_TX_BUSY; 814 815 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { 816 struct sk_buff *skb_orig = skb; 817 818 skb = 
skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); 819 if (!skb) { 820 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 821 dev_kfree_skb_any(skb_orig); 822 return NETDEV_TX_OK; 823 } 824 dev_consume_skb_any(skb_orig); 825 } 826 827 if (eth_skb_pad(skb)) { 828 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 829 return NETDEV_TX_OK; 830 } 831 832 mlxsw_sp_txhdr_construct(skb, &tx_info); 833 /* TX header is consumed by HW on the way so we shouldn't count its 834 * bytes as being sent. 835 */ 836 len = skb->len - MLXSW_TXHDR_LEN; 837 838 /* Due to a race we might fail here because of a full queue. In that 839 * unlikely case we simply drop the packet. 840 */ 841 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 842 843 if (!err) { 844 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 845 u64_stats_update_begin(&pcpu_stats->syncp); 846 pcpu_stats->tx_packets++; 847 pcpu_stats->tx_bytes += len; 848 u64_stats_update_end(&pcpu_stats->syncp); 849 } else { 850 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 851 dev_kfree_skb_any(skb); 852 } 853 return NETDEV_TX_OK; 854 } 855 856 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 857 { 858 } 859 860 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 861 { 862 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 863 struct sockaddr *addr = p; 864 int err; 865 866 if (!is_valid_ether_addr(addr->sa_data)) 867 return -EADDRNOTAVAIL; 868 869 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 870 if (err) 871 return err; 872 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 873 return 0; 874 } 875 876 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 877 int mtu) 878 { 879 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 880 } 881 882 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 883 884 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 885 u16 delay) 886 { 887 delay = 
mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 888 BITS_PER_BYTE)); 889 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 890 mtu); 891 } 892 893 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 894 * Assumes 100m cable and maximum MTU. 895 */ 896 #define MLXSW_SP_PAUSE_DELAY 58752 897 898 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 899 u16 delay, bool pfc, bool pause) 900 { 901 if (pfc) 902 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 903 else if (pause) 904 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 905 else 906 return 0; 907 } 908 909 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 910 bool lossy) 911 { 912 if (lossy) 913 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 914 else 915 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 916 thres); 917 } 918 919 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 920 u8 *prio_tc, bool pause_en, 921 struct ieee_pfc *my_pfc) 922 { 923 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 924 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 925 u16 delay = !!my_pfc ? 
my_pfc->delay : 0; 926 char pbmc_pl[MLXSW_REG_PBMC_LEN]; 927 u32 taken_headroom_cells = 0; 928 u32 max_headroom_cells; 929 int i, j, err; 930 931 max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp); 932 933 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); 934 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 935 if (err) 936 return err; 937 938 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 939 bool configure = false; 940 bool pfc = false; 941 u16 thres_cells; 942 u16 delay_cells; 943 u16 total_cells; 944 bool lossy; 945 946 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 947 if (prio_tc[j] == i) { 948 pfc = pfc_en & BIT(j); 949 configure = true; 950 break; 951 } 952 } 953 954 if (!configure) 955 continue; 956 957 lossy = !(pfc || pause_en); 958 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 959 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, 960 pfc, pause_en); 961 total_cells = thres_cells + delay_cells; 962 963 taken_headroom_cells += total_cells; 964 if (taken_headroom_cells > max_headroom_cells) 965 return -ENOBUFS; 966 967 mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells, 968 thres_cells, lossy); 969 } 970 971 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 972 } 973 974 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, 975 int mtu, bool pause_en) 976 { 977 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; 978 bool dcb_en = !!mlxsw_sp_port->dcb.ets; 979 struct ieee_pfc *my_pfc; 980 u8 *prio_tc; 981 982 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; 983 my_pfc = dcb_en ? 
mlxsw_sp_port->dcb.pfc : NULL; 984 985 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, 986 pause_en, my_pfc); 987 } 988 989 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 990 { 991 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 992 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 993 int err; 994 995 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); 996 if (err) 997 return err; 998 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); 999 if (err) 1000 goto err_span_port_mtu_update; 1001 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 1002 if (err) 1003 goto err_port_mtu_set; 1004 dev->mtu = mtu; 1005 return 0; 1006 1007 err_port_mtu_set: 1008 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); 1009 err_span_port_mtu_update: 1010 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1011 return err; 1012 } 1013 1014 static int 1015 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 1016 struct rtnl_link_stats64 *stats) 1017 { 1018 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1019 struct mlxsw_sp_port_pcpu_stats *p; 1020 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1021 u32 tx_dropped = 0; 1022 unsigned int start; 1023 int i; 1024 1025 for_each_possible_cpu(i) { 1026 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 1027 do { 1028 start = u64_stats_fetch_begin_irq(&p->syncp); 1029 rx_packets = p->rx_packets; 1030 rx_bytes = p->rx_bytes; 1031 tx_packets = p->tx_packets; 1032 tx_bytes = p->tx_bytes; 1033 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 1034 1035 stats->rx_packets += rx_packets; 1036 stats->rx_bytes += rx_bytes; 1037 stats->tx_packets += tx_packets; 1038 stats->tx_bytes += tx_bytes; 1039 /* tx_dropped is u32, updated without syncp protection. 
*/ 1040 tx_dropped += p->tx_dropped; 1041 } 1042 stats->tx_dropped = tx_dropped; 1043 return 0; 1044 } 1045 1046 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) 1047 { 1048 switch (attr_id) { 1049 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1050 return true; 1051 } 1052 1053 return false; 1054 } 1055 1056 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev, 1057 void *sp) 1058 { 1059 switch (attr_id) { 1060 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 1061 return mlxsw_sp_port_get_sw_stats64(dev, sp); 1062 } 1063 1064 return -EINVAL; 1065 } 1066 1067 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, 1068 int prio, char *ppcnt_pl) 1069 { 1070 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1071 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1072 1073 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); 1074 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 1075 } 1076 1077 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev, 1078 struct rtnl_link_stats64 *stats) 1079 { 1080 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1081 int err; 1082 1083 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 1084 0, ppcnt_pl); 1085 if (err) 1086 goto out; 1087 1088 stats->tx_packets = 1089 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); 1090 stats->rx_packets = 1091 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); 1092 stats->tx_bytes = 1093 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); 1094 stats->rx_bytes = 1095 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 1096 stats->multicast = 1097 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 1098 1099 stats->rx_crc_errors = 1100 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 1101 stats->rx_frame_errors = 1102 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 1103 1104 stats->rx_length_errors = ( 1105 
/* mlxsw_sp_port_get_hw_xstats - read extended HW counters into @xstats.
 * @dev: port netdev.
 * @xstats: output structure (ECN marks, per-TC WRED/backlog/tail-drop,
 *	    per-priority tx packets/bytes).
 *
 * Each counter group is fetched with a separate PPCNT query. Reads are
 * best-effort: a failed query simply leaves the corresponding @xstats
 * fields untouched and the function moves on to the next group.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		/* Congestion (WRED) group, one query per traffic class. */
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		/* Per-TC group: transmit queue depth and tail drops. */
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
&mlxsw_sp_port->periodic_hw_stats.xstats); 1171 1172 out: 1173 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 1174 MLXSW_HW_STATS_UPDATE_TIME); 1175 } 1176 1177 /* Return the stats from a cache that is updated periodically, 1178 * as this function might get called in an atomic context. 1179 */ 1180 static void 1181 mlxsw_sp_port_get_stats64(struct net_device *dev, 1182 struct rtnl_link_stats64 *stats) 1183 { 1184 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1185 1186 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats)); 1187 } 1188 1189 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, 1190 u16 vid_begin, u16 vid_end, 1191 bool is_member, bool untagged) 1192 { 1193 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1194 char *spvm_pl; 1195 int err; 1196 1197 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); 1198 if (!spvm_pl) 1199 return -ENOMEM; 1200 1201 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, 1202 vid_end, is_member, untagged); 1203 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); 1204 kfree(spvm_pl); 1205 return err; 1206 } 1207 1208 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, 1209 u16 vid_end, bool is_member, bool untagged) 1210 { 1211 u16 vid, vid_e; 1212 int err; 1213 1214 for (vid = vid_begin; vid <= vid_end; 1215 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 1216 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), 1217 vid_end); 1218 1219 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, 1220 is_member, untagged); 1221 if (err) 1222 return err; 1223 } 1224 1225 return 0; 1226 } 1227 1228 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port, 1229 bool flush_default) 1230 { 1231 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp; 1232 1233 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp, 1234 &mlxsw_sp_port->vlans_list, list) { 1235 if (!flush_default && 1236 
/* mlxsw_sp_port_vlan_create - create a {port, VID} entry.
 * @mlxsw_sp_port: port the VLAN is added on.
 * @vid: VLAN ID; MLXSW_SP_DEFAULT_VID is programmed as egress-untagged.
 *
 * The VID is first programmed to hardware and only then tracked in
 * software, so a failed allocation can unwind the HW state. Returns the
 * new entry, ERR_PTR(-EEXIST) if the VID is already present, or another
 * ERR_PTR on failure.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	/* Undo the HW membership programmed above. */
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1298 1299 /* VLAN 0 is added to HW filter when device goes up, but it is 1300 * reserved in our case, so simply return. 1301 */ 1302 if (!vid) 1303 return 0; 1304 1305 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid)); 1306 } 1307 1308 static int mlxsw_sp_port_kill_vid(struct net_device *dev, 1309 __be16 __always_unused proto, u16 vid) 1310 { 1311 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1312 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1313 1314 /* VLAN 0 is removed from HW filter when device goes down, but 1315 * it is reserved in our case, so simply return. 1316 */ 1317 if (!vid) 1318 return 0; 1319 1320 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1321 if (!mlxsw_sp_port_vlan) 1322 return 0; 1323 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1324 1325 return 0; 1326 } 1327 1328 static struct mlxsw_sp_port_mall_tc_entry * 1329 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port, 1330 unsigned long cookie) { 1331 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1332 1333 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list) 1334 if (mall_tc_entry->cookie == cookie) 1335 return mall_tc_entry; 1336 1337 return NULL; 1338 } 1339 1340 static int 1341 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, 1342 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror, 1343 const struct flow_action_entry *act, 1344 bool ingress) 1345 { 1346 enum mlxsw_sp_span_type span_type; 1347 1348 if (!act->dev) { 1349 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n"); 1350 return -EINVAL; 1351 } 1352 1353 mirror->ingress = ingress; 1354 span_type = ingress ? 
/* mlxsw_sp_port_add_cls_matchall_sample - offload a matchall sample action.
 * @mlxsw_sp_port: port the rule is installed on.
 * @cls: matchall offload descriptor (currently unused here).
 * @act: the FLOW_ACTION_SAMPLE entry carrying rate/group/truncation.
 * @ingress: binding direction of the rule.
 *
 * Only one active sampler per port is supported (guarded by the
 * psample_group pointer). The group pointer is published with
 * rcu_assign_pointer() before the HW sampler is enabled so the Rx
 * path never sees sampled packets without a group to deliver them to;
 * on HW failure it is cleared again.
 * Returns 0 or a negative errno.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   act->sample.psample_group);
	mlxsw_sp_port->sample->truncate = act->sample.truncate;
	mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
	mlxsw_sp_port->sample->rate = act->sample.rate;

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}
/* mlxsw_sp_port_add_cls_matchall - offload a tc matchall classifier.
 * @mlxsw_sp_port: port the classifier is attached to.
 * @f: matchall offload request; f->cookie keys later removal.
 * @ingress: binding direction.
 *
 * Exactly one action is supported per rule: either mirred (port
 * mirroring via SPAN) or sample, and only for protocol "all".
 * On success the entry is tracked on the port's mall_tc_list so
 * mlxsw_sp_port_del_cls_matchall() can find it by cookie.
 * Returns 0 or a negative errno.
 */
static int
mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct tc_cls_matchall_offload *f,
			       bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	int err;

	if (!flow_offload_has_one_action(&f->rule->action)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	act = &f->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, act,
							    ingress);
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    act, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}
/* mlxsw_sp_setup_tc_cls_flower - dispatch a flower classifier command
 * to the corresponding ACL/flower handler for @acl_block.
 * Returns 0 or a negative errno; unknown commands get -EOPNOTSUPP.
 */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case FLOW_CLS_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case FLOW_CLS_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case FLOW_CLS_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
/* Flow block callback for flower rules. Matchall is handled by a
 * separate per-direction callback, so it is accepted here as a no-op.
 * Flower setup is refused while the block is disabled (hw-tc-offload
 * turned off on a bound port).
 */
static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
/* mlxsw_sp_setup_tc_block_flower_unbind - undo a flower block binding.
 * @mlxsw_sp_port: port being unbound.
 * @f: flow block offload request.
 * @ingress: binding direction.
 *
 * The shared block callback is refcounted across ports: the port's
 * binding is removed first, and the callback itself is removed from the
 * block only when the last reference drops (and only if the unbind
 * succeeded, to keep refcounting consistent).
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb)
		return;

	/* Clear the port's cached block pointer for this direction. */
	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = flow_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}
/* ndo_setup_tc entry point: route the tc offload request to the
 * block, RED-qdisc or PRIO-qdisc handler. Anything else is not
 * offloadable by this driver.
 */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
/* mlxsw_sp_feature_loopback - toggle NETIF_F_LOOPBACK via the PPLR
 * register. The port is administratively taken down around the write
 * and brought back up afterwards (only if the netdev was running), as
 * the loopback mode change requires it; the admin toggles themselves
 * are best-effort and their status is not checked.
 * Returns the PPLR write status.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}
/* ndo_set_features handler: apply each toggleable feature through its
 * handler. NOTE(review): errors are OR-ed together, so both handlers
 * always run and the individual errno values are deliberately collapsed
 * into a single -EINVAL; dev->features is restored to its prior value
 * on any failure (mlxsw_sp_handle_feature only updates it on success).
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}
1886 { 1887 struct hwtstamp_config config = {0}; 1888 1889 mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config); 1890 } 1891 1892 static int 1893 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1894 { 1895 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1896 1897 switch (cmd) { 1898 case SIOCSHWTSTAMP: 1899 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr); 1900 case SIOCGHWTSTAMP: 1901 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr); 1902 default: 1903 return -EOPNOTSUPP; 1904 } 1905 } 1906 1907 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1908 .ndo_open = mlxsw_sp_port_open, 1909 .ndo_stop = mlxsw_sp_port_stop, 1910 .ndo_start_xmit = mlxsw_sp_port_xmit, 1911 .ndo_setup_tc = mlxsw_sp_setup_tc, 1912 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1913 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1914 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1915 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1916 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1917 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1918 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1919 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1920 .ndo_set_features = mlxsw_sp_set_features, 1921 .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port, 1922 .ndo_do_ioctl = mlxsw_sp_port_ioctl, 1923 }; 1924 1925 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 1926 struct ethtool_drvinfo *drvinfo) 1927 { 1928 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1929 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1930 1931 strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, 1932 sizeof(drvinfo->driver)); 1933 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 1934 sizeof(drvinfo->version)); 1935 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 1936 "%d.%d.%d", 1937 mlxsw_sp->bus_info->fw_rev.major, 1938 mlxsw_sp->bus_info->fw_rev.minor, 1939 
/* mlxsw_sp_port_pause_set - program global PAUSE settings via the PFCC
 * register (pprx = honor received PAUSE frames, pptx = emit PAUSE
 * frames). Returns the register write status.
 */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}
/* Descriptor of a single ethtool counter extracted from a PPCNT payload.
 * @str: counter name exposed via ethtool -S (bounded by ETH_GSTRING_LEN).
 * @getter: pulls the 64-bit value out of a raw PPCNT register payload.
 * @cells_bytes: presumably marks counters reported in device buffer
 *	cells rather than bytes — the consumer is outside this chunk,
 *	confirm against the stats-reading code.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};
/* RFC 2863 (Interfaces Group MIB) counter group, read via PPCNT. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
	{
		.str = "if_in_discards",
		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
	},
	{
		.str = "if_out_discards",
		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
	},
	{
		.str = "if_out_errors",
		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)
.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, 2135 }, 2136 { 2137 .str = "ether_pkts256to511octets", 2138 .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, 2139 }, 2140 { 2141 .str = "ether_pkts512to1023octets", 2142 .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, 2143 }, 2144 { 2145 .str = "ether_pkts1024to1518octets", 2146 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, 2147 }, 2148 { 2149 .str = "ether_pkts1519to2047octets", 2150 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, 2151 }, 2152 { 2153 .str = "ether_pkts2048to4095octets", 2154 .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, 2155 }, 2156 { 2157 .str = "ether_pkts4096to8191octets", 2158 .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get, 2159 }, 2160 { 2161 .str = "ether_pkts8192to10239octets", 2162 .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get, 2163 }, 2164 }; 2165 2166 #define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ 2167 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) 2168 2169 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = { 2170 { 2171 .str = "dot3stats_fcs_errors", 2172 .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get, 2173 }, 2174 { 2175 .str = "dot3stats_symbol_errors", 2176 .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get, 2177 }, 2178 { 2179 .str = "dot3control_in_unknown_opcodes", 2180 .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get, 2181 }, 2182 { 2183 .str = "dot3in_pause_frames", 2184 .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get, 2185 }, 2186 }; 2187 2188 #define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \ 2189 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats) 2190 2191 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = { 2192 { 2193 .str = "discard_ingress_general", 2194 .getter = mlxsw_reg_ppcnt_ingress_general_get, 2195 }, 2196 { 2197 .str = "discard_ingress_policy_engine", 2198 .getter = 
mlxsw_reg_ppcnt_ingress_policy_engine_get, 2199 }, 2200 { 2201 .str = "discard_ingress_vlan_membership", 2202 .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get, 2203 }, 2204 { 2205 .str = "discard_ingress_tag_frame_type", 2206 .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get, 2207 }, 2208 { 2209 .str = "discard_egress_vlan_membership", 2210 .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get, 2211 }, 2212 { 2213 .str = "discard_loopback_filter", 2214 .getter = mlxsw_reg_ppcnt_loopback_filter_get, 2215 }, 2216 { 2217 .str = "discard_egress_general", 2218 .getter = mlxsw_reg_ppcnt_egress_general_get, 2219 }, 2220 { 2221 .str = "discard_egress_hoq", 2222 .getter = mlxsw_reg_ppcnt_egress_hoq_get, 2223 }, 2224 { 2225 .str = "discard_egress_policy_engine", 2226 .getter = mlxsw_reg_ppcnt_egress_policy_engine_get, 2227 }, 2228 { 2229 .str = "discard_ingress_tx_link_down", 2230 .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get, 2231 }, 2232 { 2233 .str = "discard_egress_stp_filter", 2234 .getter = mlxsw_reg_ppcnt_egress_stp_filter_get, 2235 }, 2236 { 2237 .str = "discard_egress_sll", 2238 .getter = mlxsw_reg_ppcnt_egress_sll_get, 2239 }, 2240 }; 2241 2242 #define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \ 2243 ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats) 2244 2245 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 2246 { 2247 .str = "rx_octets_prio", 2248 .getter = mlxsw_reg_ppcnt_rx_octets_get, 2249 }, 2250 { 2251 .str = "rx_frames_prio", 2252 .getter = mlxsw_reg_ppcnt_rx_frames_get, 2253 }, 2254 { 2255 .str = "tx_octets_prio", 2256 .getter = mlxsw_reg_ppcnt_tx_octets_get, 2257 }, 2258 { 2259 .str = "tx_frames_prio", 2260 .getter = mlxsw_reg_ppcnt_tx_frames_get, 2261 }, 2262 { 2263 .str = "rx_pause_prio", 2264 .getter = mlxsw_reg_ppcnt_rx_pause_get, 2265 }, 2266 { 2267 .str = "rx_pause_duration_prio", 2268 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 2269 }, 2270 { 2271 .str = "tx_pause_prio", 2272 .getter = mlxsw_reg_ppcnt_tx_pause_get, 2273 }, 
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

/* Per-TC counters; dumped once per traffic class. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		/* Hardware reports cells; converted to bytes in
		 * __mlxsw_sp_port_get_stats().
		 */
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total number of counters exposed via ethtool -S. Must stay in sync with
 * both mlxsw_sp_port_get_strings() and mlxsw_sp_port_get_stats().
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
					 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
					  IEEE_8021QAZ_MAX_TCS) + \
					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
					  TC_MAX_QUEUE))

/* Emit the per-priority counter names as "<stat>_<prio>" and advance *p past
 * the emitted strings (one ETH_GSTRING_LEN slot each).
 */
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

/* Emit the per-TC counter names as "<stat>_<tc>" and advance *p. */
static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

/* ethtool .get_strings callback. The emission order here must match the
 * value order produced by mlxsw_sp_port_get_stats().
 */
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < TC_MAX_QUEUE; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}

/* ethtool .set_phys_id callback: toggle the port LED via the MLCR register.
 * Only steady on/off is supported; blinking states return -EOPNOTSUPP.
 */
static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

/* Map a PPCNT counter group to its table and length. Returns 0 on success,
 * -EOPNOTSUPP (with a WARN) for an unknown group.
 */
static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2863_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2819_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_3635_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_DISCARD_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_discard_stats;
		*p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

/* Fetch one PPCNT counter group for @prio and store the values into
 * data[data_index..], converting cell counts to bytes where the table entry
 * requests it.
 */
static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}

/* ethtool .get_ethtool_stats callback. Group order must match
 * mlxsw_sp_port_get_strings().
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;
	/* RFC 2863 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;

	/* RFC 2819 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;

	/* RFC 3635 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;

	/* Discard Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

/* ethtool .get_sset_count callback: number of strings in a string set. */
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* Mapping between a Spectrum-1 PTYS speed capability bit mask and the
 * corresponding ethtool link mode bit and speed value.
 */
struct mlxsw_sp1_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

/* Spectrum-1 link mode table. Lookups by speed are first-match, so entry
 * order matters.
 */
static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed = SPEED_20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed = SPEED_50000,
	},
	/* The single 56G PTYS bit maps onto four distinct ethtool modes. */
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed = SPEED_100000,
	},
};

#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)

/* Advertise FIBRE/Backplane in the supported mask according to which PTYS
 * capability bits are set.
 */
static void
mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Translate a PTYS protocol bitmap into ethtool link mode bits. */
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			__set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

/* Return the speed of the first link mode whose mask matches, or
 * SPEED_UNKNOWN if none does.
 */
static u32
mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			return mlxsw_sp1_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill ethtool speed/duplex; both stay UNKNOWN without carrier. */
static void
mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* Build a PTYS protocol bitmap from the ethtool advertised link modes. */
static u32
mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Build a PTYS protocol bitmap for an exact forced speed. */
static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp1_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Build a PTYS protocol bitmap of all modes at or below @upper_speed. */
static u32
mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* On Spectrum-1, the per-lane base speed is a fixed 25G for all ports. */
static int
mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			  u32 *base_speed)
{
	*base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
	return 0;
}

/* Spectrum-1 uses the plain (non-extended) PTYS Ethernet fields. */
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin, bool autoneg)
{
	mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}

static void
mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
				  p_eth_proto_oper);
}

/* Spectrum-1 implementation of the per-ASIC port type/speed operations. */
static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp1_port_type_speed_ops = {
	.from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
	.from_ptys_link = mlxsw_sp1_from_ptys_link,
	.from_ptys_speed = mlxsw_sp1_from_ptys_speed,
	.from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
	.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
	.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
	.to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed,
	.port_speed_base = mlxsw_sp1_port_speed_base,
	.reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
};

/* On Spectrum-2, a single extended-PTYS capability bit covers several
 * ethtool link modes; each group below lists the modes for one bit.
 */
static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)

/* Mapping between one Spectrum-2 extended-PTYS capability bit and the group
 * of ethtool link modes it represents.
 */
struct mlxsw_sp2_port_link_mode {
	const enum ethtool_link_mode_bit_indices *mask_ethtool;
	int m_ethtool_len;
	u32 mask;
	u32 speed;
};

/* Spectrum-2 link mode table. Lookups are first-match; order matters. */
static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
		.speed = SPEED_2500,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
		.speed = SPEED_5000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
		.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
		.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
		.speed = SPEED_200000,
	},
};

#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)

/* On Spectrum-2 both port types are reported unconditionally, regardless of
 * the capability bitmap.
 */
static void
mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Set every ethtool bit belonging to one link-mode group. */
static void
mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
			  unsigned long *mode)
{
	int i;

	for (i = 0; i < link_mode->m_ethtool_len; i++)
		__set_bit(link_mode->mask_ethtool[i], mode);
}

/* Translate an extended-PTYS protocol bitmap into ethtool link mode bits. */
static void
mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
			mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
						  mode);
	}
}

/* Return the speed of the first matching link mode, or SPEED_UNKNOWN. */
static u32
mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
			return mlxsw_sp2_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill ethtool speed/duplex; both stay UNKNOWN without carrier. */
static void
mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;
3060 cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 3061 if (cmd->base.speed != SPEED_UNKNOWN) 3062 cmd->base.duplex = DUPLEX_FULL; 3063 } 3064 3065 static bool 3066 mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3067 const unsigned long *mode) 3068 { 3069 int cnt = 0; 3070 int i; 3071 3072 for (i = 0; i < link_mode->m_ethtool_len; i++) { 3073 if (test_bit(link_mode->mask_ethtool[i], mode)) 3074 cnt++; 3075 } 3076 3077 return cnt == link_mode->m_ethtool_len; 3078 } 3079 3080 static u32 3081 mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, 3082 const struct ethtool_link_ksettings *cmd) 3083 { 3084 u32 ptys_proto = 0; 3085 int i; 3086 3087 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3088 if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3089 cmd->link_modes.advertising)) 3090 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3091 } 3092 return ptys_proto; 3093 } 3094 3095 static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed) 3096 { 3097 u32 ptys_proto = 0; 3098 int i; 3099 3100 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3101 if (speed == mlxsw_sp2_port_link_mode[i].speed) 3102 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3103 } 3104 return ptys_proto; 3105 } 3106 3107 static u32 3108 mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 3109 { 3110 u32 ptys_proto = 0; 3111 int i; 3112 3113 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3114 if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed) 3115 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3116 } 3117 return ptys_proto; 3118 } 3119 3120 static int 3121 mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3122 u32 *base_speed) 3123 { 3124 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3125 u32 eth_proto_cap; 3126 int err; 3127 3128 /* In Spectrum-2, the speed of 1x can change from port to port, so query 3129 * it from firmware. 
3130 */ 3131 mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false); 3132 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3133 if (err) 3134 return err; 3135 mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 3136 3137 if (eth_proto_cap & 3138 MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) { 3139 *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G; 3140 return 0; 3141 } 3142 3143 if (eth_proto_cap & 3144 MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) { 3145 *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; 3146 return 0; 3147 } 3148 3149 return -EIO; 3150 } 3151 3152 static void 3153 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, 3154 u8 local_port, u32 proto_admin, 3155 bool autoneg) 3156 { 3157 mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); 3158 } 3159 3160 static void 3161 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, 3162 u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, 3163 u32 *p_eth_proto_oper) 3164 { 3165 mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, 3166 p_eth_proto_admin, p_eth_proto_oper); 3167 } 3168 3169 static const struct mlxsw_sp_port_type_speed_ops 3170 mlxsw_sp2_port_type_speed_ops = { 3171 .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, 3172 .from_ptys_link = mlxsw_sp2_from_ptys_link, 3173 .from_ptys_speed = mlxsw_sp2_from_ptys_speed, 3174 .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, 3175 .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, 3176 .to_ptys_speed = mlxsw_sp2_to_ptys_speed, 3177 .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed, 3178 .port_speed_base = mlxsw_sp2_port_speed_base, 3179 .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, 3180 .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, 3181 }; 3182 3183 static void 3184 mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, 3185 struct ethtool_link_ksettings *cmd) 3186 { 3187 const struct 
mlxsw_sp_port_type_speed_ops *ops; 3188 3189 ops = mlxsw_sp->port_type_speed_ops; 3190 3191 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 3192 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 3193 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 3194 3195 ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); 3196 ops->from_ptys_link(mlxsw_sp, eth_proto_cap, cmd->link_modes.supported); 3197 } 3198 3199 static void 3200 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, 3201 u32 eth_proto_admin, bool autoneg, 3202 struct ethtool_link_ksettings *cmd) 3203 { 3204 const struct mlxsw_sp_port_type_speed_ops *ops; 3205 3206 ops = mlxsw_sp->port_type_speed_ops; 3207 3208 if (!autoneg) 3209 return; 3210 3211 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 3212 ops->from_ptys_link(mlxsw_sp, eth_proto_admin, 3213 cmd->link_modes.advertising); 3214 } 3215 3216 static u8 3217 mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type) 3218 { 3219 switch (connector_type) { 3220 case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: 3221 return PORT_OTHER; 3222 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: 3223 return PORT_NONE; 3224 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: 3225 return PORT_TP; 3226 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: 3227 return PORT_AUI; 3228 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: 3229 return PORT_BNC; 3230 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: 3231 return PORT_MII; 3232 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: 3233 return PORT_FIBRE; 3234 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: 3235 return PORT_DA; 3236 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: 3237 return PORT_OTHER; 3238 default: 3239 WARN_ON_ONCE(1); 3240 return PORT_OTHER; 3241 } 3242 } 3243 3244 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 3245 struct ethtool_link_ksettings *cmd) 3246 { 3247 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 
3248 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3249 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3250 const struct mlxsw_sp_port_type_speed_ops *ops; 3251 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3252 u8 connector_type; 3253 bool autoneg; 3254 int err; 3255 3256 ops = mlxsw_sp->port_type_speed_ops; 3257 3258 autoneg = mlxsw_sp_port->link.autoneg; 3259 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3260 0, false); 3261 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3262 if (err) 3263 return err; 3264 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3265 ð_proto_admin, ð_proto_oper); 3266 3267 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd); 3268 3269 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3270 cmd); 3271 3272 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 3273 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3274 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3275 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3276 eth_proto_oper, cmd); 3277 3278 return 0; 3279 } 3280 3281 static int 3282 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3283 const struct ethtool_link_ksettings *cmd) 3284 { 3285 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3286 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3287 const struct mlxsw_sp_port_type_speed_ops *ops; 3288 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3289 u32 eth_proto_cap, eth_proto_new; 3290 bool autoneg; 3291 int err; 3292 3293 ops = mlxsw_sp->port_type_speed_ops; 3294 3295 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3296 0, false); 3297 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3298 if (err) 3299 return err; 3300 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3301 3302 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 3303 if (!autoneg && cmd->base.speed == SPEED_56000) { 3304 
netdev_err(dev, "56G not supported with autoneg off\n"); 3305 return -EINVAL; 3306 } 3307 eth_proto_new = autoneg ? 3308 ops->to_ptys_advert_link(mlxsw_sp, cmd) : 3309 ops->to_ptys_speed(mlxsw_sp, cmd->base.speed); 3310 3311 eth_proto_new = eth_proto_new & eth_proto_cap; 3312 if (!eth_proto_new) { 3313 netdev_err(dev, "No supported speed requested\n"); 3314 return -EINVAL; 3315 } 3316 3317 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3318 eth_proto_new, autoneg); 3319 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3320 if (err) 3321 return err; 3322 3323 mlxsw_sp_port->link.autoneg = autoneg; 3324 3325 if (!netif_running(dev)) 3326 return 0; 3327 3328 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3329 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3330 3331 return 0; 3332 } 3333 3334 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3335 struct ethtool_modinfo *modinfo) 3336 { 3337 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3338 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3339 int err; 3340 3341 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3342 mlxsw_sp_port->mapping.module, 3343 modinfo); 3344 3345 return err; 3346 } 3347 3348 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3349 struct ethtool_eeprom *ee, 3350 u8 *data) 3351 { 3352 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3353 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3354 int err; 3355 3356 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3357 mlxsw_sp_port->mapping.module, ee, 3358 data); 3359 3360 return err; 3361 } 3362 3363 static int 3364 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) 3365 { 3366 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3367 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3368 3369 return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); 3370 } 3371 3372 static const struct ethtool_ops 
mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
	.get_module_info	= mlxsw_sp_get_module_info,
	.get_module_eeprom	= mlxsw_sp_get_module_eeprom,
	.get_ts_info		= mlxsw_sp_get_ts_info,
};

/* Enable, via PTYS, all link modes up to the port's maximum speed, which is
 * the per-lane base speed (queried from firmware) times the port width.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;
	u32 upper_speed;
	u32 base_speed;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port,
				   &base_speed);
	if (err)
		return err;
	upper_speed = base_speed * width;

	eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_admin, mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

/* Configure one ETS element (QEEC register): link it to @next_index in the
 * hierarchy and set its DWRR mode and weight.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the max shaper rate of one ETS element. */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the min shaper (guaranteed bandwidth) of one ETS element. */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Map a switch priority to a traffic class (QTCT register). */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

/* Build the port's default ETS hierarchy and shaper configuration.
 * TC indices i + 8 are the multicast counterparts of unicast TCs i.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* Multicast TC, DWRR with weight 100. */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HIERARCY_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0.
 */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

/* Enable/disable multicast-aware TC mapping for the port (QTCTM register). */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

/* Create and register the netdev for one front-panel port and bring up all
 * of its hardware state (module mapping, SWID, MAC, MTU, buffers, ETS, DCB,
 * FIDs, qdiscs, NVE, default VLAN). The error-unwind ladder at the bottom
 * tears down in exact reverse order of initialization.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   module + 1, split, lane / width,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	/* Publish the port before register_netdev() so callbacks can look
	 * it up by local port.
	 */
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

/* Tear down one port; mirrors mlxsw_sp_port_create() in reverse order. */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* All VLAN entries should have been flushed above. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

/* Return true if a port instance exists for @local_port. */
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

/* Remove all existing ports and free the port lookup arrays. */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
	kfree(mlxsw_sp->ports);
}

/* Enumerate all local ports, record their module mapping and create a port
 * for each one that has a non-zero width. Local port numbers start at 1.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
						 GFP_KERNEL);
	if (!mlxsw_sp->port_to_module) {
		err = -ENOMEM;
		goto err_port_to_module_alloc;
	}

	for (i = 1; i < max_ports; i++) {
		/* Mark as invalid */
		mlxsw_sp->port_to_module[i] = -1;

		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
	kfree(mlxsw_sp->ports);
	return err;
}

/* Return the first local port of the cluster containing @local_port. */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}

/* Create @count split ports from @base_port, @offset local ports apart,
 * each using an equal share of the module's lanes.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count, u8 offset)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   true, module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	/* Unwind only the ports created so far. */
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}

/* Re-create the original unsplit (full-width) ports after a split is
 * removed or fails.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		/* Skip ports whose module mapping was marked invalid. */
		if (mlxsw_sp->port_to_module[local_port] < 0)
			continue;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}

/* devlink port split handler: replace one full-width port with @count
 * (2 or 4) split ports. On failure the original ports are re-created.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 local_ports_in_1x, local_ports_in_2x, offset;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
		return -EIO;

	local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
	local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
		return -EINVAL;
	}

	/* Only a full-width port can be split. */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		offset = local_ports_in_2x;
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + local_ports_in_2x]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	} else {
		offset = local_ports_in_1x;
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Remove the ports being replaced before creating the split ones. */
	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count,
					 offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

/* devlink port unsplit handler: remove the split ports and re-create the
 * original full-width port(s).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 local_ports_in_1x, local_ports_in_2x, offset;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
		return -EIO;

	local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
	local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	/* Infer the original split factor from the current width. */
	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	if (count == 2)
		offset = local_ports_in_2x;
	else
		offset = local_ports_in_1x;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}

/* PUDE event handler: propagate hardware link state changes to the netdev
 * carrier state and kick the PTP shaper work on link up.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

/* Deliver every timestamp record in an MTPPTR FIFO event to the SP1 PTP
 * code. @ingress distinguishes the Rx FIFO from the Tx FIFO.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u8 local_port;
	u8 num_rec;
	int i;

	local_port =
mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 4107 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 4108 for (i = 0; i < num_rec; i++) { 4109 u8 domain_number; 4110 u8 message_type; 4111 u16 sequence_id; 4112 u64 timestamp; 4113 4114 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 4115 &domain_number, &sequence_id, 4116 ×tamp); 4117 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 4118 message_type, domain_number, 4119 sequence_id, timestamp); 4120 } 4121 } 4122 4123 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 4124 char *mtpptr_pl, void *priv) 4125 { 4126 struct mlxsw_sp *mlxsw_sp = priv; 4127 4128 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 4129 } 4130 4131 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 4132 char *mtpptr_pl, void *priv) 4133 { 4134 struct mlxsw_sp *mlxsw_sp = priv; 4135 4136 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 4137 } 4138 4139 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 4140 u8 local_port, void *priv) 4141 { 4142 struct mlxsw_sp *mlxsw_sp = priv; 4143 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4144 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 4145 4146 if (unlikely(!mlxsw_sp_port)) { 4147 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 4148 local_port); 4149 return; 4150 } 4151 4152 skb->dev = mlxsw_sp_port->dev; 4153 4154 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 4155 u64_stats_update_begin(&pcpu_stats->syncp); 4156 pcpu_stats->rx_packets++; 4157 pcpu_stats->rx_bytes += skb->len; 4158 u64_stats_update_end(&pcpu_stats->syncp); 4159 4160 skb->protocol = eth_type_trans(skb, skb->dev); 4161 netif_receive_skb(skb); 4162 } 4163 4164 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 4165 void *priv) 4166 { 4167 skb->offload_fwd_mark = 1; 4168 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 
4169 } 4170 4171 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 4172 u8 local_port, void *priv) 4173 { 4174 skb->offload_l3_fwd_mark = 1; 4175 skb->offload_fwd_mark = 1; 4176 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4177 } 4178 4179 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 4180 void *priv) 4181 { 4182 struct mlxsw_sp *mlxsw_sp = priv; 4183 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4184 struct psample_group *psample_group; 4185 u32 size; 4186 4187 if (unlikely(!mlxsw_sp_port)) { 4188 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 4189 local_port); 4190 goto out; 4191 } 4192 if (unlikely(!mlxsw_sp_port->sample)) { 4193 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 4194 local_port); 4195 goto out; 4196 } 4197 4198 size = mlxsw_sp_port->sample->truncate ? 4199 mlxsw_sp_port->sample->trunc_size : skb->len; 4200 4201 rcu_read_lock(); 4202 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 4203 if (!psample_group) 4204 goto out_unlock; 4205 psample_sample_packet(psample_group, skb, size, 4206 mlxsw_sp_port->dev->ifindex, 0, 4207 mlxsw_sp_port->sample->rate); 4208 out_unlock: 4209 rcu_read_unlock(); 4210 out: 4211 consume_skb(skb); 4212 } 4213 4214 static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port, 4215 void *priv) 4216 { 4217 struct mlxsw_sp *mlxsw_sp = priv; 4218 4219 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 4220 } 4221 4222 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4223 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 4224 _is_ctrl, SP_##_trap_group, DISCARD) 4225 4226 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4227 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 4228 _is_ctrl, SP_##_trap_group, DISCARD) 4229 
#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

/* Traps and events common to all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
		  false, SP_LLDP, DISCARD),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
	/* PTP traps */
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU,
		  false, SP_PTP0, DISCARD),
	MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false),
};

/* Traps and events specific to Spectrum-1 (registered via
 * mlxsw_sp->listeners from mlxsw_sp1_init()).
 */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

/* Program the per-trap-group policers (QPCR register) that rate-limit
 * traffic trapped to the CPU. Groups not listed in the switch keep
 * their defaults. Returns 0 or a negative errno.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			rate = 24 * 1024;
			burst_size = 12;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			rate = 19 * 1024;
			burst_size = 12;
			break;
		default:
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Bind each trap group (HTGT register) to a policer, a priority and a
 * CPU traffic class. Groups not listed in the switch are skipped.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* Policer index matches the group index except for events. */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Register an array of trap listeners; on failure unwind those already
 * registered so the caller sees all-or-nothing semantics.
 */
static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_listener listeners[],
				   size_t listeners_count)
{
	int i;
	int err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &listeners[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_listener listeners[],
				      size_t listeners_count)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
}

/* Set up CPU policers and trap groups, then register both the common
 * listener array and the per-ASIC one (mlxsw_sp->listeners).
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* Unregister in reverse order of registration. */
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure the LAG hash fields (SLCR register), seeding the hash from
 * the device base MAC so different switches hash differently, and
 * allocate the LAG upper-device tracking array.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Minimal trap-group setup (EMAD group only), needed before any other
 * register access over EMADs is possible.
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

/* PTP operations for Spectrum-1. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init = mlxsw_sp1_ptp_clock_init,
	.clock_fini = mlxsw_sp1_ptp_clock_fini,
	.init = mlxsw_sp1_ptp_init,
	.fini = mlxsw_sp1_ptp_fini,
	.receive = mlxsw_sp1_ptp_receive,
	.transmitted = mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp1_ptp_shaper_work,
	.get_ts_info = mlxsw_sp1_ptp_get_ts_info,
};

/* PTP operations for Spectrum-2. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init = mlxsw_sp2_ptp_clock_init,
	.clock_fini = mlxsw_sp2_ptp_clock_fini,
	.init = mlxsw_sp2_ptp_init,
	.fini = mlxsw_sp2_ptp_fini,
	.receive = mlxsw_sp2_ptp_receive,
	.transmitted = mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp2_ptp_shaper_work,
	.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
};

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

/* Common device initialization: bring up all driver subsystems in
 * dependency order; any failure unwinds via the goto ladder below.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct
			 mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 probe: install SP1-specific ops tables (including the
 * SP1-only FW requirement and extra PTP FIFO listeners), then run the
 * common init.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Spectrum-2 probe: install SP2-specific ops tables, then run the
 * common init. Note: no req_rev/fw_filename, so no FW flashing.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Tear down everything mlxsw_sp_init() set up, in reverse order. */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

/* Hardware configuration profile for Spectrum-1 (includes explicit KVD
 * partition sizes, unlike Spectrum-2).
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_kvd_sizes = 1,
	.kvd_hash_single_parts = 59,
	.kvd_hash_double_parts = 41,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Hardware configuration profile for Spectrum-2. */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Compute the devlink size parameters (min/max/granularity) for the KVD
 * and its linear/hash-single/hash-double sub-resources.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct
				      mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the KVD resource hierarchy (KVD and its linear, hash-double
 * and hash-single children) with devlink for Spectrum-1.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split the remaining (hash) part of the KVD between double and
	 * single entries by the profile's parts ratio, rounded down to
	 * the granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_sp1_resources_kvd_register(mlxsw_core);
}

/* Spectrum-2 exposes no KVD partitions via devlink. */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	return 0;
}

/* Resolve the linear/hash-double/hash-single KVD partition sizes,
 * preferring user-provided devlink resource sizes and falling back to
 * the profile defaults. Returns 0, or -EIO when resources are missing
 * or the resulting sizes are inconsistent.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* devlink 'fw_load_policy' validation: only 'driver' and 'flash' are
 * meaningful for this driver.
 */
static int
mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
					       union devlink_param_value val,
					       struct netlink_ext_ack *extack)
{
	if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
	    (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}

	return 0;
}

/* devlink parameters common to all Spectrum generations. */
static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};

/* Register the common devlink params and set their driverinit defaults. */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}

static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp_devlink_params,
				  ARRAY_SIZE(mlxsw_sp_devlink_params));
}

/* Runtime getter for the ACL region rehash interval devlink param. */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

/* Runtime setter for the ACL region rehash interval devlink param. */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

/* devlink parameters specific to Spectrum-2. */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};

/* Spectrum-2 param registration: common params plus the ACL region
 * rehash interval; unwinds the common ones on failure.
 */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = mlxsw_sp_params_register(mlxsw_core);
	if (err)
		return err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		goto err_devlink_params_register;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;

err_devlink_params_register:
	mlxsw_sp_params_unregister(mlxsw_core);
	return err;
}

static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
	mlxsw_sp_params_unregister(mlxsw_core);
}

static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{ 5208 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 5209 5210 skb_pull(skb, MLXSW_TXHDR_LEN); 5211 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 5212 } 5213 5214 static struct mlxsw_driver mlxsw_sp1_driver = { 5215 .kind = mlxsw_sp1_driver_name, 5216 .priv_size = sizeof(struct mlxsw_sp), 5217 .init = mlxsw_sp1_init, 5218 .fini = mlxsw_sp_fini, 5219 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5220 .port_split = mlxsw_sp_port_split, 5221 .port_unsplit = mlxsw_sp_port_unsplit, 5222 .sb_pool_get = mlxsw_sp_sb_pool_get, 5223 .sb_pool_set = mlxsw_sp_sb_pool_set, 5224 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 5225 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5226 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5227 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5228 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5229 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5230 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5231 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5232 .flash_update = mlxsw_sp_flash_update, 5233 .txhdr_construct = mlxsw_sp_txhdr_construct, 5234 .resources_register = mlxsw_sp1_resources_register, 5235 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 5236 .params_register = mlxsw_sp_params_register, 5237 .params_unregister = mlxsw_sp_params_unregister, 5238 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5239 .txhdr_len = MLXSW_TXHDR_LEN, 5240 .profile = &mlxsw_sp1_config_profile, 5241 .res_query_enabled = true, 5242 }; 5243 5244 static struct mlxsw_driver mlxsw_sp2_driver = { 5245 .kind = mlxsw_sp2_driver_name, 5246 .priv_size = sizeof(struct mlxsw_sp), 5247 .init = mlxsw_sp2_init, 5248 .fini = mlxsw_sp_fini, 5249 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 5250 .port_split = mlxsw_sp_port_split, 5251 .port_unsplit = mlxsw_sp_port_unsplit, 5252 .sb_pool_get = mlxsw_sp_sb_pool_get, 5253 .sb_pool_set = mlxsw_sp_sb_pool_set, 5254 .sb_port_pool_get = 
mlxsw_sp_sb_port_pool_get, 5255 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 5256 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 5257 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 5258 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 5259 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 5260 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 5261 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 5262 .flash_update = mlxsw_sp_flash_update, 5263 .txhdr_construct = mlxsw_sp_txhdr_construct, 5264 .resources_register = mlxsw_sp2_resources_register, 5265 .params_register = mlxsw_sp2_params_register, 5266 .params_unregister = mlxsw_sp2_params_unregister, 5267 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 5268 .txhdr_len = MLXSW_TXHDR_LEN, 5269 .profile = &mlxsw_sp2_config_profile, 5270 .res_query_enabled = true, 5271 }; 5272 5273 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 5274 { 5275 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 5276 } 5277 5278 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) 5279 { 5280 struct mlxsw_sp_port **p_mlxsw_sp_port = data; 5281 int ret = 0; 5282 5283 if (mlxsw_sp_port_dev_check(lower_dev)) { 5284 *p_mlxsw_sp_port = netdev_priv(lower_dev); 5285 ret = 1; 5286 } 5287 5288 return ret; 5289 } 5290 5291 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 5292 { 5293 struct mlxsw_sp_port *mlxsw_sp_port; 5294 5295 if (mlxsw_sp_port_dev_check(dev)) 5296 return netdev_priv(dev); 5297 5298 mlxsw_sp_port = NULL; 5299 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); 5300 5301 return mlxsw_sp_port; 5302 } 5303 5304 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 5305 { 5306 struct mlxsw_sp_port *mlxsw_sp_port; 5307 5308 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 5309 return mlxsw_sp_port ? 
mlxsw_sp_port->mlxsw_sp : NULL; 5310 } 5311 5312 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 5313 { 5314 struct mlxsw_sp_port *mlxsw_sp_port; 5315 5316 if (mlxsw_sp_port_dev_check(dev)) 5317 return netdev_priv(dev); 5318 5319 mlxsw_sp_port = NULL; 5320 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 5321 &mlxsw_sp_port); 5322 5323 return mlxsw_sp_port; 5324 } 5325 5326 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 5327 { 5328 struct mlxsw_sp_port *mlxsw_sp_port; 5329 5330 rcu_read_lock(); 5331 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 5332 if (mlxsw_sp_port) 5333 dev_hold(mlxsw_sp_port->dev); 5334 rcu_read_unlock(); 5335 return mlxsw_sp_port; 5336 } 5337 5338 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 5339 { 5340 dev_put(mlxsw_sp_port->dev); 5341 } 5342 5343 static void 5344 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 5345 struct net_device *lag_dev) 5346 { 5347 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 5348 struct net_device *upper_dev; 5349 struct list_head *iter; 5350 5351 if (netif_is_bridge_port(lag_dev)) 5352 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 5353 5354 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 5355 if (!netif_is_bridge_port(upper_dev)) 5356 continue; 5357 br_dev = netdev_master_upper_dev_get(upper_dev); 5358 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 5359 } 5360 } 5361 5362 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5363 { 5364 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5365 5366 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 5367 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5368 } 5369 5370 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 5371 { 5372 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5373 5374 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 5375 return 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5376 } 5377 5378 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 5379 u16 lag_id, u8 port_index) 5380 { 5381 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5382 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5383 5384 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 5385 lag_id, port_index); 5386 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5387 } 5388 5389 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 5390 u16 lag_id) 5391 { 5392 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5393 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5394 5395 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 5396 lag_id); 5397 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5398 } 5399 5400 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 5401 u16 lag_id) 5402 { 5403 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5404 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5405 5406 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 5407 lag_id); 5408 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5409 } 5410 5411 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 5412 u16 lag_id) 5413 { 5414 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5415 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 5416 5417 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 5418 lag_id); 5419 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 5420 } 5421 5422 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 5423 struct net_device *lag_dev, 5424 u16 *p_lag_id) 5425 { 5426 struct mlxsw_sp_upper *lag; 5427 int free_lag_id = -1; 5428 u64 max_lag; 5429 int i; 5430 5431 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); 5432 for (i = 0; i < max_lag; i++) { 5433 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 5434 if 
(lag->ref_count) { 5435 if (lag->dev == lag_dev) { 5436 *p_lag_id = i; 5437 return 0; 5438 } 5439 } else if (free_lag_id < 0) { 5440 free_lag_id = i; 5441 } 5442 } 5443 if (free_lag_id < 0) 5444 return -EBUSY; 5445 *p_lag_id = free_lag_id; 5446 return 0; 5447 } 5448 5449 static bool 5450 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 5451 struct net_device *lag_dev, 5452 struct netdev_lag_upper_info *lag_upper_info, 5453 struct netlink_ext_ack *extack) 5454 { 5455 u16 lag_id; 5456 5457 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 5458 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 5459 return false; 5460 } 5461 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 5462 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 5463 return false; 5464 } 5465 return true; 5466 } 5467 5468 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 5469 u16 lag_id, u8 *p_port_index) 5470 { 5471 u64 max_lag_members; 5472 int i; 5473 5474 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 5475 MAX_LAG_MEMBERS); 5476 for (i = 0; i < max_lag_members; i++) { 5477 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 5478 *p_port_index = i; 5479 return 0; 5480 } 5481 } 5482 return -EBUSY; 5483 } 5484 5485 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 5486 struct net_device *lag_dev) 5487 { 5488 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5489 struct mlxsw_sp_upper *lag; 5490 u16 lag_id; 5491 u8 port_index; 5492 int err; 5493 5494 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 5495 if (err) 5496 return err; 5497 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5498 if (!lag->ref_count) { 5499 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 5500 if (err) 5501 return err; 5502 lag->dev = lag_dev; 5503 } 5504 5505 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 5506 if (err) 5507 return err; 5508 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, 
port_index); 5509 if (err) 5510 goto err_col_port_add; 5511 5512 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 5513 mlxsw_sp_port->local_port); 5514 mlxsw_sp_port->lag_id = lag_id; 5515 mlxsw_sp_port->lagged = 1; 5516 lag->ref_count++; 5517 5518 /* Port is no longer usable as a router interface */ 5519 if (mlxsw_sp_port->default_vlan->fid) 5520 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 5521 5522 return 0; 5523 5524 err_col_port_add: 5525 if (!lag->ref_count) 5526 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 5527 return err; 5528 } 5529 5530 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 5531 struct net_device *lag_dev) 5532 { 5533 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5534 u16 lag_id = mlxsw_sp_port->lag_id; 5535 struct mlxsw_sp_upper *lag; 5536 5537 if (!mlxsw_sp_port->lagged) 5538 return; 5539 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 5540 WARN_ON(lag->ref_count == 0); 5541 5542 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 5543 5544 /* Any VLANs configured on the port are no longer valid */ 5545 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 5546 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 5547 /* Make the LAG and its directly linked uppers leave bridges they 5548 * are memeber in 5549 */ 5550 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 5551 5552 if (lag->ref_count == 1) 5553 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 5554 5555 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 5556 mlxsw_sp_port->local_port); 5557 mlxsw_sp_port->lagged = 0; 5558 lag->ref_count--; 5559 5560 /* Make sure untagged frames are allowed to ingress */ 5561 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); 5562 } 5563 5564 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 5565 u16 lag_id) 5566 { 5567 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5568 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5569 5570 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, 
lag_id, 5571 mlxsw_sp_port->local_port); 5572 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5573 } 5574 5575 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 5576 u16 lag_id) 5577 { 5578 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5579 char sldr_pl[MLXSW_REG_SLDR_LEN]; 5580 5581 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 5582 mlxsw_sp_port->local_port); 5583 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 5584 } 5585 5586 static int 5587 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 5588 { 5589 int err; 5590 5591 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 5592 mlxsw_sp_port->lag_id); 5593 if (err) 5594 return err; 5595 5596 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5597 if (err) 5598 goto err_dist_port_add; 5599 5600 return 0; 5601 5602 err_dist_port_add: 5603 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5604 return err; 5605 } 5606 5607 static int 5608 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 5609 { 5610 int err; 5611 5612 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 5613 mlxsw_sp_port->lag_id); 5614 if (err) 5615 return err; 5616 5617 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 5618 mlxsw_sp_port->lag_id); 5619 if (err) 5620 goto err_col_port_disable; 5621 5622 return 0; 5623 5624 err_col_port_disable: 5625 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 5626 return err; 5627 } 5628 5629 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 5630 struct netdev_lag_lower_state_info *info) 5631 { 5632 if (info->tx_enabled) 5633 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 5634 else 5635 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 5636 } 5637 5638 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 5639 bool enable) 5640 { 5641 struct mlxsw_sp *mlxsw_sp = 
mlxsw_sp_port->mlxsw_sp; 5642 enum mlxsw_reg_spms_state spms_state; 5643 char *spms_pl; 5644 u16 vid; 5645 int err; 5646 5647 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : 5648 MLXSW_REG_SPMS_STATE_DISCARDING; 5649 5650 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 5651 if (!spms_pl) 5652 return -ENOMEM; 5653 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 5654 5655 for (vid = 0; vid < VLAN_N_VID; vid++) 5656 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 5657 5658 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 5659 kfree(spms_pl); 5660 return err; 5661 } 5662 5663 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 5664 { 5665 u16 vid = 1; 5666 int err; 5667 5668 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 5669 if (err) 5670 return err; 5671 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 5672 if (err) 5673 goto err_port_stp_set; 5674 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 5675 true, false); 5676 if (err) 5677 goto err_port_vlan_set; 5678 5679 for (; vid <= VLAN_N_VID - 1; vid++) { 5680 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 5681 vid, false); 5682 if (err) 5683 goto err_vid_learning_set; 5684 } 5685 5686 return 0; 5687 5688 err_vid_learning_set: 5689 for (vid--; vid >= 1; vid--) 5690 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 5691 err_port_vlan_set: 5692 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 5693 err_port_stp_set: 5694 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 5695 return err; 5696 } 5697 5698 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 5699 { 5700 u16 vid; 5701 5702 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 5703 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 5704 vid, true); 5705 5706 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 5707 false, false); 5708 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 5709 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 5710 } 5711 5712 static bool 
mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 5713 { 5714 unsigned int num_vxlans = 0; 5715 struct net_device *dev; 5716 struct list_head *iter; 5717 5718 netdev_for_each_lower_dev(br_dev, dev, iter) { 5719 if (netif_is_vxlan(dev)) 5720 num_vxlans++; 5721 } 5722 5723 return num_vxlans > 1; 5724 } 5725 5726 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 5727 { 5728 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 5729 struct net_device *dev; 5730 struct list_head *iter; 5731 5732 netdev_for_each_lower_dev(br_dev, dev, iter) { 5733 u16 pvid; 5734 int err; 5735 5736 if (!netif_is_vxlan(dev)) 5737 continue; 5738 5739 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 5740 if (err || !pvid) 5741 continue; 5742 5743 if (test_and_set_bit(pvid, vlans)) 5744 return false; 5745 } 5746 5747 return true; 5748 } 5749 5750 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 5751 struct netlink_ext_ack *extack) 5752 { 5753 if (br_multicast_enabled(br_dev)) { 5754 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 5755 return false; 5756 } 5757 5758 if (!br_vlan_enabled(br_dev) && 5759 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 5760 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 5761 return false; 5762 } 5763 5764 if (br_vlan_enabled(br_dev) && 5765 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 5766 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 5767 return false; 5768 } 5769 5770 return true; 5771 } 5772 5773 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 5774 struct net_device *dev, 5775 unsigned long event, void *ptr) 5776 { 5777 struct netdev_notifier_changeupper_info *info; 5778 struct mlxsw_sp_port *mlxsw_sp_port; 5779 struct netlink_ext_ack *extack; 5780 struct net_device *upper_dev; 5781 struct mlxsw_sp *mlxsw_sp; 5782 int err = 0; 5783 
5784 mlxsw_sp_port = netdev_priv(dev); 5785 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5786 info = ptr; 5787 extack = netdev_notifier_info_to_extack(&info->info); 5788 5789 switch (event) { 5790 case NETDEV_PRECHANGEUPPER: 5791 upper_dev = info->upper_dev; 5792 if (!is_vlan_dev(upper_dev) && 5793 !netif_is_lag_master(upper_dev) && 5794 !netif_is_bridge_master(upper_dev) && 5795 !netif_is_ovs_master(upper_dev) && 5796 !netif_is_macvlan(upper_dev)) { 5797 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5798 return -EINVAL; 5799 } 5800 if (!info->linking) 5801 break; 5802 if (netif_is_bridge_master(upper_dev) && 5803 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 5804 mlxsw_sp_bridge_has_vxlan(upper_dev) && 5805 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5806 return -EOPNOTSUPP; 5807 if (netdev_has_any_upper_dev(upper_dev) && 5808 (!netif_is_bridge_master(upper_dev) || 5809 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 5810 upper_dev))) { 5811 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 5812 return -EINVAL; 5813 } 5814 if (netif_is_lag_master(upper_dev) && 5815 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 5816 info->upper_info, extack)) 5817 return -EINVAL; 5818 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 5819 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 5820 return -EINVAL; 5821 } 5822 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 5823 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 5824 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 5825 return -EINVAL; 5826 } 5827 if (netif_is_macvlan(upper_dev) && 5828 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) { 5829 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 5830 return -EOPNOTSUPP; 5831 } 5832 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 5833 NL_SET_ERR_MSG_MOD(extack, "Master device 
is an OVS master and this device has a VLAN"); 5834 return -EINVAL; 5835 } 5836 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 5837 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 5838 return -EINVAL; 5839 } 5840 break; 5841 case NETDEV_CHANGEUPPER: 5842 upper_dev = info->upper_dev; 5843 if (netif_is_bridge_master(upper_dev)) { 5844 if (info->linking) 5845 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 5846 lower_dev, 5847 upper_dev, 5848 extack); 5849 else 5850 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 5851 lower_dev, 5852 upper_dev); 5853 } else if (netif_is_lag_master(upper_dev)) { 5854 if (info->linking) { 5855 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 5856 upper_dev); 5857 } else { 5858 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 5859 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 5860 upper_dev); 5861 } 5862 } else if (netif_is_ovs_master(upper_dev)) { 5863 if (info->linking) 5864 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 5865 else 5866 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 5867 } else if (netif_is_macvlan(upper_dev)) { 5868 if (!info->linking) 5869 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5870 } else if (is_vlan_dev(upper_dev)) { 5871 struct net_device *br_dev; 5872 5873 if (!netif_is_bridge_port(upper_dev)) 5874 break; 5875 if (info->linking) 5876 break; 5877 br_dev = netdev_master_upper_dev_get(upper_dev); 5878 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 5879 br_dev); 5880 } 5881 break; 5882 } 5883 5884 return err; 5885 } 5886 5887 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 5888 unsigned long event, void *ptr) 5889 { 5890 struct netdev_notifier_changelowerstate_info *info; 5891 struct mlxsw_sp_port *mlxsw_sp_port; 5892 int err; 5893 5894 mlxsw_sp_port = netdev_priv(dev); 5895 info = ptr; 5896 5897 switch (event) { 5898 case NETDEV_CHANGELOWERSTATE: 5899 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 5900 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 5901 
info->lower_state_info); 5902 if (err) 5903 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 5904 } 5905 break; 5906 } 5907 5908 return 0; 5909 } 5910 5911 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 5912 struct net_device *port_dev, 5913 unsigned long event, void *ptr) 5914 { 5915 switch (event) { 5916 case NETDEV_PRECHANGEUPPER: 5917 case NETDEV_CHANGEUPPER: 5918 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 5919 event, ptr); 5920 case NETDEV_CHANGELOWERSTATE: 5921 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 5922 ptr); 5923 } 5924 5925 return 0; 5926 } 5927 5928 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 5929 unsigned long event, void *ptr) 5930 { 5931 struct net_device *dev; 5932 struct list_head *iter; 5933 int ret; 5934 5935 netdev_for_each_lower_dev(lag_dev, dev, iter) { 5936 if (mlxsw_sp_port_dev_check(dev)) { 5937 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 5938 ptr); 5939 if (ret) 5940 return ret; 5941 } 5942 } 5943 5944 return 0; 5945 } 5946 5947 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 5948 struct net_device *dev, 5949 unsigned long event, void *ptr, 5950 u16 vid) 5951 { 5952 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 5953 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 5954 struct netdev_notifier_changeupper_info *info = ptr; 5955 struct netlink_ext_ack *extack; 5956 struct net_device *upper_dev; 5957 int err = 0; 5958 5959 extack = netdev_notifier_info_to_extack(&info->info); 5960 5961 switch (event) { 5962 case NETDEV_PRECHANGEUPPER: 5963 upper_dev = info->upper_dev; 5964 if (!netif_is_bridge_master(upper_dev) && 5965 !netif_is_macvlan(upper_dev)) { 5966 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5967 return -EINVAL; 5968 } 5969 if (!info->linking) 5970 break; 5971 if (netif_is_bridge_master(upper_dev) && 5972 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 
upper_dev) && 5973 mlxsw_sp_bridge_has_vxlan(upper_dev) && 5974 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5975 return -EOPNOTSUPP; 5976 if (netdev_has_any_upper_dev(upper_dev) && 5977 (!netif_is_bridge_master(upper_dev) || 5978 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 5979 upper_dev))) { 5980 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 5981 return -EINVAL; 5982 } 5983 if (netif_is_macvlan(upper_dev) && 5984 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { 5985 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 5986 return -EOPNOTSUPP; 5987 } 5988 break; 5989 case NETDEV_CHANGEUPPER: 5990 upper_dev = info->upper_dev; 5991 if (netif_is_bridge_master(upper_dev)) { 5992 if (info->linking) 5993 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 5994 vlan_dev, 5995 upper_dev, 5996 extack); 5997 else 5998 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 5999 vlan_dev, 6000 upper_dev); 6001 } else if (netif_is_macvlan(upper_dev)) { 6002 if (!info->linking) 6003 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6004 } else { 6005 err = -EINVAL; 6006 WARN_ON(1); 6007 } 6008 break; 6009 } 6010 6011 return err; 6012 } 6013 6014 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 6015 struct net_device *lag_dev, 6016 unsigned long event, 6017 void *ptr, u16 vid) 6018 { 6019 struct net_device *dev; 6020 struct list_head *iter; 6021 int ret; 6022 6023 netdev_for_each_lower_dev(lag_dev, dev, iter) { 6024 if (mlxsw_sp_port_dev_check(dev)) { 6025 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 6026 event, ptr, 6027 vid); 6028 if (ret) 6029 return ret; 6030 } 6031 } 6032 6033 return 0; 6034 } 6035 6036 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, 6037 struct net_device *br_dev, 6038 unsigned long event, void *ptr, 6039 u16 vid) 6040 { 6041 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 6042 struct 
netdev_notifier_changeupper_info *info = ptr; 6043 struct netlink_ext_ack *extack; 6044 struct net_device *upper_dev; 6045 6046 if (!mlxsw_sp) 6047 return 0; 6048 6049 extack = netdev_notifier_info_to_extack(&info->info); 6050 6051 switch (event) { 6052 case NETDEV_PRECHANGEUPPER: 6053 upper_dev = info->upper_dev; 6054 if (!netif_is_macvlan(upper_dev)) { 6055 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6056 return -EOPNOTSUPP; 6057 } 6058 if (!info->linking) 6059 break; 6060 if (netif_is_macvlan(upper_dev) && 6061 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { 6062 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6063 return -EOPNOTSUPP; 6064 } 6065 break; 6066 case NETDEV_CHANGEUPPER: 6067 upper_dev = info->upper_dev; 6068 if (info->linking) 6069 break; 6070 if (netif_is_macvlan(upper_dev)) 6071 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6072 break; 6073 } 6074 6075 return 0; 6076 } 6077 6078 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 6079 unsigned long event, void *ptr) 6080 { 6081 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 6082 u16 vid = vlan_dev_vlan_id(vlan_dev); 6083 6084 if (mlxsw_sp_port_dev_check(real_dev)) 6085 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 6086 event, ptr, vid); 6087 else if (netif_is_lag_master(real_dev)) 6088 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 6089 real_dev, event, 6090 ptr, vid); 6091 else if (netif_is_bridge_master(real_dev)) 6092 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, 6093 event, ptr, vid); 6094 6095 return 0; 6096 } 6097 6098 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 6099 unsigned long event, void *ptr) 6100 { 6101 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); 6102 struct netdev_notifier_changeupper_info *info = ptr; 6103 struct netlink_ext_ack *extack; 6104 struct net_device *upper_dev; 6105 6106 if (!mlxsw_sp) 6107 return 0; 6108 6109 
extack = netdev_notifier_info_to_extack(&info->info); 6110 6111 switch (event) { 6112 case NETDEV_PRECHANGEUPPER: 6113 upper_dev = info->upper_dev; 6114 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { 6115 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6116 return -EOPNOTSUPP; 6117 } 6118 if (!info->linking) 6119 break; 6120 if (netif_is_macvlan(upper_dev) && 6121 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) { 6122 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6123 return -EOPNOTSUPP; 6124 } 6125 break; 6126 case NETDEV_CHANGEUPPER: 6127 upper_dev = info->upper_dev; 6128 if (info->linking) 6129 break; 6130 if (is_vlan_dev(upper_dev)) 6131 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 6132 if (netif_is_macvlan(upper_dev)) 6133 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 6134 break; 6135 } 6136 6137 return 0; 6138 } 6139 6140 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 6141 unsigned long event, void *ptr) 6142 { 6143 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 6144 struct netdev_notifier_changeupper_info *info = ptr; 6145 struct netlink_ext_ack *extack; 6146 6147 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 6148 return 0; 6149 6150 extack = netdev_notifier_info_to_extack(&info->info); 6151 6152 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ 6153 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 6154 6155 return -EOPNOTSUPP; 6156 } 6157 6158 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) 6159 { 6160 struct netdev_notifier_changeupper_info *info = ptr; 6161 6162 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) 6163 return false; 6164 return netif_is_l3_master(info->upper_dev); 6165 } 6166 6167 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 6168 struct net_device *dev, 6169 unsigned long event, void *ptr) 6170 { 6171 struct netdev_notifier_changeupper_info 
*cu_info; 6172 struct netdev_notifier_info *info = ptr; 6173 struct netlink_ext_ack *extack; 6174 struct net_device *upper_dev; 6175 6176 extack = netdev_notifier_info_to_extack(info); 6177 6178 switch (event) { 6179 case NETDEV_CHANGEUPPER: 6180 cu_info = container_of(info, 6181 struct netdev_notifier_changeupper_info, 6182 info); 6183 upper_dev = cu_info->upper_dev; 6184 if (!netif_is_bridge_master(upper_dev)) 6185 return 0; 6186 if (!mlxsw_sp_lower_get(upper_dev)) 6187 return 0; 6188 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 6189 return -EOPNOTSUPP; 6190 if (cu_info->linking) { 6191 if (!netif_running(dev)) 6192 return 0; 6193 /* When the bridge is VLAN-aware, the VNI of the VxLAN 6194 * device needs to be mapped to a VLAN, but at this 6195 * point no VLANs are configured on the VxLAN device 6196 */ 6197 if (br_vlan_enabled(upper_dev)) 6198 return 0; 6199 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 6200 dev, 0, extack); 6201 } else { 6202 /* VLANs were already flushed, which triggered the 6203 * necessary cleanup 6204 */ 6205 if (br_vlan_enabled(upper_dev)) 6206 return 0; 6207 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6208 } 6209 break; 6210 case NETDEV_PRE_UP: 6211 upper_dev = netdev_master_upper_dev_get(dev); 6212 if (!upper_dev) 6213 return 0; 6214 if (!netif_is_bridge_master(upper_dev)) 6215 return 0; 6216 if (!mlxsw_sp_lower_get(upper_dev)) 6217 return 0; 6218 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 6219 extack); 6220 case NETDEV_DOWN: 6221 upper_dev = netdev_master_upper_dev_get(dev); 6222 if (!upper_dev) 6223 return 0; 6224 if (!netif_is_bridge_master(upper_dev)) 6225 return 0; 6226 if (!mlxsw_sp_lower_get(upper_dev)) 6227 return 0; 6228 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6229 break; 6230 } 6231 6232 return 0; 6233 } 6234 6235 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 6236 unsigned long event, void *ptr) 6237 { 6238 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	/* A netdev being unregistered can no longer serve as a mirroring
	 * target; invalidate any SPAN entry bound to it before respinning.
	 */
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* Note: this is deliberately a plain 'if', not part of the
	 * 'else if' chain below — VxLAN handling runs independently of
	 * the per-device-type dispatch that follows.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

/* Validator notifiers: veto invalid IPv4/IPv6 address configuration on
 * offloaded netdevs before it is committed.
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

/* PCI IDs served by the Spectrum-1 driver instance. */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX,
PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 6289 {0, }, 6290 }; 6291 6292 static struct pci_driver mlxsw_sp1_pci_driver = { 6293 .name = mlxsw_sp1_driver_name, 6294 .id_table = mlxsw_sp1_pci_id_table, 6295 }; 6296 6297 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 6298 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 6299 {0, }, 6300 }; 6301 6302 static struct pci_driver mlxsw_sp2_pci_driver = { 6303 .name = mlxsw_sp2_driver_name, 6304 .id_table = mlxsw_sp2_pci_id_table, 6305 }; 6306 6307 static int __init mlxsw_sp_module_init(void) 6308 { 6309 int err; 6310 6311 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6312 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6313 6314 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 6315 if (err) 6316 goto err_sp1_core_driver_register; 6317 6318 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 6319 if (err) 6320 goto err_sp2_core_driver_register; 6321 6322 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 6323 if (err) 6324 goto err_sp1_pci_driver_register; 6325 6326 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 6327 if (err) 6328 goto err_sp2_pci_driver_register; 6329 6330 return 0; 6331 6332 err_sp2_pci_driver_register: 6333 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6334 err_sp1_pci_driver_register: 6335 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6336 err_sp2_core_driver_register: 6337 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6338 err_sp1_core_driver_register: 6339 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6340 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6341 return err; 6342 } 6343 6344 static void __exit mlxsw_sp_module_exit(void) 6345 { 6346 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6347 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6348 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6349 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6350 
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Expose both PCI ID tables for udev/modprobe auto-loading. */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
/* Declare the bundled Spectrum-1 firmware image (see MLXSW_SP1_FW_FILENAME)
 * so it is packaged with the module.
 */
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);