// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "../mlxfw/mlxfw.h"

/* The firmware "branch" is encoded in the hundreds of the minor number;
 * driver and firmware must be on the same branch to be compatible.
 */
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
#define MLXSW_SP1_FWREV_SUBMINOR 1122
/* Oldest minor from which the FW can be reset after flashing. */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

/* Firmware revision required by this driver on Spectrum-1 devices. */
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware image file name, derived from the required revision above. */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp_driver_version[] = "1.0";

/* Masks of the base-MAC bits that are common to all ports of one ASIC. */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

/* Glue object tying the generic mlxfw flashing framework to a specific
 * Spectrum instance; recovered from the embedded mlxfw_dev via container_of.
 */
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

/* Per-ASIC-generation PTP callbacks (Spectrum-1 vs. Spectrum-2 differ). */
struct mlxsw_sp_ptp_ops {
	struct mlxsw_sp_ptp_clock *
		(*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev);
	void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock);

	struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp);
	void (*fini)(struct mlxsw_sp_ptp_state *ptp_state);

	/* Notify a driver that a packet that might be PTP was received. Driver
	 * is responsible for freeing the passed-in SKB.
	 */
	void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			u8 local_port);

	/* Notify a driver that a timestamped packet was transmitted. Driver
	 * is responsible for freeing the passed-in SKB.
	 */
	void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			    u8 local_port);

	int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	void (*shaper_work)(struct work_struct *work);
	int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
			   struct ethtool_ts_info *info);
};

/* mlxfw callback: query, via the MCQI register, the maximum size, alignment
 * and maximum write size of a firmware component. The reported values are
 * clamped to what the MCDA data register can actually carry per transaction.
 */
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	/* Enforce at least 4-byte alignment and cap writes to MCDA capacity. */
	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

/* mlxfw callback: acquire the firmware-update handle. Fails with -EBUSY if
 * the management FSM is not idle (e.g. another update is in progress).
 */
static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
/* mlxfw callback: announce an upcoming component download of the given size. */
static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: write one block of component data at the given offset. */
static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

/* mlxfw callback: ask the device to verify a fully downloaded component. */
static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: activate the newly flashed firmware image. */
static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: read back the FSM state and error code via MCC. The raw
 * error code is clamped to the largest value mlxfw knows about.
 */
static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

/* mlxfw callback: abort an in-progress update. Best-effort; the write's
 * return value is intentionally ignored as there is no recovery path.
 */
static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: release the firmware-update handle taken by fsm_lock. */
static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

/* mlxfw callback: forward flashing progress to devlink for user visibility. */
static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev,
				   const char *msg, const char *comp_name,
				   u32 done_bytes, u32 total_bytes)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;

	devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core),
					   msg, comp_name,
					   done_bytes, total_bytes);
}
/* Operation table plugged into the generic mlxfw flashing framework. */
static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query = mlxsw_sp_component_query,
	.fsm_lock = mlxsw_sp_fsm_lock,
	.fsm_component_update = mlxsw_sp_fsm_component_update,
	.fsm_block_download = mlxsw_sp_fsm_block_download,
	.fsm_component_verify = mlxsw_sp_fsm_component_verify,
	.fsm_activate = mlxsw_sp_fsm_activate,
	.fsm_query_state = mlxsw_sp_fsm_query_state,
	.fsm_cancel = mlxsw_sp_fsm_cancel,
	.fsm_release = mlxsw_sp_fsm_release,
	.status_notify = mlxsw_sp_status_notify,
};

/* Flash the given firmware image, bracketing the operation with core
 * flash start/end notifications and devlink begin/end status updates.
 */
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};
	int err;

	mlxsw_core_fw_flash_start(mlxsw_sp->core);
	devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core));
	err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
				   firmware, extack);
	devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core));
	mlxsw_core_fw_flash_end(mlxsw_sp->core);

	return err;
}

/* Validate that the running firmware is compatible with the driver and,
 * if it is too old, flash the bundled image. Returns 0 when the firmware
 * is acceptable, -EAGAIN when a flash succeeded and the caller must reset
 * the device to run the new image, or a negative error otherwise.
 */
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
	const char *fw_filename = mlxsw_sp->fw_filename;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !fw_filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
						 &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	/* Same branch and at least the required minor.subminor is OK as-is. */
	if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
	    MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
	    (rev->minor > req_rev->minor ||
	     (rev->minor == req_rev->minor &&
	      rev->subminor >= req_rev->subminor)))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
		 fw_filename);

	err = request_firmware_direct(&firmware, fw_filename,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			fw_filename);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}
/* devlink flash callback: flash a user-supplied firmware image. Per-component
 * flashing is not supported by this device.
 */
static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
				 const char *file_name, const char *component,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	const struct firmware *firmware;
	int err;

	if (component)
		return -EOPNOTSUPP;

	err = request_firmware_direct(&firmware, file_name,
				      mlxsw_sp->bus_info->dev);
	if (err)
		return err;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
	release_firmware(firmware);

	return err;
}

/* Read a flow counter's packet and byte values via the MGPC register.
 * Either output pointer may be NULL if that value is not needed.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

/* Zero a flow counter in hardware. */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

/* Allocate a flow counter from the counter pool and clear it so the caller
 * starts counting from zero. The counter is freed again on clear failure.
 */
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

/* Return a flow counter to the counter pool. */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

/* Prepend the mlxsw Tx header to an skb. The header directs the packet as
 * an Ethernet control packet to a specific egress port, bypassing the
 * forwarding pipeline. The caller must guarantee MLXSW_TXHDR_LEN headroom.
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

/* Map a bridge STP port state to the corresponding SPMS register state.
 * BUG()s on an unknown state since callers only pass the BR_STATE_* values.
 */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

/* Set the STP state of one VLAN on a port via the SPMS register.
 * SPMS is large, so the payload is heap-allocated rather than on-stack.
 */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Read the switch base MAC address (SPAD register) into mlxsw_sp->base_mac. */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
/* Enable/disable packet sampling on a port with the given rate (MPSC). */
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

/* Set a port's administrative state (up/down) via the PAOS register. */
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

/* Program a port's hardware MAC address via the PPAD register. */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

/* Derive the port MAC from the switch base MAC by adding the local port
 * number to the last byte, then program it into the hardware.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

/* Set a port's MTU (PMTU register). The netdev MTU is grown by the Tx
 * header and Ethernet header lengths and validated against the maximum
 * the hardware reports for this port.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

/* Assign a port to a switch partition (PSPA register). */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

/* Enable/disable virtual-port (VLAN-aware) mode on a port (SVPE register). */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

/* Enable/disable FDB learning for one VID on a port (SPVMLR register).
 * SPVMLR is large, so the payload is heap-allocated rather than on-stack.
 */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

/* Program a port's PVID in hardware (SPVID register). */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}
/* Allow or disallow reception of untagged frames on a port (SPAFT register). */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

/* Set a port's PVID. A VID of 0 means "no PVID": untagged frames are then
 * disallowed instead of being mapped. On failure to allow untagged frames,
 * the previous PVID is restored (best effort) before returning the error.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

/* Create the local-port to system-port mapping for a port (SSPR register). */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

/* Query module, width and first lane mapped to a local port (PMLP register). */
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

/* Map 'width' consecutive lanes of the given module to a port (PMLP). */
static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* Unmap all lanes from a port by writing a zero width (PMLP register). */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* ndo_open: administratively enable the port and start the Tx queue. */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

/* ndo_stop: stop the Tx queue and administratively disable the port. */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

/* ndo_start_xmit: transmit an skb as a control packet directed at this
 * port. Ensures headroom for the Tx header (reallocating if needed), pads
 * to the minimum Ethernet frame size, and updates per-CPU Tx statistics.
 * The skb is always consumed (sent or dropped); NETDEV_TX_BUSY is only
 * returned when the core Tx path is temporarily unable to accept packets.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		/* eth_skb_pad() already freed the skb on failure. */
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
/* ndo_set_rx_mode: nothing to do - the device has no Rx filtering to sync. */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

/* ndo_set_mac_address: validate and program a new MAC, then mirror it into
 * the netdev only after the hardware accepted it.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

/* Xon/Xoff threshold for a priority-group buffer: two MTUs worth of cells. */
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

/* Extra headroom needed to absorb in-flight traffic after a PFC pause was
 * sent: the peer-advertised delay (given in bit times) plus one MTU, in cells.
 */
static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

/* Delay headroom for a buffer, in cells, depending on whether PFC,
 * link-level pause, or neither is in effect.
 */
static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

/* Pack one priority-group buffer into a PBMC payload, as either a lossy
 * buffer or a lossless one with the given Xon/Xoff threshold.
 */
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

/* Configure the port's headroom buffers (PBMC register) for the given MTU,
 * priority-to-TC mapping and pause/PFC configuration. Each TC that has at
 * least one priority mapped to it gets a buffer sized for threshold plus
 * delay; buffers are lossy unless PFC or pause is enabled. Fails with
 * -ENOBUFS if the total exceeds the port's maximum headroom.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	u32 taken_headroom_cells = 0;
	u32 max_headroom_cells;
	int i, j, err;

	max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		u16 thres_cells;
		u16 delay_cells;
		u16 total_cells;
		bool lossy;

		/* Only configure TCs that have some priority mapped to them;
		 * the TC is PFC-enabled if any such priority has PFC enabled.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
							pfc, pause_en);
		total_cells = thres_cells + delay_cells;

		taken_headroom_cells += total_cells;
		if (taken_headroom_cells > max_headroom_cells)
			return -ENOBUFS;

		mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
				     thres_cells, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
/* Configure the port's headroom using its current DCB configuration, or
 * defaults (all priorities on TC 0, no PFC) if DCB ETS is not set up.
 */
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

/* ndo_change_mtu: update headroom, SPAN buffers and the hardware MTU, in
 * that order; on failure each already-applied step is rolled back to the
 * old MTU so the port stays consistent.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* Sum the software-maintained per-CPU packet/byte counters into 'stats'.
 * Reads are done under the u64_stats seqcount so 64-bit values are
 * consistent on 32-bit hosts. Assumes 'stats' was zeroed by the caller
 * since counters are accumulated with '+='.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
*/
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

/* The only offload xstats we expose are CPU-path ("cpu_hit") counters. */
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

/* Query one PPCNT counter group / priority for this port into @ppcnt_pl. */
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

/* Fill @stats from the IEEE 802.3 PPCNT counter group. */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	/* rx_length_errors aggregates all three length-related counters. */
	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

/* Gather extended, per-TC and per-priority counters into @xstats. A
 * failed query leaves the corresponding entries untouched (best effort).
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

/* Periodic work item: refresh the cached HW stats while the carrier is
 * up, then unconditionally re-arm itself.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
&mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program one SPVM record covering [vid_begin, vid_end] for this port. */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Set VLAN membership for a VID range, chunked by the maximum number of
 * records one SPVM register write can carry.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Destroy all port VLANs; the default VID is kept unless @flush_default. */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

/* Detach the port VLAN from whichever entity currently uses it:
 * a bridge port, or the router (indicated by an associated FID).
 */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

/* Create a port VLAN: program HW membership first, then allocate and
 * link the tracking structure. Returns ERR_PTR(-EEXIST) for duplicates.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

/* Linear lookup of a matchall offload entry by its tc cookie. */
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

/* Offload a matchall mirred (mirror) action via a SPAN agent. */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;

	if (!act->dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	mirror->ingress = ingress;
	span_type = ingress ?
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
					true, &mirror->span_id);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
				 span_type, true);
}

/* Offload a matchall sample action. Only one sampler per port is
 * supported; the psample group pointer doubles as the "active" flag.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct flow_action_entry *act,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	/* Publish the group pointer before enabling sampling in HW. */
	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   act->sample.psample_group);
	mlxsw_sp_port->sample->truncate = act->sample.truncate;
	mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
	mlxsw_sp_port->sample->rate = act->sample.rate;

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

/* Offload a matchall classifier. Exactly one action is supported:
 * either mirred (mirror) or sample, and only for protocol "all".
 */
static int
mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct tc_cls_matchall_offload *f,
			       bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	int err;

	if (!flow_offload_has_one_action(&f->rule->action)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	act = &f->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, act,
							    ingress);
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    act, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

/* Dispatch matchall commands to the add / delete handlers above. */
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Dispatch flower commands to the ACL (spectrum_flower) handlers. */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Block callback for matchall; flower is handled by a separate callback
 * (mlxsw_sp_setup_tc_block_cb_flower), so it is accepted as a no-op here.
 */
static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}

static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}

/* Block callback for flower; matchall is accepted as a no-op since it is
 * handled by the matchall callbacks above.
 */
static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* Release callback invoked when the flower block_cb is freed. */
static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	mlxsw_sp_acl_block_destroy(acl_block);
}

static LIST_HEAD(mlxsw_sp_block_cb_list);

/* Bind a port to a shared-block flower callback. The ACL block and its
 * flow_block_cb are shared between ports bound to the same tc block and
 * refcounted; only the first binder registers the callback.
 */
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb) {
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(f->net,
					       mlxsw_sp_setup_tc_block_cb_flower,
					       mlxsw_sp, acl_block,
					       mlxsw_sp_tc_block_flower_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_acl_block_destroy(acl_block);
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
		register_block = true;
	} else {
		acl_block = flow_block_cb_priv(block_cb);
	}
	flow_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
	}

	return 0;

err_block_bind:
	/* Drop our reference; free the block_cb if we were the last user. */
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
err_cb_register:
	return err;
}

/* Undo mlxsw_sp_setup_tc_block_flower_bind(); the last unbinder removes
 * the shared flow_block_cb (which destroys the ACL block via its
 * release callback).
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct flow_block_offload *f, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
					mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = flow_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

/* Bind/unbind a tc block to this port: register a per-port matchall
 * callback and bind the shared flower ACL block for the same direction.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	tc_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	f->driver_block_list = &mlxsw_sp_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_alloc(f->net, cb, mlxsw_sp_port,
					       mlxsw_sp_port, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);
		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
							  ingress);
		if (err) {
			flow_block_cb_free(block_cb);
			return err;
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
						      f, ingress);
		block_cb = flow_block_cb_lookup(f, cb, mlxsw_sp_port);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* ndo_setup_tc entry point: blocks (clsact) plus RED and PRIO qdiscs. */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}


/* NETIF_F_HW_TC handler: refuse to disable while offloaded rules exist;
 * otherwise toggle the ACL blocks' disabled counters.
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
	} else {
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
	}
	return 0;
}

/* NETIF_F_LOOPBACK handler: toggle physical loopback via the PPLR
 * register, taking the port administratively down around the write.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Run @feature_handler when @feature's requested state differs from the
 * current one, and mirror the result into dev->features on success.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}
/* ndo_set_features: apply each supported feature independently; restore
 * the previous feature set if any handler failed.
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

/* SIOCSHWTSTAMP: apply the user's hwtstamp config through the per-ASIC
 * PTP ops and copy the (possibly adjusted) config back to user space.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/* Reset the port's hwtstamp configuration to all-zero (timestamping off). */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

/* ndo_do_ioctl: only the hardware timestamping ioctls are supported. */
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

/* Write the requested PAUSE state to the PFCC register. */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

/* ethtool set_pauseparam: PAUSE is mutually exclusive with PFC, and
 * PAUSE autonegotiation is not supported. Headroom is resized before
 * enabling PAUSE and restored if the PFCC write fails.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en =
mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

/* Descriptor for one ethtool HW counter: its string name, the PPCNT
 * payload getter and whether the value is in cells (to be converted to
 * bytes by the caller).
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

/* IEEE 802.3 counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

/* RFC 2863 (interfaces MIB) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
	{
		.str = "if_in_discards",
		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
	},
	{
		.str = "if_out_discards",
		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
	},
	{
		.str = "if_out_errors",
		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)

/* RFC 2819 (RMON) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
	{
		.str = "ether_stats_undersize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
	},
	{
		.str = "ether_stats_oversize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
	},
	{
		.str = "ether_stats_fragments",
		.getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
	},
	{
		.str = "ether_pkts64octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
	},
	{
		.str = "ether_pkts65to127octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
	},
	{
		.str = "ether_pkts128to255octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
	},
	{
		.str = "ether_pkts256to511octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
	},
	{
		.str = "ether_pkts512to1023octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
	},
	{
		.str = "ether_pkts1024to1518octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
	},
	{
		.str = "ether_pkts1519to2047octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
	},
	{
		.str = "ether_pkts2048to4095octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
	},
	{
		.str = "ether_pkts4096to8191octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)

/* RFC 3635 (EtherLike MIB) counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
	{
		.str = "dot3stats_fcs_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
	},
	{
		.str = "dot3stats_symbol_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
	},
	{
		.str = "dot3control_in_unknown_opcodes",
		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
	},
	{
		.str = "dot3in_pause_frames",
		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)

/* Per-reason discard counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
	{
		.str = "discard_ingress_general",
		.getter = mlxsw_reg_ppcnt_ingress_general_get,
	},
	{
		.str = "discard_ingress_policy_engine",
		.getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
	},
	{
		.str = "discard_ingress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
	},
	{
		.str = "discard_ingress_tag_frame_type",
		.getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
	},
	{
		.str = "discard_egress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
	},
	{
		.str = "discard_loopback_filter",
		.getter = mlxsw_reg_ppcnt_loopback_filter_get,
	},
	{
		.str = "discard_egress_general",
		.getter = mlxsw_reg_ppcnt_egress_general_get,
	},
	{
		.str = "discard_egress_hoq",
		.getter = mlxsw_reg_ppcnt_egress_hoq_get,
	},
	{
		.str = "discard_egress_policy_engine",
		.getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
	},
	{
		.str = "discard_ingress_tx_link_down",
		.getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
	},
	{
		.str = "discard_egress_stp_filter",
		.getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
	},
	{
		.str = "discard_egress_sll",
		.getter = mlxsw_reg_ppcnt_egress_sll_get,
	},
};

#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)

/* Per-priority counter group (array continues past this chunk). */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
2269 { 2270 .str = "tx_pause_duration_prio", 2271 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 2272 }, 2273 }; 2274 2275 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 2276 2277 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 2278 { 2279 .str = "tc_transmit_queue_tc", 2280 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 2281 .cells_bytes = true, 2282 }, 2283 { 2284 .str = "tc_no_buffer_discard_uc_tc", 2285 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 2286 }, 2287 }; 2288 2289 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 2290 2291 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 2292 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \ 2293 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ 2294 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \ 2295 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \ 2296 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ 2297 IEEE_8021QAZ_MAX_TCS) + \ 2298 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ 2299 TC_MAX_QUEUE)) 2300 2301 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 2302 { 2303 int i; 2304 2305 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 2306 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 2307 mlxsw_sp_port_hw_prio_stats[i].str, prio); 2308 *p += ETH_GSTRING_LEN; 2309 } 2310 } 2311 2312 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 2313 { 2314 int i; 2315 2316 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 2317 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", 2318 mlxsw_sp_port_hw_tc_stats[i].str, tc); 2319 *p += ETH_GSTRING_LEN; 2320 } 2321 } 2322 2323 static void mlxsw_sp_port_get_strings(struct net_device *dev, 2324 u32 stringset, u8 *data) 2325 { 2326 u8 *p = data; 2327 int i; 2328 2329 switch (stringset) { 2330 case ETH_SS_STATS: 2331 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2332 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2333 ETH_GSTRING_LEN); 2334 p += ETH_GSTRING_LEN; 2335 } 2336 2337 for (i = 0; i < 
MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < TC_MAX_QUEUE; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}

/* ethtool set_phys_id: toggle the port LED via the MLCR register. */
static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

/* Map a PPCNT counter group to its stats descriptor table and length.
 * Returns 0 on success, -EOPNOTSUPP (with a WARN) for an unknown group.
 */
static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2863_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_2819_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_RFC_3635_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
		*p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_DISCARD_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_discard_stats;
		*p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

/* Read one PPCNT counter group and store its values at data[data_index..].
 * Counters marked cells_bytes are converted from hardware cell units to
 * bytes via mlxsw_sp_cells_bytes().
 */
static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}

/* ethtool get_ethtool_stats: fill all counter groups in the order declared
 * by mlxsw_sp_port_get_strings().
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* RFC 2863 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;

	/* RFC 2819 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;

	/* RFC 3635 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;

	/* Discard Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
				  data, data_index);
	data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

/* ethtool get_sset_count. */
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* Mapping between Spectrum-1 PTYS speed capability bit(s) and the
 * corresponding ethtool link mode bit and speed.
 */
struct mlxsw_sp1_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed = SPEED_100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = SPEED_1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = SPEED_10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed = SPEED_20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed = SPEED_40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed = SPEED_25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed = SPEED_50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed = SPEED_50000,
	},
	/* The single 56G capability bit maps to four ethtool link modes. */
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed = SPEED_100000,
	},
};

#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)

/* Derive the supported port types (FIBRE / Backplane) from the PTYS
 * capability mask.
 */
static void
mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
				   u32 ptys_eth_proto,
				   struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Set the ethtool link mode bit for every capability bit in the mask. */
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			 unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			__set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

/* Return the speed of the first table entry whose mask matches. */
static u32
mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
{
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
			return mlxsw_sp1_port_link_mode[i].speed;
	}

	return SPEED_UNKNOWN;
}

/* Fill speed / duplex; both stay unknown while the carrier is down. */
static void
mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
				 u32 ptys_eth_proto,
				 struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	if (!carrier_ok)
		return;

	cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
	if (cmd->base.speed != SPEED_UNKNOWN)
		cmd->base.duplex = DUPLEX_FULL;
}

/* Translate advertised ethtool link modes to a PTYS capability mask. */
static u32
mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Collect the capability bits of every entry matching an exact speed. */
static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp1_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* Collect the capability bits of all speeds up to and including
 * upper_speed.
 */
static u32
mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
	}
	return ptys_proto;
}

/* On Spectrum-1 the base (1x) speed is a fixed 25G; always succeeds. */
static int
mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			  u32 *base_speed)
{
	*base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
	return 0;
}

/* Spectrum-1 uses the legacy (non-extended) PTYS Ethernet fields. */
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin, bool autoneg)
{
	mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}

static void
mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
				  p_eth_proto_oper);
}

/* Spectrum-1 implementation of the port type/speed operations. */
static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp1_port_type_speed_ops = {
	.from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
	.from_ptys_link = mlxsw_sp1_from_ptys_link,
	.from_ptys_speed = mlxsw_sp1_from_ptys_speed,
	.from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
	.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
	.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
	.to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed,
	.port_speed_base = mlxsw_sp1_port_speed_base,
	.reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
};

/* On Spectrum-2 a single PTYS extended capability bit covers several
 * ethtool link modes; each capability therefore maps to an array of mode
 * bit indices.
 */
static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};

#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
	ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)

static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2840 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2841 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2842 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2843 }; 2844 2845 #define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \ 2846 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g) 2847 2848 static const enum ethtool_link_mode_bit_indices 2849 mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = { 2850 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2851 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2852 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2853 }; 2854 2855 #define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \ 2856 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr) 2857 2858 static const enum ethtool_link_mode_bit_indices 2859 mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = { 2860 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2861 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2862 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2863 }; 2864 2865 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \ 2866 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2) 2867 2868 static const enum ethtool_link_mode_bit_indices 2869 mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = { 2870 ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, 2871 ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, 2872 ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, 2873 ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, 2874 ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, 2875 }; 2876 2877 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \ 2878 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr) 2879 2880 static const enum ethtool_link_mode_bit_indices 2881 mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = { 2882 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2883 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2884 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2885 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2886 }; 2887 2888 #define 
MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \ 2889 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4) 2890 2891 static const enum ethtool_link_mode_bit_indices 2892 mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = { 2893 ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, 2894 ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, 2895 ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, 2896 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, 2897 ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, 2898 }; 2899 2900 #define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \ 2901 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2) 2902 2903 static const enum ethtool_link_mode_bit_indices 2904 mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { 2905 ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, 2906 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, 2907 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, 2908 ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, 2909 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, 2910 }; 2911 2912 #define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \ 2913 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4) 2914 2915 struct mlxsw_sp2_port_link_mode { 2916 const enum ethtool_link_mode_bit_indices *mask_ethtool; 2917 int m_ethtool_len; 2918 u32 mask; 2919 u32 speed; 2920 }; 2921 2922 static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { 2923 { 2924 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M, 2925 .mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m, 2926 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN, 2927 .speed = SPEED_100, 2928 }, 2929 { 2930 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII, 2931 .mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii, 2932 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN, 2933 .speed = SPEED_1000, 2934 }, 2935 { 2936 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII, 2937 .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii, 2938 .m_ethtool_len = 
MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN, 2939 .speed = SPEED_2500, 2940 }, 2941 { 2942 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R, 2943 .mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r, 2944 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN, 2945 .speed = SPEED_5000, 2946 }, 2947 { 2948 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G, 2949 .mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g, 2950 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN, 2951 .speed = SPEED_10000, 2952 }, 2953 { 2954 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G, 2955 .mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g, 2956 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN, 2957 .speed = SPEED_40000, 2958 }, 2959 { 2960 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR, 2961 .mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr, 2962 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN, 2963 .speed = SPEED_25000, 2964 }, 2965 { 2966 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2, 2967 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2, 2968 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN, 2969 .speed = SPEED_50000, 2970 }, 2971 { 2972 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR, 2973 .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr, 2974 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN, 2975 .speed = SPEED_50000, 2976 }, 2977 { 2978 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4, 2979 .mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4, 2980 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN, 2981 .speed = SPEED_100000, 2982 }, 2983 { 2984 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2, 2985 .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2, 2986 .m_ethtool_len = 
MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN, 2987 .speed = SPEED_100000, 2988 }, 2989 { 2990 .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, 2991 .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, 2992 .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, 2993 .speed = SPEED_200000, 2994 }, 2995 }; 2996 2997 #define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode) 2998 2999 static void 3000 mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, 3001 u32 ptys_eth_proto, 3002 struct ethtool_link_ksettings *cmd) 3003 { 3004 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 3005 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 3006 } 3007 3008 static void 3009 mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3010 unsigned long *mode) 3011 { 3012 int i; 3013 3014 for (i = 0; i < link_mode->m_ethtool_len; i++) 3015 __set_bit(link_mode->mask_ethtool[i], mode); 3016 } 3017 3018 static void 3019 mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, 3020 unsigned long *mode) 3021 { 3022 int i; 3023 3024 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3025 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) 3026 mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3027 mode); 3028 } 3029 } 3030 3031 static u32 3032 mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) 3033 { 3034 int i; 3035 3036 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3037 if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) 3038 return mlxsw_sp2_port_link_mode[i].speed; 3039 } 3040 3041 return SPEED_UNKNOWN; 3042 } 3043 3044 static void 3045 mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, 3046 u32 ptys_eth_proto, 3047 struct ethtool_link_ksettings *cmd) 3048 { 3049 cmd->base.speed = SPEED_UNKNOWN; 3050 cmd->base.duplex = DUPLEX_UNKNOWN; 3051 3052 if (!carrier_ok) 3053 return; 3054 
3055 cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 3056 if (cmd->base.speed != SPEED_UNKNOWN) 3057 cmd->base.duplex = DUPLEX_FULL; 3058 } 3059 3060 static bool 3061 mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, 3062 const unsigned long *mode) 3063 { 3064 int cnt = 0; 3065 int i; 3066 3067 for (i = 0; i < link_mode->m_ethtool_len; i++) { 3068 if (test_bit(link_mode->mask_ethtool[i], mode)) 3069 cnt++; 3070 } 3071 3072 return cnt == link_mode->m_ethtool_len; 3073 } 3074 3075 static u32 3076 mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, 3077 const struct ethtool_link_ksettings *cmd) 3078 { 3079 u32 ptys_proto = 0; 3080 int i; 3081 3082 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3083 if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], 3084 cmd->link_modes.advertising)) 3085 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3086 } 3087 return ptys_proto; 3088 } 3089 3090 static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed) 3091 { 3092 u32 ptys_proto = 0; 3093 int i; 3094 3095 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3096 if (speed == mlxsw_sp2_port_link_mode[i].speed) 3097 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3098 } 3099 return ptys_proto; 3100 } 3101 3102 static u32 3103 mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) 3104 { 3105 u32 ptys_proto = 0; 3106 int i; 3107 3108 for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { 3109 if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed) 3110 ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; 3111 } 3112 return ptys_proto; 3113 } 3114 3115 static int 3116 mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, 3117 u32 *base_speed) 3118 { 3119 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3120 u32 eth_proto_cap; 3121 int err; 3122 3123 /* In Spectrum-2, the speed of 1x can change from port to port, so query 3124 * it from firmware. 
	 */
	mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);

	/* Prefer 50G per lane when the port is capable of it. */
	if (eth_proto_cap &
	    MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) {
		*base_speed = MLXSW_SP_PORT_BASE_SPEED_50G;
		return 0;
	}

	if (eth_proto_cap &
	    MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) {
		*base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
		return 0;
	}

	return -EIO;
}

/* Thin wrapper dispatching to the extended (Spectrum-2) PTYS packing. */
static void
mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
			    u8 local_port, u32 proto_admin,
			    bool autoneg)
{
	mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg);
}

/* Thin wrapper dispatching to the extended (Spectrum-2) PTYS unpacking.
 * Any of the output pointers may be NULL when the caller does not need
 * that field.
 */
static void
mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
			      u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
			      u32 *p_eth_proto_oper)
{
	mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap,
				      p_eth_proto_admin, p_eth_proto_oper);
}

/* Spectrum-2 implementation of the ASIC-specific port type/speed operations
 * referenced through mlxsw_sp->port_type_speed_ops.
 */
static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp2_port_type_speed_ops = {
	.from_ptys_supported_port	= mlxsw_sp2_from_ptys_supported_port,
	.from_ptys_link			= mlxsw_sp2_from_ptys_link,
	.from_ptys_speed		= mlxsw_sp2_from_ptys_speed,
	.from_ptys_speed_duplex		= mlxsw_sp2_from_ptys_speed_duplex,
	.to_ptys_advert_link		= mlxsw_sp2_to_ptys_advert_link,
	.to_ptys_speed			= mlxsw_sp2_to_ptys_speed,
	.to_ptys_upper_speed		= mlxsw_sp2_to_ptys_upper_speed,
	.port_speed_base		= mlxsw_sp2_port_speed_base,
	.reg_ptys_eth_pack		= mlxsw_sp2_reg_ptys_eth_pack,
	.reg_ptys_eth_unpack		= mlxsw_sp2_reg_ptys_eth_unpack,
};

/* Fill cmd->link_modes.supported from the port's PTYS capability mask,
 * plus the pause/autoneg modes that are always supported.
 */
static void
mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
				 struct ethtool_link_ksettings *cmd)
{
	const struct mlxsw_sp_port_type_speed_ops *ops;

	ops = mlxsw_sp->port_type_speed_ops;

	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
	ops->from_ptys_link(mlxsw_sp, eth_proto_cap, cmd->link_modes.supported);
}

/* Fill cmd->link_modes.advertising from the admin PTYS mask. A no-op when
 * autoneg is disabled, since nothing is advertised in that case.
 */
static void
mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
				 u32 eth_proto_admin, bool autoneg,
				 struct ethtool_link_ksettings *cmd)
{
	const struct mlxsw_sp_port_type_speed_ops *ops;

	ops = mlxsw_sp->port_type_speed_ops;

	if (!autoneg)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	ops->from_ptys_link(mlxsw_sp, eth_proto_admin,
			    cmd->link_modes.advertising);
}

/* Map the PTYS connector type reported by firmware to the ethtool PORT_*
 * constant. Unknown values warn once and fall back to PORT_OTHER.
 */
static u8
mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type)
{
	switch (connector_type) {
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR:
		return PORT_OTHER;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE:
		return PORT_NONE;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP:
		return PORT_TP;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI:
		return PORT_AUI;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC:
		return PORT_BNC;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII:
		return PORT_MII;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE:
		return PORT_FIBRE;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA:
		return PORT_DA;
	case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER:
		return PORT_OTHER;
	default:
		WARN_ON_ONCE(1);
		return PORT_OTHER;
	}
}

/* ethtool get_link_ksettings handler: query PTYS once and derive supported/
 * advertised modes, autoneg state, connector type and speed/duplex from it.
 */
static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
3243 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3244 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3245 const struct mlxsw_sp_port_type_speed_ops *ops; 3246 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3247 u8 connector_type; 3248 bool autoneg; 3249 int err; 3250 3251 ops = mlxsw_sp->port_type_speed_ops; 3252 3253 autoneg = mlxsw_sp_port->link.autoneg; 3254 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3255 0, false); 3256 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3257 if (err) 3258 return err; 3259 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 3260 ð_proto_admin, ð_proto_oper); 3261 3262 mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd); 3263 3264 mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, 3265 cmd); 3266 3267 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 3268 connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); 3269 cmd->base.port = mlxsw_sp_port_connector_port(connector_type); 3270 ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), 3271 eth_proto_oper, cmd); 3272 3273 return 0; 3274 } 3275 3276 static int 3277 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 3278 const struct ethtool_link_ksettings *cmd) 3279 { 3280 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3281 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3282 const struct mlxsw_sp_port_type_speed_ops *ops; 3283 char ptys_pl[MLXSW_REG_PTYS_LEN]; 3284 u32 eth_proto_cap, eth_proto_new; 3285 bool autoneg; 3286 int err; 3287 3288 ops = mlxsw_sp->port_type_speed_ops; 3289 3290 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3291 0, false); 3292 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3293 if (err) 3294 return err; 3295 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); 3296 3297 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 3298 if (!autoneg && cmd->base.speed == SPEED_56000) { 3299 
netdev_err(dev, "56G not supported with autoneg off\n"); 3300 return -EINVAL; 3301 } 3302 eth_proto_new = autoneg ? 3303 ops->to_ptys_advert_link(mlxsw_sp, cmd) : 3304 ops->to_ptys_speed(mlxsw_sp, cmd->base.speed); 3305 3306 eth_proto_new = eth_proto_new & eth_proto_cap; 3307 if (!eth_proto_new) { 3308 netdev_err(dev, "No supported speed requested\n"); 3309 return -EINVAL; 3310 } 3311 3312 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 3313 eth_proto_new, autoneg); 3314 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 3315 if (err) 3316 return err; 3317 3318 mlxsw_sp_port->link.autoneg = autoneg; 3319 3320 if (!netif_running(dev)) 3321 return 0; 3322 3323 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 3324 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 3325 3326 return 0; 3327 } 3328 3329 static int mlxsw_sp_get_module_info(struct net_device *netdev, 3330 struct ethtool_modinfo *modinfo) 3331 { 3332 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3333 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3334 int err; 3335 3336 err = mlxsw_env_get_module_info(mlxsw_sp->core, 3337 mlxsw_sp_port->mapping.module, 3338 modinfo); 3339 3340 return err; 3341 } 3342 3343 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 3344 struct ethtool_eeprom *ee, 3345 u8 *data) 3346 { 3347 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3348 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3349 int err; 3350 3351 err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 3352 mlxsw_sp_port->mapping.module, ee, 3353 data); 3354 3355 return err; 3356 } 3357 3358 static int 3359 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) 3360 { 3361 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 3362 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3363 3364 return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); 3365 } 3366 3367 static const struct ethtool_ops 
mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
	.get_module_info	= mlxsw_sp_get_module_info,
	.get_module_eeprom	= mlxsw_sp_get_module_eeprom,
	.get_ts_info		= mlxsw_sp_get_ts_info,
};

/* Enable in hardware every speed up to base_speed * width for the port.
 * The base speed is ASIC-dependent and may be queried from firmware.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;
	u32 upper_speed;
	u32 base_speed;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port,
				   &base_speed);
	if (err)
		return err;
	upper_speed = base_speed * width;

	eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_admin, mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

/* Configure one ETS scheduling element (QEEC register): link 'index' at
 * hierarchy level 'hr' to 'next_index' one level up, with strict priority
 * or DWRR arbitration.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the maximum shaper rate of one ETS element. MLXSW_REG_QEEC_MAS_DIS
 * disables the shaper.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Set the minimum (guaranteed) shaper rate of one ETS element. */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Map a switch priority to a traffic class on this port (QTCT register). */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

/* Set up the default ETS element hierarchy, shapers and priority-to-TC
 * mapping for a newly created port.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TCs 8..15 are the multicast companions of TCs 0..7; they
		 * share the same subgroup and use DWRR.
		 */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HIERARCY_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0.
	 */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

/* Enable or disable multicast-aware TC mapping mode (QTCTM register). */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

/* Create and register one front-panel port netdev: allocate the netdev and
 * per-port state, then run the full hardware init sequence. On failure the
 * goto ladder below unwinds in exact reverse order of initialization.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   module + 1, split, lane / width,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB.
	 */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	/* NOTE(review): the message says "VID 1"; it actually creates
	 * MLXSW_SP_DEFAULT_VID — confirm the two agree.
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	/* Publish the port before register_netdev() so event handlers can
	 * look it up as soon as the netdev becomes visible.
	 */
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);

	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

	/* Error unwind: exact reverse order of the init sequence above. */
err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

/* Tear down one port: cancel async work, unregister the netdev and undo the
 * create sequence in reverse order.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

/* Return whether a netdev has been created for this local port. */
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

/* Remove all created ports and free the port bookkeeping arrays.
 * Local port numbering starts at 1; index 0 is the CPU port.
 */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
	kfree(mlxsw_sp->ports);
}

/* Create a netdev for every local port that has a module mapped (width > 0),
 * recording the port-to-module mapping for later unsplit operations.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
						 GFP_KERNEL);
	if (!mlxsw_sp->port_to_module) {
		err = -ENOMEM;
		goto err_port_to_module_alloc;
	}

	for (i = 1; i < max_ports; i++) {
		/* Mark as invalid */
		mlxsw_sp->port_to_module[i] = -1;

		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	/* Unwind only the ports created so far (indices 1..i-1). */
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
	kfree(mlxsw_sp->ports);
	return err;
}

/* Return the first local port of the cluster that 'local_port' belongs to. */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}

/* Create 'count' split ports starting at 'base_port', spaced 'offset' local
 * ports apart, each using an equal share of the module's lanes.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count, u8 offset)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   true, module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}

/* Re-create the original unsplit (full-width) ports after a split is undone
 * or has failed. Best-effort: creation errors are not propagated here.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		if (mlxsw_sp->port_to_module[local_port] < 0)
			continue;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}

/* devlink port split handler: validate the request, remove the ports being
 * reconfigured and create the split ports in their place.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 local_ports_in_1x, local_ports_in_2x, offset;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
		return -EIO;

	local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
	local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split.
	 */
	if (count == 2) {
		offset = local_ports_in_2x;
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + local_ports_in_2x]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	} else {
		offset = local_ports_in_1x;
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Remove the existing full-width port(s) being replaced. */
	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count,
					 offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the original unsplit ports on failure. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

/* devlink port unsplit handler: remove the split ports and re-create the
 * original full-width port(s).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 local_ports_in_1x, local_ports_in_2x, offset;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
		return -EIO;

	local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
	local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	/* Infer the original split count from the current lane width. */
	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	if (count == 2)
		offset = local_ports_in_2x;
	else
		offset = local_ports_in_1x;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}

/* PUDE (port up/down event) handler: propagate the hardware operational
 * status to the netdev carrier state and, on link-up, schedule the PTP
 * shaper work.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

/* Drain the per-port PTP timestamp FIFO (MTPPTR register) and hand every
 * record to the Spectrum-1 PTP code.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u8 local_port;
	u8 num_rec;
	int i;

	local_port =
mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 4102 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 4103 for (i = 0; i < num_rec; i++) { 4104 u8 domain_number; 4105 u8 message_type; 4106 u16 sequence_id; 4107 u64 timestamp; 4108 4109 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 4110 &domain_number, &sequence_id, 4111 ×tamp); 4112 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 4113 message_type, domain_number, 4114 sequence_id, timestamp); 4115 } 4116 } 4117 4118 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 4119 char *mtpptr_pl, void *priv) 4120 { 4121 struct mlxsw_sp *mlxsw_sp = priv; 4122 4123 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 4124 } 4125 4126 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 4127 char *mtpptr_pl, void *priv) 4128 { 4129 struct mlxsw_sp *mlxsw_sp = priv; 4130 4131 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 4132 } 4133 4134 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 4135 u8 local_port, void *priv) 4136 { 4137 struct mlxsw_sp *mlxsw_sp = priv; 4138 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4139 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 4140 4141 if (unlikely(!mlxsw_sp_port)) { 4142 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 4143 local_port); 4144 return; 4145 } 4146 4147 skb->dev = mlxsw_sp_port->dev; 4148 4149 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 4150 u64_stats_update_begin(&pcpu_stats->syncp); 4151 pcpu_stats->rx_packets++; 4152 pcpu_stats->rx_bytes += skb->len; 4153 u64_stats_update_end(&pcpu_stats->syncp); 4154 4155 skb->protocol = eth_type_trans(skb, skb->dev); 4156 netif_receive_skb(skb); 4157 } 4158 4159 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 4160 void *priv) 4161 { 4162 skb->offload_fwd_mark = 1; 4163 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 
4164 } 4165 4166 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 4167 u8 local_port, void *priv) 4168 { 4169 skb->offload_l3_fwd_mark = 1; 4170 skb->offload_fwd_mark = 1; 4171 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 4172 } 4173 4174 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 4175 void *priv) 4176 { 4177 struct mlxsw_sp *mlxsw_sp = priv; 4178 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4179 struct psample_group *psample_group; 4180 u32 size; 4181 4182 if (unlikely(!mlxsw_sp_port)) { 4183 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 4184 local_port); 4185 goto out; 4186 } 4187 if (unlikely(!mlxsw_sp_port->sample)) { 4188 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 4189 local_port); 4190 goto out; 4191 } 4192 4193 size = mlxsw_sp_port->sample->truncate ? 4194 mlxsw_sp_port->sample->trunc_size : skb->len; 4195 4196 rcu_read_lock(); 4197 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 4198 if (!psample_group) 4199 goto out_unlock; 4200 psample_sample_packet(psample_group, skb, size, 4201 mlxsw_sp_port->dev->ifindex, 0, 4202 mlxsw_sp_port->sample->rate); 4203 out_unlock: 4204 rcu_read_unlock(); 4205 out: 4206 consume_skb(skb); 4207 } 4208 4209 static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port, 4210 void *priv) 4211 { 4212 struct mlxsw_sp *mlxsw_sp = priv; 4213 4214 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 4215 } 4216 4217 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4218 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 4219 _is_ctrl, SP_##_trap_group, DISCARD) 4220 4221 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 4222 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 4223 _is_ctrl, SP_##_trap_group, DISCARD) 4224 
/* Listener-table helper: trap to CPU with both the L2 and L3 offload
 * forwarded marks set on the resulting skb.
 */
#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

/* Traps and events common to all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
		  false, SP_LLDP, DISCARD),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
	/* PTP traps */
	MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU,
		  false, SP_PTP0, DISCARD),
	MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false),
};

/* Listeners specific to Spectrum-1: PTP timestamp FIFO events. */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

/* Configure a policer (rate limiter) for every known CPU trap group via
 * the QPCR register, so trapped traffic cannot overwhelm the CPU. Groups
 * not listed in the switch below are left unconfigured.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			rate = 24 * 1024;
			burst_size = 12;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			rate = 19 * 1024;
			burst_size = 12;
			break;
		default:
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Configure every known CPU trap group via the HTGT register: assign its
 * priority, traffic class and policer. The policer ID conventionally
 * equals the trap group index, except for event groups which are not
 * policed. Groups not listed below are left unconfigured.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* A policed group must map to an existing policer. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Register an array of trap listeners with the core. On failure, roll
 * back the listeners registered so far.
 */
static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_listener listeners[],
				   size_t listeners_count)
{
	int i;
	int err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &listeners[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
	return err;
}

/* Unregister an array of trap listeners from the core. */
static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_listener listeners[],
				      size_t listeners_count)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
}

/* Initialize CPU traps: policers and trap groups first, then the common
 * listeners and finally the generation-specific ones (mlxsw_sp->listeners).
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		return err;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	return err;
}

/* Tear down traps in the reverse order of mlxsw_sp_traps_init(). */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure the LAG hash (SLCR register) with a seed derived from the
 * device base MAC, and allocate the per-LAG upper-device tracking array.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

/* Configure the EMAD trap group, which must work before the full trap
 * configuration is in place so that register access is possible.
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

/* Spectrum-1 PTP implementation. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
};

/* Spectrum-2 PTP implementation. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
};

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

/* Common driver initialization, shared by both Spectrum generations.
 * Sub-blocks are initialized in dependency order; on any failure the
 * already-initialized blocks are torn down in reverse via the goto chain.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 init: select the SP1-specific operations and extra
 * listeners, then run the common initialization.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);

	return (mlxsw_core, mlxsw_bus_info);
}

/* Spectrum-2 init: select the SP2-specific operations, then run the
 * common initialization. No firmware requirement or extra listeners.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}

/* Tear down the driver in the reverse order of mlxsw_sp_init(). */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

/* Device configuration profile for Spectrum-1 (includes KVD sizing). */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Device configuration profile for Spectrum-2 (no KVD sizing). */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Compute the allowed devlink size ranges for the KVD memory and its
 * linear/hash-single/hash-double partitions, derived from the device's
 * reported minimum sizes.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the KVD memory and its partitions as devlink resources so
 * their sizes can be inspected and changed from user space.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 4974 &linear_size_params, 4975 &hash_double_size_params, 4976 &hash_single_size_params); 4977 4978 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 4979 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 4980 kvd_size, MLXSW_SP_RESOURCE_KVD, 4981 DEVLINK_RESOURCE_ID_PARENT_TOP, 4982 &kvd_size_params); 4983 if (err) 4984 return err; 4985 4986 linear_size = profile->kvd_linear_size; 4987 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 4988 linear_size, 4989 MLXSW_SP_RESOURCE_KVD_LINEAR, 4990 MLXSW_SP_RESOURCE_KVD, 4991 &linear_size_params); 4992 if (err) 4993 return err; 4994 4995 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 4996 if (err) 4997 return err; 4998 4999 double_size = kvd_size - linear_size; 5000 double_size *= profile->kvd_hash_double_parts; 5001 double_size /= profile->kvd_hash_double_parts + 5002 profile->kvd_hash_single_parts; 5003 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 5004 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 5005 double_size, 5006 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 5007 MLXSW_SP_RESOURCE_KVD, 5008 &hash_double_size_params); 5009 if (err) 5010 return err; 5011 5012 single_size = kvd_size - double_size - linear_size; 5013 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 5014 single_size, 5015 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 5016 MLXSW_SP_RESOURCE_KVD, 5017 &hash_single_size_params); 5018 if (err) 5019 return err; 5020 5021 return 0; 5022 } 5023 5024 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 5025 { 5026 return mlxsw_sp1_resources_kvd_register(mlxsw_core); 5027 } 5028 5029 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 5030 { 5031 return 0; 5032 } 5033 5034 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 5035 const struct mlxsw_config_profile *profile, 5036 
u64 *p_single_size, u64 *p_double_size, 5037 u64 *p_linear_size) 5038 { 5039 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5040 u32 double_size; 5041 int err; 5042 5043 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 5044 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 5045 return -EIO; 5046 5047 /* The hash part is what left of the kvd without the 5048 * linear part. It is split to the single size and 5049 * double size by the parts ratio from the profile. 5050 * Both sizes must be a multiplications of the 5051 * granularity from the profile. In case the user 5052 * provided the sizes they are obtained via devlink. 5053 */ 5054 err = devlink_resource_size_get(devlink, 5055 MLXSW_SP_RESOURCE_KVD_LINEAR, 5056 p_linear_size); 5057 if (err) 5058 *p_linear_size = profile->kvd_linear_size; 5059 5060 err = devlink_resource_size_get(devlink, 5061 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 5062 p_double_size); 5063 if (err) { 5064 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 5065 *p_linear_size; 5066 double_size *= profile->kvd_hash_double_parts; 5067 double_size /= profile->kvd_hash_double_parts + 5068 profile->kvd_hash_single_parts; 5069 *p_double_size = rounddown(double_size, 5070 MLXSW_SP_KVD_GRANULARITY); 5071 } 5072 5073 err = devlink_resource_size_get(devlink, 5074 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 5075 p_single_size); 5076 if (err) 5077 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 5078 *p_double_size - *p_linear_size; 5079 5080 /* Check results are legal. 
*/ 5081 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 5082 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 5083 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 5084 return -EIO; 5085 5086 return 0; 5087 } 5088 5089 static int 5090 mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id, 5091 union devlink_param_value val, 5092 struct netlink_ext_ack *extack) 5093 { 5094 if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) && 5095 (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) { 5096 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'"); 5097 return -EINVAL; 5098 } 5099 5100 return 0; 5101 } 5102 5103 static const struct devlink_param mlxsw_sp_devlink_params[] = { 5104 DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, 5105 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), 5106 NULL, NULL, 5107 mlxsw_sp_devlink_param_fw_load_policy_validate), 5108 }; 5109 5110 static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core) 5111 { 5112 struct devlink *devlink = priv_to_devlink(mlxsw_core); 5113 union devlink_param_value value; 5114 int err; 5115 5116 err = devlink_params_register(devlink, mlxsw_sp_devlink_params, 5117 ARRAY_SIZE(mlxsw_sp_devlink_params)); 5118 if (err) 5119 return err; 5120 5121 value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER; 5122 devlink_param_driverinit_value_set(devlink, 5123 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 5124 value); 5125 return 0; 5126 } 5127 5128 static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core) 5129 { 5130 devlink_params_unregister(priv_to_devlink(mlxsw_core), 5131 mlxsw_sp_devlink_params, 5132 ARRAY_SIZE(mlxsw_sp_devlink_params)); 5133 } 5134 5135 static int 5136 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 5137 struct devlink_param_gset_ctx *ctx) 5138 { 5139 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 5140 struct mlxsw_sp *mlxsw_sp = 
mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

/* devlink "set" callback for the ACL region rehash interval parameter. */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

/* Spectrum-2 only devlink parameters. */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};

/* Register both the common and the Spectrum-2 specific devlink parameters;
 * roll back the common ones on failure.
 */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = mlxsw_sp_params_register(mlxsw_core);
	if (err)
		return err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		goto err_devlink_params_register;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;

err_devlink_params_register:
	mlxsw_sp_params_unregister(mlxsw_core);
	return err;
}

/* Unregister parameters in reverse order of registration. */
static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
	mlxsw_sp_params_unregister(mlxsw_core);
}

/* Hand a transmitted skb to the per-ASIC PTP code after stripping the Tx
 * header that was prepended for the device.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}

/* mlxsw core driver description for Spectrum-1 ASICs. */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update			= mlxsw_sp_flash_update,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.params_register		= mlxsw_sp_params_register,
	.params_unregister		= mlxsw_sp_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.res_query_enabled		= true,
};

/* mlxsw core driver description for Spectrum-2 ASICs. Note: no
 * .kvd_sizes_get - KVD partitioning is Spectrum-1 specific.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		=
mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update			= mlxsw_sp_flash_update,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
};

/* Return true if the netdev is an mlxsw_sp front-panel port, identified by
 * its netdev_ops pointer.
 */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

/* netdev_walk_all_lower_dev() callback: stop the walk (return 1) once an
 * mlxsw_sp port is found and report it through @data.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

/* Find the mlxsw_sp port underneath @dev (or @dev itself), walking the
 * lower-device hierarchy. Returns NULL when none is found.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Resolve the mlxsw_sp instance that @dev ultimately sits on top of,
 * or NULL if @dev is unrelated to this driver.
 */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

/* RCU variant of mlxsw_sp_port_dev_lower_find(); caller holds rcu_read_lock. */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

/* Like mlxsw_sp_port_dev_lower_find_rcu() but takes a reference on the
 * port netdev; pair with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

/* Drop the reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

/* Make the LAG device and any bridge ports stacked directly on top of it
 * leave their bridges, on behalf of @mlxsw_sp_port.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

/* Create a LAG in the device via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Destroy a LAG in the device via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Map the port into LAG @lag_id at @port_index via the SLCOR register. */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Remove the port's collector mapping from LAG @lag_id. */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable collection (Rx) on the port's LAG membership. */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Disable collection (Rx) on the port's LAG membership. */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Find the LAG index already used for @lag_dev, or the first free one.
 * Returns -EBUSY when all device LAG entries are taken.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			/* Remember the first unused slot as a fallback. */
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

/* Validate that @lag_dev can be offloaded: a LAG index is available and
 * the LAG uses a hash-based Tx policy.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

/* Find a free member slot inside LAG @lag_id; -EBUSY when full. */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

/* Join @mlxsw_sp_port to the LAG represented by @lag_dev: create the LAG
 * in hardware on first use, map the port into it and update bookkeeping.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		/* First member - instantiate the LAG in the device. */
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id,
					port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;

err_col_port_add:
	/* Tear the LAG back down if we were the one who created it. */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

/* Undo mlxsw_sp_port_lag_join(): unmap the port, clean up VLANs and
 * bridge memberships, and destroy the LAG when the last member leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are members of
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}

/* Add the port to the LAG's distributor (Tx) via the SLDR register. */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Remove the port from the LAG's distributor (Tx). */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Enable both collection and distribution for the port's LAG membership,
 * rolling back the collector on distributor failure.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Disable distribution then collection, restoring the distributor if the
 * collector disable fails.
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Reflect the bonding driver's tx_enabled state into the device. */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

/* Set the STP state of every VLAN on the port to forwarding or discarding
 * via the SPMS register.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp =
mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	/* SPMS payload is too large for the stack - allocate it. */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Prepare the port for enslavement to an OVS datapath: switch to virtual
 * ports mode, open STP, allow the full VLAN range and disable learning.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning on the VIDs already processed. */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

/* Undo mlxsw_sp_port_ovs_join() in reverse order. */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

/* Return true when more than one VxLAN device is enslaved to @br_dev. */
static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

/* In a VLAN-aware bridge, no two VxLAN devices may map to the same VLAN.
 * Returns false when a duplicate mapped VID is detected.
 */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

/* Validate that a bridge holding VxLAN devices can be offloaded. */
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

/* Handle PRECHANGEUPPER / CHANGEUPPER notifications for a front-panel
 * port: validate the topology change, then join/leave the upper device.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		/* Veto unsupported topologies before they are committed. */
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		/* The change already happened - reflect it in the device. */
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

/* Handle CHANGELOWERSTATE for a port enslaved to a LAG: propagate the
 * bonding tx_enabled state to the device.
 */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

/* Dispatch netdev events for a front-panel port to the relevant handler. */
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

/* Replay an event on a LAG device to each mlxsw_sp port member. */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle PRECHANGEUPPER / CHANGEUPPER for a VLAN device on top of a port. */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							 upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* Should have been vetoed in PRECHANGEUPPER. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

/* Replay a VLAN-on-LAG event to each mlxsw_sp port member of the LAG. */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle upper events for a VLAN device on top of an offloaded bridge;
 * only macvlan uppers (router interfaces) are supported.
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Route a VLAN device event according to what the VLAN sits on top of. */
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

/* Handle upper events for an offloaded bridge device itself; only VLAN and
 * macvlan uppers are meaningful here.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* macvlan devices cannot have uppers of their own (except VRFs, handled
 * elsewhere) - veto any PRECHANGEUPPER.
 */
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

/* Return true if the event is an (un)enslavement to an L3 master (VRF). */
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

/* Handle events on a VxLAN device: join/leave the hardware NVE when it is
 * linked to, brought up under, or taken down under an offloaded bridge.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info
*cu_info; 6167 struct netdev_notifier_info *info = ptr; 6168 struct netlink_ext_ack *extack; 6169 struct net_device *upper_dev; 6170 6171 extack = netdev_notifier_info_to_extack(info); 6172 6173 switch (event) { 6174 case NETDEV_CHANGEUPPER: 6175 cu_info = container_of(info, 6176 struct netdev_notifier_changeupper_info, 6177 info); 6178 upper_dev = cu_info->upper_dev; 6179 if (!netif_is_bridge_master(upper_dev)) 6180 return 0; 6181 if (!mlxsw_sp_lower_get(upper_dev)) 6182 return 0; 6183 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 6184 return -EOPNOTSUPP; 6185 if (cu_info->linking) { 6186 if (!netif_running(dev)) 6187 return 0; 6188 /* When the bridge is VLAN-aware, the VNI of the VxLAN 6189 * device needs to be mapped to a VLAN, but at this 6190 * point no VLANs are configured on the VxLAN device 6191 */ 6192 if (br_vlan_enabled(upper_dev)) 6193 return 0; 6194 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 6195 dev, 0, extack); 6196 } else { 6197 /* VLANs were already flushed, which triggered the 6198 * necessary cleanup 6199 */ 6200 if (br_vlan_enabled(upper_dev)) 6201 return 0; 6202 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6203 } 6204 break; 6205 case NETDEV_PRE_UP: 6206 upper_dev = netdev_master_upper_dev_get(dev); 6207 if (!upper_dev) 6208 return 0; 6209 if (!netif_is_bridge_master(upper_dev)) 6210 return 0; 6211 if (!mlxsw_sp_lower_get(upper_dev)) 6212 return 0; 6213 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 6214 extack); 6215 case NETDEV_DOWN: 6216 upper_dev = netdev_master_upper_dev_get(dev); 6217 if (!upper_dev) 6218 return 0; 6219 if (!netif_is_bridge_master(upper_dev)) 6220 return 0; 6221 if (!mlxsw_sp_lower_get(upper_dev)) 6222 return 0; 6223 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 6224 break; 6225 } 6226 6227 return 0; 6228 } 6229 6230 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 6231 unsigned long event, void *ptr) 6232 { 6233 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	/* A disappearing netdev may still back a SPAN (mirroring) entry;
	 * invalidate it before the device is gone.
	 */
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	/* NOTE(review): this is a plain 'if', not 'else if' - a VxLAN error
	 * stored in 'err' above can be overwritten by the chain below when
	 * the device also matches one of the cases - confirm intended.
	 */
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

/* Validator notifiers: veto IPv4/IPv6 address configuration the device
 * cannot offload, before the address is actually installed.
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

/* PCI IDs handled by the Spectrum-1 driver. */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX,
PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 6284 {0, }, 6285 }; 6286 6287 static struct pci_driver mlxsw_sp1_pci_driver = { 6288 .name = mlxsw_sp1_driver_name, 6289 .id_table = mlxsw_sp1_pci_id_table, 6290 }; 6291 6292 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 6293 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 6294 {0, }, 6295 }; 6296 6297 static struct pci_driver mlxsw_sp2_pci_driver = { 6298 .name = mlxsw_sp2_driver_name, 6299 .id_table = mlxsw_sp2_pci_id_table, 6300 }; 6301 6302 static int __init mlxsw_sp_module_init(void) 6303 { 6304 int err; 6305 6306 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6307 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6308 6309 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 6310 if (err) 6311 goto err_sp1_core_driver_register; 6312 6313 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 6314 if (err) 6315 goto err_sp2_core_driver_register; 6316 6317 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 6318 if (err) 6319 goto err_sp1_pci_driver_register; 6320 6321 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 6322 if (err) 6323 goto err_sp2_pci_driver_register; 6324 6325 return 0; 6326 6327 err_sp2_pci_driver_register: 6328 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6329 err_sp1_pci_driver_register: 6330 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6331 err_sp2_core_driver_register: 6332 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6333 err_sp1_core_driver_register: 6334 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 6335 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 6336 return err; 6337 } 6338 6339 static void __exit mlxsw_sp_module_exit(void) 6340 { 6341 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 6342 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 6343 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 6344 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 6345 
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Both PCI ID tables are exported so userspace (modprobe) can autoload the
 * module for either ASIC generation.
 */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
/* Firmware image name (defined near the top of the file) requested via the
 * firmware loader.
 */
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);