// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "../mlxfw/mlxfw.h"

/* Firmware minor versions are grouped into branches of 100; driver and
 * device firmware are considered compatible when their minors fall in
 * the same branch (see mlxsw_sp_fw_rev_validate()).
 */
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)

/* Firmware revision the SP1 driver requires, and the oldest minor from
 * which a FW-initiated reset (-EAGAIN re-probe path) is supported.
 */
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 1702
#define MLXSW_SP1_FWREV_SUBMINOR 6
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware image name requested from userspace, derived from the
 * required revision above (e.g. "mellanox/mlxsw_spectrum-13.1702.6.mfa2").
 */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp_driver_version[] = "1.0";

/* TX header field definitions. The TX header is prepended by the driver
 * to every packet handed to the device (see mlxsw_sp_txhdr_construct()).
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup.
Valid only if 'fid_valid' is 124 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 125 * Valid for data packets only. 126 */ 127 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 128 129 /* tx_hdr_type 130 * 0 - Data packets 131 * 6 - Control packets 132 */ 133 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 134 135 struct mlxsw_sp_mlxfw_dev { 136 struct mlxfw_dev mlxfw_dev; 137 struct mlxsw_sp *mlxsw_sp; 138 }; 139 140 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, 141 u16 component_index, u32 *p_max_size, 142 u8 *p_align_bits, u16 *p_max_write_size) 143 { 144 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 145 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 146 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 147 char mcqi_pl[MLXSW_REG_MCQI_LEN]; 148 int err; 149 150 mlxsw_reg_mcqi_pack(mcqi_pl, component_index); 151 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl); 152 if (err) 153 return err; 154 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, 155 p_max_write_size); 156 157 *p_align_bits = max_t(u8, *p_align_bits, 2); 158 *p_max_write_size = min_t(u16, *p_max_write_size, 159 MLXSW_REG_MCDA_MAX_DATA_LEN); 160 return 0; 161 } 162 163 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) 164 { 165 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 166 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 167 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 168 char mcc_pl[MLXSW_REG_MCC_LEN]; 169 u8 control_state; 170 int err; 171 172 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0); 173 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 174 if (err) 175 return err; 176 177 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state); 178 if (control_state != MLXFW_FSM_STATE_IDLE) 179 return -EBUSY; 180 181 mlxsw_reg_mcc_pack(mcc_pl, 182 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 183 0, *fwhandle, 0); 184 return mlxsw_reg_write(mlxsw_sp->core, 
MLXSW_REG(mcc), mcc_pl); 185 } 186 187 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev, 188 u32 fwhandle, u16 component_index, 189 u32 component_size) 190 { 191 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 192 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 193 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 194 char mcc_pl[MLXSW_REG_MCC_LEN]; 195 196 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, 197 component_index, fwhandle, component_size); 198 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 199 } 200 201 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev, 202 u32 fwhandle, u8 *data, u16 size, 203 u32 offset) 204 { 205 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 206 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 207 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 208 char mcda_pl[MLXSW_REG_MCDA_LEN]; 209 210 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data); 211 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl); 212 } 213 214 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, 215 u32 fwhandle, u16 component_index) 216 { 217 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 218 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 219 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 220 char mcc_pl[MLXSW_REG_MCC_LEN]; 221 222 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, 223 component_index, fwhandle, 0); 224 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 225 } 226 227 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 228 { 229 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 230 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 231 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 232 char mcc_pl[MLXSW_REG_MCC_LEN]; 233 234 mlxsw_reg_mcc_pack(mcc_pl, 
MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, 235 fwhandle, 0); 236 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 237 } 238 239 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, 240 enum mlxfw_fsm_state *fsm_state, 241 enum mlxfw_fsm_state_err *fsm_state_err) 242 { 243 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 244 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 245 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 246 char mcc_pl[MLXSW_REG_MCC_LEN]; 247 u8 control_state; 248 u8 error_code; 249 int err; 250 251 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0); 252 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 253 if (err) 254 return err; 255 256 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state); 257 *fsm_state = control_state; 258 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, 259 MLXFW_FSM_STATE_ERR_MAX); 260 return 0; 261 } 262 263 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 264 { 265 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 266 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 267 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 268 char mcc_pl[MLXSW_REG_MCC_LEN]; 269 270 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, 271 fwhandle, 0); 272 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 273 } 274 275 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) 276 { 277 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev = 278 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev); 279 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp; 280 char mcc_pl[MLXSW_REG_MCC_LEN]; 281 282 mlxsw_reg_mcc_pack(mcc_pl, 283 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, 284 fwhandle, 0); 285 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl); 286 } 287 288 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = { 289 .component_query = 
mlxsw_sp_component_query, 290 .fsm_lock = mlxsw_sp_fsm_lock, 291 .fsm_component_update = mlxsw_sp_fsm_component_update, 292 .fsm_block_download = mlxsw_sp_fsm_block_download, 293 .fsm_component_verify = mlxsw_sp_fsm_component_verify, 294 .fsm_activate = mlxsw_sp_fsm_activate, 295 .fsm_query_state = mlxsw_sp_fsm_query_state, 296 .fsm_cancel = mlxsw_sp_fsm_cancel, 297 .fsm_release = mlxsw_sp_fsm_release 298 }; 299 300 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, 301 const struct firmware *firmware) 302 { 303 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = { 304 .mlxfw_dev = { 305 .ops = &mlxsw_sp_mlxfw_dev_ops, 306 .psid = mlxsw_sp->bus_info->psid, 307 .psid_size = strlen(mlxsw_sp->bus_info->psid), 308 }, 309 .mlxsw_sp = mlxsw_sp 310 }; 311 312 return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware); 313 } 314 315 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 316 { 317 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; 318 const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; 319 const char *fw_filename = mlxsw_sp->fw_filename; 320 const struct firmware *firmware; 321 int err; 322 323 /* Don't check if driver does not require it */ 324 if (!req_rev || !fw_filename) 325 return 0; 326 327 /* Validate driver & FW are compatible */ 328 if (rev->major != req_rev->major) { 329 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", 330 rev->major, req_rev->major); 331 return -EINVAL; 332 } 333 if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == 334 MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor)) 335 return 0; 336 337 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", 338 rev->major, rev->minor, rev->subminor); 339 dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", 340 fw_filename); 341 342 err = request_firmware_direct(&firmware, fw_filename, 343 mlxsw_sp->bus_info->dev); 344 if (err) { 345 
dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", 346 fw_filename); 347 return err; 348 } 349 350 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware); 351 release_firmware(firmware); 352 if (err) 353 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 354 355 /* On FW flash success, tell the caller FW reset is needed 356 * if current FW supports it. 357 */ 358 if (rev->minor >= req_rev->can_reset_minor) 359 return err ? err : -EAGAIN; 360 else 361 return 0; 362 } 363 364 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, 365 unsigned int counter_index, u64 *packets, 366 u64 *bytes) 367 { 368 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 369 int err; 370 371 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, 372 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 373 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 374 if (err) 375 return err; 376 if (packets) 377 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); 378 if (bytes) 379 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); 380 return 0; 381 } 382 383 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, 384 unsigned int counter_index) 385 { 386 char mgpc_pl[MLXSW_REG_MGPC_LEN]; 387 388 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, 389 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES); 390 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); 391 } 392 393 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, 394 unsigned int *p_counter_index) 395 { 396 int err; 397 398 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 399 p_counter_index); 400 if (err) 401 return err; 402 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); 403 if (err) 404 goto err_counter_clear; 405 return 0; 406 407 err_counter_clear: 408 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 409 *p_counter_index); 410 return err; 411 } 412 413 void 
mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, 414 unsigned int counter_index) 415 { 416 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, 417 counter_index); 418 } 419 420 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 421 const struct mlxsw_tx_info *tx_info) 422 { 423 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 424 425 memset(txhdr, 0, MLXSW_TXHDR_LEN); 426 427 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 428 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 429 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 430 mlxsw_tx_hdr_swid_set(txhdr, 0); 431 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 432 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 433 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 434 } 435 436 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) 437 { 438 switch (state) { 439 case BR_STATE_FORWARDING: 440 return MLXSW_REG_SPMS_STATE_FORWARDING; 441 case BR_STATE_LEARNING: 442 return MLXSW_REG_SPMS_STATE_LEARNING; 443 case BR_STATE_LISTENING: /* fall-through */ 444 case BR_STATE_DISABLED: /* fall-through */ 445 case BR_STATE_BLOCKING: 446 return MLXSW_REG_SPMS_STATE_DISCARDING; 447 default: 448 BUG(); 449 } 450 } 451 452 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 453 u8 state) 454 { 455 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); 456 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 457 char *spms_pl; 458 int err; 459 460 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 461 if (!spms_pl) 462 return -ENOMEM; 463 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 464 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 465 466 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 467 kfree(spms_pl); 468 return err; 469 } 470 471 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 472 { 473 char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; 474 int err; 475 476 err = mlxsw_reg_query(mlxsw_sp->core, 
MLXSW_REG(spad), spad_pl); 477 if (err) 478 return err; 479 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 480 return 0; 481 } 482 483 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, 484 bool enable, u32 rate) 485 { 486 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 487 char mpsc_pl[MLXSW_REG_MPSC_LEN]; 488 489 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); 490 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl); 491 } 492 493 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 494 bool is_up) 495 { 496 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 497 char paos_pl[MLXSW_REG_PAOS_LEN]; 498 499 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 500 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 501 MLXSW_PORT_ADMIN_STATUS_DOWN); 502 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 503 } 504 505 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 506 unsigned char *addr) 507 { 508 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 509 char ppad_pl[MLXSW_REG_PPAD_LEN]; 510 511 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 512 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 513 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 514 } 515 516 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 517 { 518 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 519 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 520 521 ether_addr_copy(addr, mlxsw_sp->base_mac); 522 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 523 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 524 } 525 526 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 527 { 528 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 529 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 530 int max_mtu; 531 int err; 532 533 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 534 
mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 535 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 536 if (err) 537 return err; 538 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 539 540 if (mtu > max_mtu) 541 return -EINVAL; 542 543 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 544 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 545 } 546 547 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 548 { 549 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 550 char pspa_pl[MLXSW_REG_PSPA_LEN]; 551 552 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 553 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 554 } 555 556 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable) 557 { 558 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 559 char svpe_pl[MLXSW_REG_SVPE_LEN]; 560 561 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 562 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 563 } 564 565 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 566 bool learn_enable) 567 { 568 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 569 char *spvmlr_pl; 570 int err; 571 572 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 573 if (!spvmlr_pl) 574 return -ENOMEM; 575 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 576 learn_enable); 577 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 578 kfree(spvmlr_pl); 579 return err; 580 } 581 582 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, 583 u16 vid) 584 { 585 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 586 char spvid_pl[MLXSW_REG_SPVID_LEN]; 587 588 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); 589 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 590 } 591 592 static int 
mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, 593 bool allow) 594 { 595 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 596 char spaft_pl[MLXSW_REG_SPAFT_LEN]; 597 598 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow); 599 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); 600 } 601 602 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 603 { 604 int err; 605 606 if (!vid) { 607 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false); 608 if (err) 609 return err; 610 } else { 611 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); 612 if (err) 613 return err; 614 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); 615 if (err) 616 goto err_port_allow_untagged_set; 617 } 618 619 mlxsw_sp_port->pvid = vid; 620 return 0; 621 622 err_port_allow_untagged_set: 623 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); 624 return err; 625 } 626 627 static int 628 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 629 { 630 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 631 char sspr_pl[MLXSW_REG_SSPR_LEN]; 632 633 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 634 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 635 } 636 637 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, 638 u8 local_port, u8 *p_module, 639 u8 *p_width, u8 *p_lane) 640 { 641 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 642 int err; 643 644 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 645 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 646 if (err) 647 return err; 648 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 649 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); 650 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 651 return 0; 652 } 653 654 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port, 655 u8 module, u8 width, u8 lane) 656 { 657 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 658 char 
pmlp_pl[MLXSW_REG_PMLP_LEN]; 659 int i; 660 661 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 662 mlxsw_reg_pmlp_width_set(pmlp_pl, width); 663 for (i = 0; i < width; i++) { 664 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module); 665 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */ 666 } 667 668 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 669 } 670 671 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) 672 { 673 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 674 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 675 676 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 677 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 678 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 679 } 680 681 static int mlxsw_sp_port_open(struct net_device *dev) 682 { 683 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 684 int err; 685 686 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 687 if (err) 688 return err; 689 netif_start_queue(dev); 690 return 0; 691 } 692 693 static int mlxsw_sp_port_stop(struct net_device *dev) 694 { 695 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 696 697 netif_stop_queue(dev); 698 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 699 } 700 701 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 702 struct net_device *dev) 703 { 704 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 705 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 706 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 707 const struct mlxsw_tx_info tx_info = { 708 .local_port = mlxsw_sp_port->local_port, 709 .is_emad = false, 710 }; 711 u64 len; 712 int err; 713 714 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 715 return NETDEV_TX_BUSY; 716 717 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { 718 struct sk_buff *skb_orig = skb; 719 720 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); 721 if (!skb) { 722 
this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 723 dev_kfree_skb_any(skb_orig); 724 return NETDEV_TX_OK; 725 } 726 dev_consume_skb_any(skb_orig); 727 } 728 729 if (eth_skb_pad(skb)) { 730 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 731 return NETDEV_TX_OK; 732 } 733 734 mlxsw_sp_txhdr_construct(skb, &tx_info); 735 /* TX header is consumed by HW on the way so we shouldn't count its 736 * bytes as being sent. 737 */ 738 len = skb->len - MLXSW_TXHDR_LEN; 739 740 /* Due to a race we might fail here because of a full queue. In that 741 * unlikely case we simply drop the packet. 742 */ 743 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 744 745 if (!err) { 746 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 747 u64_stats_update_begin(&pcpu_stats->syncp); 748 pcpu_stats->tx_packets++; 749 pcpu_stats->tx_bytes += len; 750 u64_stats_update_end(&pcpu_stats->syncp); 751 } else { 752 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 753 dev_kfree_skb_any(skb); 754 } 755 return NETDEV_TX_OK; 756 } 757 758 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 759 { 760 } 761 762 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 763 { 764 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 765 struct sockaddr *addr = p; 766 int err; 767 768 if (!is_valid_ether_addr(addr->sa_data)) 769 return -EADDRNOTAVAIL; 770 771 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 772 if (err) 773 return err; 774 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 775 return 0; 776 } 777 778 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, 779 int mtu) 780 { 781 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); 782 } 783 784 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 785 786 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 787 u16 delay) 788 { 789 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, 790 BITS_PER_BYTE)); 791 
return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, 792 mtu); 793 } 794 795 /* Maximum delay buffer needed in case of PAUSE frames, in bytes. 796 * Assumes 100m cable and maximum MTU. 797 */ 798 #define MLXSW_SP_PAUSE_DELAY 58752 799 800 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, 801 u16 delay, bool pfc, bool pause) 802 { 803 if (pfc) 804 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); 805 else if (pause) 806 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); 807 else 808 return 0; 809 } 810 811 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, 812 bool lossy) 813 { 814 if (lossy) 815 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); 816 else 817 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, 818 thres); 819 } 820 821 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 822 u8 *prio_tc, bool pause_en, 823 struct ieee_pfc *my_pfc) 824 { 825 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 826 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 827 u16 delay = !!my_pfc ? 
my_pfc->delay : 0; 828 char pbmc_pl[MLXSW_REG_PBMC_LEN]; 829 int i, j, err; 830 831 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); 832 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 833 if (err) 834 return err; 835 836 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 837 bool configure = false; 838 bool pfc = false; 839 bool lossy; 840 u16 thres; 841 842 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 843 if (prio_tc[j] == i) { 844 pfc = pfc_en & BIT(j); 845 configure = true; 846 break; 847 } 848 } 849 850 if (!configure) 851 continue; 852 853 lossy = !(pfc || pause_en); 854 thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); 855 delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, 856 pause_en); 857 mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); 858 } 859 860 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 861 } 862 863 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, 864 int mtu, bool pause_en) 865 { 866 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; 867 bool dcb_en = !!mlxsw_sp_port->dcb.ets; 868 struct ieee_pfc *my_pfc; 869 u8 *prio_tc; 870 871 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; 872 my_pfc = dcb_en ? 
mlxsw_sp_port->dcb.pfc : NULL; 873 874 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, 875 pause_en, my_pfc); 876 } 877 878 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 879 { 880 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 881 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 882 int err; 883 884 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); 885 if (err) 886 return err; 887 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); 888 if (err) 889 goto err_span_port_mtu_update; 890 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 891 if (err) 892 goto err_port_mtu_set; 893 dev->mtu = mtu; 894 return 0; 895 896 err_port_mtu_set: 897 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); 898 err_span_port_mtu_update: 899 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 900 return err; 901 } 902 903 static int 904 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, 905 struct rtnl_link_stats64 *stats) 906 { 907 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 908 struct mlxsw_sp_port_pcpu_stats *p; 909 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 910 u32 tx_dropped = 0; 911 unsigned int start; 912 int i; 913 914 for_each_possible_cpu(i) { 915 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 916 do { 917 start = u64_stats_fetch_begin_irq(&p->syncp); 918 rx_packets = p->rx_packets; 919 rx_bytes = p->rx_bytes; 920 tx_packets = p->tx_packets; 921 tx_bytes = p->tx_bytes; 922 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 923 924 stats->rx_packets += rx_packets; 925 stats->rx_bytes += rx_bytes; 926 stats->tx_packets += tx_packets; 927 stats->tx_bytes += tx_bytes; 928 /* tx_dropped is u32, updated without syncp protection. 
*/ 929 tx_dropped += p->tx_dropped; 930 } 931 stats->tx_dropped = tx_dropped; 932 return 0; 933 } 934 935 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) 936 { 937 switch (attr_id) { 938 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 939 return true; 940 } 941 942 return false; 943 } 944 945 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev, 946 void *sp) 947 { 948 switch (attr_id) { 949 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 950 return mlxsw_sp_port_get_sw_stats64(dev, sp); 951 } 952 953 return -EINVAL; 954 } 955 956 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, 957 int prio, char *ppcnt_pl) 958 { 959 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 960 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 961 962 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); 963 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 964 } 965 966 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev, 967 struct rtnl_link_stats64 *stats) 968 { 969 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 970 int err; 971 972 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 973 0, ppcnt_pl); 974 if (err) 975 goto out; 976 977 stats->tx_packets = 978 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); 979 stats->rx_packets = 980 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl); 981 stats->tx_bytes = 982 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl); 983 stats->rx_bytes = 984 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl); 985 stats->multicast = 986 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl); 987 988 stats->rx_crc_errors = 989 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl); 990 stats->rx_frame_errors = 991 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl); 992 993 stats->rx_length_errors = ( 994 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) + 995 
mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) + 996 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl)); 997 998 stats->rx_errors = (stats->rx_crc_errors + 999 stats->rx_frame_errors + stats->rx_length_errors); 1000 1001 out: 1002 return err; 1003 } 1004 1005 static void 1006 mlxsw_sp_port_get_hw_xstats(struct net_device *dev, 1007 struct mlxsw_sp_port_xstats *xstats) 1008 { 1009 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1010 int err, i; 1011 1012 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, 1013 ppcnt_pl); 1014 if (!err) 1015 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); 1016 1017 for (i = 0; i < TC_MAX_QUEUE; i++) { 1018 err = mlxsw_sp_port_get_stats_raw(dev, 1019 MLXSW_REG_PPCNT_TC_CONG_TC, 1020 i, ppcnt_pl); 1021 if (!err) 1022 xstats->wred_drop[i] = 1023 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); 1024 1025 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, 1026 i, ppcnt_pl); 1027 if (err) 1028 continue; 1029 1030 xstats->backlog[i] = 1031 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); 1032 xstats->tail_drop[i] = 1033 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); 1034 } 1035 1036 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1037 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT, 1038 i, ppcnt_pl); 1039 if (err) 1040 continue; 1041 1042 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl); 1043 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl); 1044 } 1045 } 1046 1047 static void update_stats_cache(struct work_struct *work) 1048 { 1049 struct mlxsw_sp_port *mlxsw_sp_port = 1050 container_of(work, struct mlxsw_sp_port, 1051 periodic_hw_stats.update_dw.work); 1052 1053 if (!netif_carrier_ok(mlxsw_sp_port->dev)) 1054 goto out; 1055 1056 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, 1057 &mlxsw_sp_port->periodic_hw_stats.stats); 1058 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev, 1059 &mlxsw_sp_port->periodic_hw_stats.xstats); 1060 1061 out: 1062 
	/* Re-arm the periodic stats collection work. */
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program port VLAN membership for the range [vid_begin, vid_end] with a
 * single SPVM register write. The payload is heap-allocated because
 * MLXSW_REG_SPVM_LEN is too large for the stack.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

/* Set VLAN membership for an arbitrary VID range, splitting it into
 * batches of at most MLXSW_REG_SPVM_REC_MAX_COUNT records per register
 * write. Stops and returns on the first failed batch; already-written
 * batches are not rolled back.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		/* Clamp the last batch to vid_end. */
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

/* Drop the reference this port holds on every VLAN entry; entries whose
 * refcount reaches zero are destroyed by the put (hence _safe iteration).
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}

static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct
mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	/* VID 1 (the default PVID) is installed as untagged. */
	bool untagged = vid == 1;
	int err;

	/* Make the port a member of the VLAN in HW before tracking it. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->ref_count = 1;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	/* Undo the HW membership set above. */
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

/* Tear down a VLAN entry: unlink and free the tracking structure, then
 * remove the port's HW membership for that VID.
 */
static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

/* Find-or-create with reference counting: returns an existing entry with
 * its refcount bumped, or a freshly created one (ref_count == 1).
 * Returns ERR_PTR() on failure.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan->ref_count++;
		return mlxsw_sp_port_vlan;
	}

	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}

/* Drop one reference; on the last reference, detach the VLAN from its
 * bridge port or router interface (if any) and destroy it.
 */
void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (--mlxsw_sp_port_vlan->ref_count != 0)
		return;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}

/* .ndo_vlan_rx_add_vid handler: take a reference on (or create) the
 * per-port VLAN entry for @vid.
 */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}

/* .ndo_vlan_rx_kill_vid handler: drop the reference taken in add_vid.
 * Silently succeeds when no entry exists for @vid.
 */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}

/* .ndo_get_phys_port_name handler: delegated to the core, which knows the
 * port's split/module layout.
 */
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	return mlxsw_core_port_get_phys_port_name(mlxsw_sp_port->mlxsw_sp->core,
						  mlxsw_sp_port->local_port,
						  name, len);
}

/* Linear scan of the port's matchall entries for the one matching the
 * tc filter cookie; NULL if not present.
 */
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const
struct tc_action *a,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;
	struct net_device *to_dev;

	/* Resolve the mirror destination from the mirred action; it may
	 * already be gone, in which case the offload is rejected.
	 */
	to_dev = tcf_mirred_dev(a);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	/* On success the SPAN agent id is stored in mirror->span_id for
	 * later teardown.
	 */
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
					true, &mirror->span_id);
}

/* Undo mlxsw_sp_port_add_cls_matchall_mirror() using the span_id and
 * direction recorded in @mirror.
 */
static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
				 span_type, true);
}

/* Offload a matchall sample action. Only one sampler per port is
 * supported (-EEXIST if psample_group is already set), and the rate is
 * bounded by what the MPSC register can express.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	/* Publish the psample group and parameters before enabling HW
	 * sampling, so the trap handler sees consistent state.
	 */
	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	/* HW enable failed - retract the published group. */
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group,
			 NULL);
	return err;
}

/* Disable HW sampling on the port and clear the published psample group.
 * The rate argument is irrelevant when disabling; 1 is a placeholder.
 */
static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

/* Offload a matchall classifier. Only single-action filters on protocol
 * "all" are supported, dispatched to either the mirror or the sample
 * handler; the entry is tracked by tc cookie for later removal.
 */
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_one_action(f->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	/* Exactly one action (checked above) - take the first. */
	tcf_exts_to_list(f->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

/* Remove a previously offloaded matchall entry, looked up by cookie. */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
1381 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; 1382 1383 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port, 1384 f->cookie); 1385 if (!mall_tc_entry) { 1386 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); 1387 return; 1388 } 1389 list_del(&mall_tc_entry->list); 1390 1391 switch (mall_tc_entry->type) { 1392 case MLXSW_SP_PORT_MALL_MIRROR: 1393 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port, 1394 &mall_tc_entry->mirror); 1395 break; 1396 case MLXSW_SP_PORT_MALL_SAMPLE: 1397 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port); 1398 break; 1399 default: 1400 WARN_ON(1); 1401 } 1402 1403 kfree(mall_tc_entry); 1404 } 1405 1406 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, 1407 struct tc_cls_matchall_offload *f, 1408 bool ingress) 1409 { 1410 switch (f->command) { 1411 case TC_CLSMATCHALL_REPLACE: 1412 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f, 1413 ingress); 1414 case TC_CLSMATCHALL_DESTROY: 1415 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f); 1416 return 0; 1417 default: 1418 return -EOPNOTSUPP; 1419 } 1420 } 1421 1422 static int 1423 mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block, 1424 struct tc_cls_flower_offload *f) 1425 { 1426 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block); 1427 1428 switch (f->command) { 1429 case TC_CLSFLOWER_REPLACE: 1430 return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f); 1431 case TC_CLSFLOWER_DESTROY: 1432 mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f); 1433 return 0; 1434 case TC_CLSFLOWER_STATS: 1435 return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f); 1436 case TC_CLSFLOWER_TMPLT_CREATE: 1437 return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f); 1438 case TC_CLSFLOWER_TMPLT_DESTROY: 1439 mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f); 1440 return 0; 1441 default: 1442 return -EOPNOTSUPP; 1443 } 1444 } 1445 1446 static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type, 1447 void 
*type_data, 1448 void *cb_priv, bool ingress) 1449 { 1450 struct mlxsw_sp_port *mlxsw_sp_port = cb_priv; 1451 1452 switch (type) { 1453 case TC_SETUP_CLSMATCHALL: 1454 if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev, 1455 type_data)) 1456 return -EOPNOTSUPP; 1457 1458 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data, 1459 ingress); 1460 case TC_SETUP_CLSFLOWER: 1461 return 0; 1462 default: 1463 return -EOPNOTSUPP; 1464 } 1465 } 1466 1467 static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type, 1468 void *type_data, 1469 void *cb_priv) 1470 { 1471 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1472 cb_priv, true); 1473 } 1474 1475 static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type, 1476 void *type_data, 1477 void *cb_priv) 1478 { 1479 return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, 1480 cb_priv, false); 1481 } 1482 1483 static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type, 1484 void *type_data, void *cb_priv) 1485 { 1486 struct mlxsw_sp_acl_block *acl_block = cb_priv; 1487 1488 switch (type) { 1489 case TC_SETUP_CLSMATCHALL: 1490 return 0; 1491 case TC_SETUP_CLSFLOWER: 1492 if (mlxsw_sp_acl_block_disabled(acl_block)) 1493 return -EOPNOTSUPP; 1494 1495 return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data); 1496 default: 1497 return -EOPNOTSUPP; 1498 } 1499 } 1500 1501 static int 1502 mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, 1503 struct tcf_block *block, bool ingress, 1504 struct netlink_ext_ack *extack) 1505 { 1506 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1507 struct mlxsw_sp_acl_block *acl_block; 1508 struct tcf_block_cb *block_cb; 1509 int err; 1510 1511 block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower, 1512 mlxsw_sp); 1513 if (!block_cb) { 1514 acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net); 1515 if (!acl_block) 1516 return -ENOMEM; 1517 block_cb = __tcf_block_cb_register(block, 
						   mlxsw_sp_setup_tc_block_cb_flower,
						   mlxsw_sp, acl_block, extack);
		if (IS_ERR(block_cb)) {
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
	} else {
		/* Callback already registered for this block - reuse its
		 * ACL block and just take another reference below.
		 */
		acl_block = tcf_block_cb_priv(block_cb);
	}
	tcf_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	return 0;

err_block_bind:
	/* The err_cb_register label sits inside this if on purpose: the
	 * ACL block is destroyed only when we were the ones who created
	 * it (i.e. when the last reference is gone or registration of a
	 * fresh callback failed).
	 */
	if (!tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
err_cb_register:
		mlxsw_sp_acl_block_destroy(acl_block);
	}
	return err;
}

/* Reverse of mlxsw_sp_setup_tc_block_flower_bind(): unbind the port from
 * the shared ACL block and, if this was the last user and HW unbind
 * succeeded, unregister the callback and destroy the ACL block.
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tcf_block *block, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = tcf_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
		mlxsw_sp_acl_block_destroy(acl_block);
	}
}

/* TC_SETUP_BLOCK handler: pick the matchall callback matching the block's
 * binder direction, then register/unregister it together with the flower
 * binding.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_block_offload *f)
{
	tc_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
{ 1588 cb = mlxsw_sp_setup_tc_block_cb_matchall_eg; 1589 ingress = false; 1590 } else { 1591 return -EOPNOTSUPP; 1592 } 1593 1594 switch (f->command) { 1595 case TC_BLOCK_BIND: 1596 err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port, 1597 mlxsw_sp_port, f->extack); 1598 if (err) 1599 return err; 1600 err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, 1601 f->block, ingress, 1602 f->extack); 1603 if (err) { 1604 tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); 1605 return err; 1606 } 1607 return 0; 1608 case TC_BLOCK_UNBIND: 1609 mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port, 1610 f->block, ingress); 1611 tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); 1612 return 0; 1613 default: 1614 return -EOPNOTSUPP; 1615 } 1616 } 1617 1618 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, 1619 void *type_data) 1620 { 1621 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1622 1623 switch (type) { 1624 case TC_SETUP_BLOCK: 1625 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); 1626 case TC_SETUP_QDISC_RED: 1627 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); 1628 case TC_SETUP_QDISC_PRIO: 1629 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data); 1630 default: 1631 return -EOPNOTSUPP; 1632 } 1633 } 1634 1635 1636 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) 1637 { 1638 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1639 1640 if (!enable) { 1641 if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) || 1642 mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) || 1643 !list_empty(&mlxsw_sp_port->mall_tc_list)) { 1644 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); 1645 return -EINVAL; 1646 } 1647 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block); 1648 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block); 1649 } else { 1650 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block); 1651 
mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block); 1652 } 1653 return 0; 1654 } 1655 1656 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable); 1657 1658 static int mlxsw_sp_handle_feature(struct net_device *dev, 1659 netdev_features_t wanted_features, 1660 netdev_features_t feature, 1661 mlxsw_sp_feature_handler feature_handler) 1662 { 1663 netdev_features_t changes = wanted_features ^ dev->features; 1664 bool enable = !!(wanted_features & feature); 1665 int err; 1666 1667 if (!(changes & feature)) 1668 return 0; 1669 1670 err = feature_handler(dev, enable); 1671 if (err) { 1672 netdev_err(dev, "%s feature %pNF failed, err %d\n", 1673 enable ? "Enable" : "Disable", &feature, err); 1674 return err; 1675 } 1676 1677 if (enable) 1678 dev->features |= feature; 1679 else 1680 dev->features &= ~feature; 1681 1682 return 0; 1683 } 1684 static int mlxsw_sp_set_features(struct net_device *dev, 1685 netdev_features_t features) 1686 { 1687 return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC, 1688 mlxsw_sp_feature_hw_tc); 1689 } 1690 1691 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1692 .ndo_open = mlxsw_sp_port_open, 1693 .ndo_stop = mlxsw_sp_port_stop, 1694 .ndo_start_xmit = mlxsw_sp_port_xmit, 1695 .ndo_setup_tc = mlxsw_sp_setup_tc, 1696 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1697 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1698 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1699 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1700 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1701 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1702 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1703 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1704 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name, 1705 .ndo_set_features = mlxsw_sp_set_features, 1706 }; 1707 1708 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 1709 struct ethtool_drvinfo *drvinfo) 1710 { 1711 struct 
mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1712 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1713 1714 strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, 1715 sizeof(drvinfo->driver)); 1716 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 1717 sizeof(drvinfo->version)); 1718 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 1719 "%d.%d.%d", 1720 mlxsw_sp->bus_info->fw_rev.major, 1721 mlxsw_sp->bus_info->fw_rev.minor, 1722 mlxsw_sp->bus_info->fw_rev.subminor); 1723 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 1724 sizeof(drvinfo->bus_info)); 1725 } 1726 1727 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, 1728 struct ethtool_pauseparam *pause) 1729 { 1730 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1731 1732 pause->rx_pause = mlxsw_sp_port->link.rx_pause; 1733 pause->tx_pause = mlxsw_sp_port->link.tx_pause; 1734 } 1735 1736 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, 1737 struct ethtool_pauseparam *pause) 1738 { 1739 char pfcc_pl[MLXSW_REG_PFCC_LEN]; 1740 1741 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); 1742 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); 1743 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); 1744 1745 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), 1746 pfcc_pl); 1747 } 1748 1749 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, 1750 struct ethtool_pauseparam *pause) 1751 { 1752 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1753 bool pause_en = pause->tx_pause || pause->rx_pause; 1754 int err; 1755 1756 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { 1757 netdev_err(dev, "PFC already enabled on port\n"); 1758 return -EINVAL; 1759 } 1760 1761 if (pause->autoneg) { 1762 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); 1763 return -EINVAL; 1764 } 1765 1766 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1767 if (err) { 1768 
netdev_err(dev, "Failed to configure port's headroom\n"); 1769 return err; 1770 } 1771 1772 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); 1773 if (err) { 1774 netdev_err(dev, "Failed to set PAUSE parameters\n"); 1775 goto err_port_pause_configure; 1776 } 1777 1778 mlxsw_sp_port->link.rx_pause = pause->rx_pause; 1779 mlxsw_sp_port->link.tx_pause = pause->tx_pause; 1780 1781 return 0; 1782 1783 err_port_pause_configure: 1784 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1785 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1786 return err; 1787 } 1788 1789 struct mlxsw_sp_port_hw_stats { 1790 char str[ETH_GSTRING_LEN]; 1791 u64 (*getter)(const char *payload); 1792 bool cells_bytes; 1793 }; 1794 1795 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 1796 { 1797 .str = "a_frames_transmitted_ok", 1798 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 1799 }, 1800 { 1801 .str = "a_frames_received_ok", 1802 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 1803 }, 1804 { 1805 .str = "a_frame_check_sequence_errors", 1806 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 1807 }, 1808 { 1809 .str = "a_alignment_errors", 1810 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 1811 }, 1812 { 1813 .str = "a_octets_transmitted_ok", 1814 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 1815 }, 1816 { 1817 .str = "a_octets_received_ok", 1818 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 1819 }, 1820 { 1821 .str = "a_multicast_frames_xmitted_ok", 1822 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 1823 }, 1824 { 1825 .str = "a_broadcast_frames_xmitted_ok", 1826 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 1827 }, 1828 { 1829 .str = "a_multicast_frames_received_ok", 1830 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 1831 }, 1832 { 1833 .str = "a_broadcast_frames_received_ok", 1834 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 1835 }, 1836 { 1837 .str 
= "a_in_range_length_errors", 1838 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 1839 }, 1840 { 1841 .str = "a_out_of_range_length_field", 1842 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 1843 }, 1844 { 1845 .str = "a_frame_too_long_errors", 1846 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 1847 }, 1848 { 1849 .str = "a_symbol_error_during_carrier", 1850 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 1851 }, 1852 { 1853 .str = "a_mac_control_frames_transmitted", 1854 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 1855 }, 1856 { 1857 .str = "a_mac_control_frames_received", 1858 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 1859 }, 1860 { 1861 .str = "a_unsupported_opcodes_received", 1862 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 1863 }, 1864 { 1865 .str = "a_pause_mac_ctrl_frames_received", 1866 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 1867 }, 1868 { 1869 .str = "a_pause_mac_ctrl_frames_xmitted", 1870 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 1871 }, 1872 }; 1873 1874 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 1875 1876 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { 1877 { 1878 .str = "ether_pkts64octets", 1879 .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, 1880 }, 1881 { 1882 .str = "ether_pkts65to127octets", 1883 .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get, 1884 }, 1885 { 1886 .str = "ether_pkts128to255octets", 1887 .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, 1888 }, 1889 { 1890 .str = "ether_pkts256to511octets", 1891 .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, 1892 }, 1893 { 1894 .str = "ether_pkts512to1023octets", 1895 .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, 1896 }, 1897 { 1898 .str = "ether_pkts1024to1518octets", 1899 .getter = 
mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, 1900 }, 1901 { 1902 .str = "ether_pkts1519to2047octets", 1903 .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, 1904 }, 1905 { 1906 .str = "ether_pkts2048to4095octets", 1907 .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, 1908 }, 1909 { 1910 .str = "ether_pkts4096to8191octets", 1911 .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get, 1912 }, 1913 { 1914 .str = "ether_pkts8192to10239octets", 1915 .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get, 1916 }, 1917 }; 1918 1919 #define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ 1920 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) 1921 1922 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 1923 { 1924 .str = "rx_octets_prio", 1925 .getter = mlxsw_reg_ppcnt_rx_octets_get, 1926 }, 1927 { 1928 .str = "rx_frames_prio", 1929 .getter = mlxsw_reg_ppcnt_rx_frames_get, 1930 }, 1931 { 1932 .str = "tx_octets_prio", 1933 .getter = mlxsw_reg_ppcnt_tx_octets_get, 1934 }, 1935 { 1936 .str = "tx_frames_prio", 1937 .getter = mlxsw_reg_ppcnt_tx_frames_get, 1938 }, 1939 { 1940 .str = "rx_pause_prio", 1941 .getter = mlxsw_reg_ppcnt_rx_pause_get, 1942 }, 1943 { 1944 .str = "rx_pause_duration_prio", 1945 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 1946 }, 1947 { 1948 .str = "tx_pause_prio", 1949 .getter = mlxsw_reg_ppcnt_tx_pause_get, 1950 }, 1951 { 1952 .str = "tx_pause_duration_prio", 1953 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 1954 }, 1955 }; 1956 1957 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 1958 1959 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 1960 { 1961 .str = "tc_transmit_queue_tc", 1962 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 1963 .cells_bytes = true, 1964 }, 1965 { 1966 .str = "tc_no_buffer_discard_uc_tc", 1967 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 1968 }, 1969 }; 1970 1971 #define MLXSW_SP_PORT_HW_TC_STATS_LEN 
ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 1972 1973 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 1974 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ 1975 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ 1976 IEEE_8021QAZ_MAX_TCS) + \ 1977 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ 1978 TC_MAX_QUEUE)) 1979 1980 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 1981 { 1982 int i; 1983 1984 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 1985 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 1986 mlxsw_sp_port_hw_prio_stats[i].str, prio); 1987 *p += ETH_GSTRING_LEN; 1988 } 1989 } 1990 1991 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 1992 { 1993 int i; 1994 1995 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 1996 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 1997 mlxsw_sp_port_hw_tc_stats[i].str, tc); 1998 *p += ETH_GSTRING_LEN; 1999 } 2000 } 2001 2002 static void mlxsw_sp_port_get_strings(struct net_device *dev, 2003 u32 stringset, u8 *data) 2004 { 2005 u8 *p = data; 2006 int i; 2007 2008 switch (stringset) { 2009 case ETH_SS_STATS: 2010 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2011 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2012 ETH_GSTRING_LEN); 2013 p += ETH_GSTRING_LEN; 2014 } 2015 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { 2016 memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, 2017 ETH_GSTRING_LEN); 2018 p += ETH_GSTRING_LEN; 2019 } 2020 2021 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2022 mlxsw_sp_port_get_prio_strings(&p, i); 2023 2024 for (i = 0; i < TC_MAX_QUEUE; i++) 2025 mlxsw_sp_port_get_tc_strings(&p, i); 2026 2027 break; 2028 } 2029 } 2030 2031 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2032 enum ethtool_phys_id_state state) 2033 { 2034 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2035 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2036 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2037 bool active; 2038 2039 switch (state) { 2040 case ETHTOOL_ID_ACTIVE: 2041 active = true; 2042 break; 2043 
case ETHTOOL_ID_INACTIVE: 2044 active = false; 2045 break; 2046 default: 2047 return -EOPNOTSUPP; 2048 } 2049 2050 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2051 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2052 } 2053 2054 static int 2055 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2056 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2057 { 2058 switch (grp) { 2059 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2060 *p_hw_stats = mlxsw_sp_port_hw_stats; 2061 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2062 break; 2063 case MLXSW_REG_PPCNT_RFC_2819_CNT: 2064 *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; 2065 *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2066 break; 2067 case MLXSW_REG_PPCNT_PRIO_CNT: 2068 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2069 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2070 break; 2071 case MLXSW_REG_PPCNT_TC_CNT: 2072 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2073 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2074 break; 2075 default: 2076 WARN_ON(1); 2077 return -EOPNOTSUPP; 2078 } 2079 return 0; 2080 } 2081 2082 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2083 enum mlxsw_reg_ppcnt_grp grp, int prio, 2084 u64 *data, int data_index) 2085 { 2086 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2087 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2088 struct mlxsw_sp_port_hw_stats *hw_stats; 2089 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2090 int i, len; 2091 int err; 2092 2093 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2094 if (err) 2095 return; 2096 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2097 for (i = 0; i < len; i++) { 2098 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2099 if (!hw_stats[i].cells_bytes) 2100 continue; 2101 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2102 data[data_index + i]); 2103 } 2104 } 2105 2106 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2107 struct ethtool_stats *stats, u64 *data) 
2108 { 2109 int i, data_index = 0; 2110 2111 /* IEEE 802.3 Counters */ 2112 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2113 data, data_index); 2114 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2115 2116 /* RFC 2819 Counters */ 2117 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, 2118 data, data_index); 2119 data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; 2120 2121 /* Per-Priority Counters */ 2122 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2123 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2124 data, data_index); 2125 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2126 } 2127 2128 /* Per-TC Counters */ 2129 for (i = 0; i < TC_MAX_QUEUE; i++) { 2130 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2131 data, data_index); 2132 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2133 } 2134 } 2135 2136 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2137 { 2138 switch (sset) { 2139 case ETH_SS_STATS: 2140 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; 2141 default: 2142 return -EOPNOTSUPP; 2143 } 2144 } 2145 2146 struct mlxsw_sp_port_link_mode { 2147 enum ethtool_link_mode_bit_indices mask_ethtool; 2148 u32 mask; 2149 u32 speed; 2150 }; 2151 2152 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = { 2153 { 2154 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2155 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2156 .speed = SPEED_100, 2157 }, 2158 { 2159 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2160 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2161 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2162 .speed = SPEED_1000, 2163 }, 2164 { 2165 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2166 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2167 .speed = SPEED_10000, 2168 }, 2169 { 2170 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 2171 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 2172 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2173 .speed = SPEED_10000, 2174 }, 
2175 { 2176 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2177 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2178 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2179 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 2180 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2181 .speed = SPEED_10000, 2182 }, 2183 { 2184 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 2185 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 2186 .speed = SPEED_20000, 2187 }, 2188 { 2189 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 2190 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2191 .speed = SPEED_40000, 2192 }, 2193 { 2194 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 2195 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2196 .speed = SPEED_40000, 2197 }, 2198 { 2199 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 2200 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2201 .speed = SPEED_40000, 2202 }, 2203 { 2204 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 2205 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2206 .speed = SPEED_40000, 2207 }, 2208 { 2209 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, 2210 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2211 .speed = SPEED_25000, 2212 }, 2213 { 2214 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, 2215 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2216 .speed = SPEED_25000, 2217 }, 2218 { 2219 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2220 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2221 .speed = SPEED_25000, 2222 }, 2223 { 2224 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 2225 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2226 .speed = SPEED_25000, 2227 }, 2228 { 2229 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, 2230 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2231 .speed = SPEED_50000, 2232 }, 2233 { 2234 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 2235 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2236 .speed = SPEED_50000, 2237 }, 2238 { 2239 
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed = SPEED_50000,
	},
	/* 56G is a single PTYS capability bit; it is exposed as each of
	 * the four ethtool 56G link modes below.
	 */
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
		.speed = SPEED_56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed = SPEED_100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)

/* Set the supported port-type bits (FIBRE / Backplane) from the PTYS
 * capability mask.
 */
static void
mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
				  struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto &
	    (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
	     MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
	     MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
	     MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
	     MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

/* Translate a PTYS capability mask into ethtool link-mode bits. */
static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			__set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

/* Derive speed/duplex from the operational PTYS mask; both are reported
 * as unknown while the carrier is down.
 */
static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_link_ksettings *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			/* First matching table row wins. */
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	cmd->base.speed = speed;
	cmd->base.duplex = duplex;
}

/* Map the operational PTYS mask to an ethtool connector type. */
static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

/* Collect the PTYS mask bits matching the link modes advertised in @cmd. */
static u32
mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
{
2365 u32 ptys_proto = 0; 2366 int i; 2367 2368 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2369 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2370 cmd->link_modes.advertising)) 2371 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2372 } 2373 return ptys_proto; 2374 } 2375 2376 static u32 mlxsw_sp_to_ptys_speed(u32 speed) 2377 { 2378 u32 ptys_proto = 0; 2379 int i; 2380 2381 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2382 if (speed == mlxsw_sp_port_link_mode[i].speed) 2383 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2384 } 2385 return ptys_proto; 2386 } 2387 2388 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) 2389 { 2390 u32 ptys_proto = 0; 2391 int i; 2392 2393 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2394 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed) 2395 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2396 } 2397 return ptys_proto; 2398 } 2399 2400 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap, 2401 struct ethtool_link_ksettings *cmd) 2402 { 2403 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); 2404 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); 2405 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 2406 2407 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd); 2408 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported); 2409 } 2410 2411 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg, 2412 struct ethtool_link_ksettings *cmd) 2413 { 2414 if (!autoneg) 2415 return; 2416 2417 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); 2418 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising); 2419 } 2420 2421 static void 2422 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status, 2423 struct ethtool_link_ksettings *cmd) 2424 { 2425 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp) 2426 return; 2427 2428 ethtool_link_ksettings_add_link_mode(cmd, 
lp_advertising, Autoneg); 2429 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising); 2430 } 2431 2432 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, 2433 struct ethtool_link_ksettings *cmd) 2434 { 2435 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp; 2436 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2437 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2438 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2439 u8 autoneg_status; 2440 bool autoneg; 2441 int err; 2442 2443 autoneg = mlxsw_sp_port->link.autoneg; 2444 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false); 2445 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2446 if (err) 2447 return err; 2448 mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, ð_proto_admin, 2449 ð_proto_oper); 2450 2451 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd); 2452 2453 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd); 2454 2455 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl); 2456 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl); 2457 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd); 2458 2459 cmd->base.autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; 2460 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper); 2461 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper, 2462 cmd); 2463 2464 return 0; 2465 } 2466 2467 static int 2468 mlxsw_sp_port_set_link_ksettings(struct net_device *dev, 2469 const struct ethtool_link_ksettings *cmd) 2470 { 2471 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2472 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2473 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2474 u32 eth_proto_cap, eth_proto_new; 2475 bool autoneg; 2476 int err; 2477 2478 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false); 2479 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2480 if (err) 2481 return err; 2482 mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); 2483 2484 autoneg = cmd->base.autoneg == AUTONEG_ENABLE; 2485 eth_proto_new = autoneg ? 2486 mlxsw_sp_to_ptys_advert_link(cmd) : 2487 mlxsw_sp_to_ptys_speed(cmd->base.speed); 2488 2489 eth_proto_new = eth_proto_new & eth_proto_cap; 2490 if (!eth_proto_new) { 2491 netdev_err(dev, "No supported speed requested\n"); 2492 return -EINVAL; 2493 } 2494 2495 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2496 eth_proto_new, autoneg); 2497 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2498 if (err) 2499 return err; 2500 2501 if (!netif_running(dev)) 2502 return 0; 2503 2504 mlxsw_sp_port->link.autoneg = autoneg; 2505 2506 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 2507 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 2508 2509 return 0; 2510 } 2511 2512 static int mlxsw_sp_flash_device(struct net_device *dev, 2513 struct ethtool_flash *flash) 2514 { 2515 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2516 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2517 const struct firmware *firmware; 2518 int err; 2519 2520 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) 2521 return -EOPNOTSUPP; 2522 2523 
	dev_hold(dev);
	rtnl_unlock();

	err = request_firmware_direct(&firmware, flash->data, &dev->dev);
	if (err)
		goto out;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
out:
	rtnl_lock();
	dev_put(dev);
	return err;
}

/* SFF module EEPROM is addressed as two 256-byte pages behind two I2C
 * addresses (low page at 0x50, upper pages at 0x51).
 */
#define MLXSW_SP_I2C_ADDR_LOW 0x50
#define MLXSW_SP_I2C_ADDR_HIGH 0x51
#define MLXSW_SP_EEPROM_PAGE_LENGTH 256

/* Read up to @size bytes of module EEPROM at @offset via the MCIA
 * register. Reads are clamped to the MCIA payload size and never cross
 * the 256-byte page boundary; the number of bytes actually copied is
 * returned through @p_read_size. Returns 0, a register access error, or
 * -EIO when the module reports a bad status.
 */
static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 offset, u16 size, void *data,
					unsigned int *p_read_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
	char mcia_pl[MLXSW_REG_MCIA_LEN];
	u16 i2c_addr;
	int status;
	int err;

	size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);

	if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
	    offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
		/* Cross pages read, read until offset 256 in low page */
		size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset;

	i2c_addr = MLXSW_SP_I2C_ADDR_LOW;
	if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) {
		/* Upper page: switch I2C address and rebase the offset. */
		i2c_addr = MLXSW_SP_I2C_ADDR_HIGH;
		offset -= MLXSW_SP_EEPROM_PAGE_LENGTH;
	}

	mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
			    0, 0, offset, size, i2c_addr);

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
	if (err)
		return err;

	status = mlxsw_reg_mcia_status_get(mcia_pl);
	if (status)
		return -EIO;

	mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
	memcpy(data, eeprom_tmp, size);
	*p_read_size = size;

	return 0;
}

/* SFF compliance revision byte values (EEPROM byte 1). */
enum mlxsw_sp_eeprom_module_info_rev_id {
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03,
};

/* SFF identifier byte values (EEPROM byte 0). */
enum mlxsw_sp_eeprom_module_info_id {
	MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
};

/* Byte offsets of the fields read by mlxsw_sp_get_module_info(). */
enum mlxsw_sp_eeprom_module_info {
	MLXSW_SP_EEPROM_MODULE_INFO_ID,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
	MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
};

/* ethtool .get_module_info: peek at the first EEPROM bytes to decide
 * which SFF spec (8436/8636/8472) and EEPROM length to report.
 */
static int mlxsw_sp_get_module_info(struct net_device *netdev,
				    struct ethtool_modinfo *modinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
	u8 module_rev_id, module_id;
	unsigned int read_size;
	int err;

	err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
					   MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
					   module_info, &read_size);
	if (err)
		return err;

	if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
		return -EIO;

	module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
	module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];

	switch (module_id) {
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
		modinfo->type = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		break;
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
		/* QSFP+ modules old enough to predate SFF-8636 still use
		 * the SFF-8436 layout.
		 */
		if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
		    module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		}
		break;
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* ethtool .get_module_eeprom: read the requested window in MCIA-sized
 * chunks until ee->len bytes were gathered.
 */
static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee,
				      u8 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	int offset = ee->offset;
	unsigned int read_size;
	int i = 0;
	int err;

	if (!ee->len)
		return -EINVAL;

	memset(data, 0, ee->len);

	while (i < ee->len) {
		err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset,
						   ee->len - i, data + i,
						   &read_size);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n");
			return err;
		}

		i += read_size;
		offset += read_size;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
	.flash_device		= mlxsw_sp_flash_device,
	.get_module_info	= mlxsw_sp_get_module_info,
	.get_module_eeprom	= mlxsw_sp_get_module_eeprom,
};

/* Enable every speed up to the maximum this port's lane width supports. */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_admin, mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

/* Configure one QEEC scheduling element (ETS hierarchy/weight). */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum
			  mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Configure the max shaper (rate limit) of a QEEC scheduling element. */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Map a switch priority to a traffic class via the QTCT register. */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

/* Build the default egress scheduling tree for a new port and leave all
 * shapers disabled and all priorities mapped to TC 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TCs 8..15 (i + 8) are linked to the same subgroup as
		 * their 0..7 counterpart.
		 */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC,
					    i + 8, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

/* Enable/disable multicast-aware TC mapping mode (QTCTM register). */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

/* Create and register one front-panel port: allocate the netdev, program
 * all hardware defaults (SWID, MAC, speeds, MTU, buffers, ETS, DCB,
 * FIDs, qdiscs, VID 1) and register it with the core. Teardown on any
 * failure unwinds in exact reverse order via the labels at the bottom.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = 1;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if
	   (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_get;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	/* The port must be visible in the ports array before the netdev is
	 * registered, since callbacks may fire immediately afterwards.
	 */
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev, module + 1,
				mlxsw_sp_port->split, lane / width);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
err_port_vlan_get:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

/* Tear down one port; mirrors mlxsw_sp_port_create() in reverse order. */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

/* True when a port exists at @local_port (slot in the ports array used). */
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

/* Remove every created port and free the port bookkeeping arrays. */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	/* Local port 0 is the CPU port and is never created. */
	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core);
	     i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
	kfree(mlxsw_sp->ports);
}

/* Enumerate all mapped front-panel ports and create a netdev for each.
 * port_to_module[] remembers each port's module (or -1 when unmapped) so
 * unsplit can later re-create ports.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
						 GFP_KERNEL);
	if (!mlxsw_sp->port_to_module) {
		err = -ENOMEM;
		goto err_port_to_module_alloc;
	}

	for (i = 1; i < max_ports; i++) {
		/* Mark as invalid */
		mlxsw_sp->port_to_module[i] = -1;

		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
	kfree(mlxsw_sp->ports);
	return err;
}

/* First local port of the split cluster containing @local_port. */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}

/* Create @count split ports on @module starting at @base_port, each with
 * an equal share of the module's lanes. Unwinds on failure.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	return err;
}

/* Re-create the original full-width (unsplit) ports after a split is
 * undone or fails.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		if (mlxsw_sp->port_to_module[local_port] < 0)
			continue;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}

/* devlink port-split handler: validate the request, remove the ports
 * occupying the cluster and re-create them as @count split ports.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
		return -EINVAL;
	}

	/* Only a full-width port can be split. */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		NL_SET_ERR_MSG_MOD(extack,
"Port cannot be split further"); 3213 return -EINVAL; 3214 } 3215 3216 /* Make sure we have enough slave (even) ports for the split. */ 3217 if (count == 2) { 3218 base_port = local_port; 3219 if (mlxsw_sp->ports[base_port + 1]) { 3220 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3221 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 3222 return -EINVAL; 3223 } 3224 } else { 3225 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3226 if (mlxsw_sp->ports[base_port + 1] || 3227 mlxsw_sp->ports[base_port + 3]) { 3228 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3229 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 3230 return -EINVAL; 3231 } 3232 } 3233 3234 for (i = 0; i < count; i++) 3235 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3236 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3237 3238 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); 3239 if (err) { 3240 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 3241 goto err_port_split_create; 3242 } 3243 3244 return 0; 3245 3246 err_port_split_create: 3247 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3248 return err; 3249 } 3250 3251 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 3252 struct netlink_ext_ack *extack) 3253 { 3254 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3255 struct mlxsw_sp_port *mlxsw_sp_port; 3256 u8 cur_width, base_port; 3257 unsigned int count; 3258 int i; 3259 3260 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3261 if (!mlxsw_sp_port) { 3262 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3263 local_port); 3264 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 3265 return -EINVAL; 3266 } 3267 3268 if (!mlxsw_sp_port->split) { 3269 netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); 3270 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 3271 return -EINVAL; 3272 } 3273 3274 cur_width = 
mlxsw_sp_port->mapping.width; 3275 count = cur_width == 1 ? 4 : 2; 3276 3277 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3278 3279 /* Determine which ports to remove. */ 3280 if (count == 2 && local_port >= base_port + 2) 3281 base_port = base_port + 2; 3282 3283 for (i = 0; i < count; i++) 3284 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3285 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3286 3287 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3288 3289 return 0; 3290 } 3291 3292 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 3293 char *pude_pl, void *priv) 3294 { 3295 struct mlxsw_sp *mlxsw_sp = priv; 3296 struct mlxsw_sp_port *mlxsw_sp_port; 3297 enum mlxsw_reg_pude_oper_status status; 3298 u8 local_port; 3299 3300 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 3301 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3302 if (!mlxsw_sp_port) 3303 return; 3304 3305 status = mlxsw_reg_pude_oper_status_get(pude_pl); 3306 if (status == MLXSW_PORT_OPER_STATUS_UP) { 3307 netdev_info(mlxsw_sp_port->dev, "link up\n"); 3308 netif_carrier_on(mlxsw_sp_port->dev); 3309 } else { 3310 netdev_info(mlxsw_sp_port->dev, "link down\n"); 3311 netif_carrier_off(mlxsw_sp_port->dev); 3312 } 3313 } 3314 3315 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 3316 u8 local_port, void *priv) 3317 { 3318 struct mlxsw_sp *mlxsw_sp = priv; 3319 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3320 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 3321 3322 if (unlikely(!mlxsw_sp_port)) { 3323 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 3324 local_port); 3325 return; 3326 } 3327 3328 skb->dev = mlxsw_sp_port->dev; 3329 3330 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 3331 u64_stats_update_begin(&pcpu_stats->syncp); 3332 pcpu_stats->rx_packets++; 3333 pcpu_stats->rx_bytes += skb->len; 3334 u64_stats_update_end(&pcpu_stats->syncp); 3335 3336 
skb->protocol = eth_type_trans(skb, skb->dev); 3337 netif_receive_skb(skb); 3338 } 3339 3340 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 3341 void *priv) 3342 { 3343 skb->offload_fwd_mark = 1; 3344 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3345 } 3346 3347 static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb, 3348 u8 local_port, void *priv) 3349 { 3350 skb->offload_mr_fwd_mark = 1; 3351 skb->offload_fwd_mark = 1; 3352 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3353 } 3354 3355 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 3356 void *priv) 3357 { 3358 struct mlxsw_sp *mlxsw_sp = priv; 3359 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3360 struct psample_group *psample_group; 3361 u32 size; 3362 3363 if (unlikely(!mlxsw_sp_port)) { 3364 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 3365 local_port); 3366 goto out; 3367 } 3368 if (unlikely(!mlxsw_sp_port->sample)) { 3369 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 3370 local_port); 3371 goto out; 3372 } 3373 3374 size = mlxsw_sp_port->sample->truncate ? 
3375 mlxsw_sp_port->sample->trunc_size : skb->len; 3376 3377 rcu_read_lock(); 3378 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 3379 if (!psample_group) 3380 goto out_unlock; 3381 psample_sample_packet(psample_group, skb, size, 3382 mlxsw_sp_port->dev->ifindex, 0, 3383 mlxsw_sp_port->sample->rate); 3384 out_unlock: 3385 rcu_read_unlock(); 3386 out: 3387 consume_skb(skb); 3388 } 3389 3390 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3391 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 3392 _is_ctrl, SP_##_trap_group, DISCARD) 3393 3394 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3395 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 3396 _is_ctrl, SP_##_trap_group, DISCARD) 3397 3398 #define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3399 MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action, \ 3400 _is_ctrl, SP_##_trap_group, DISCARD) 3401 3402 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 3403 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 3404 3405 static const struct mlxsw_listener mlxsw_sp_listener[] = { 3406 /* Events */ 3407 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 3408 /* L2 traps */ 3409 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 3410 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 3411 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true), 3412 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 3413 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 3414 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 3415 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 3416 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 3417 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 3418 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 3419 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 3420 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), 
3421 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, 3422 false), 3423 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 3424 false), 3425 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, 3426 false), 3427 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, 3428 false), 3429 /* L3 traps */ 3430 MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3431 MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3432 MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3433 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), 3434 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 3435 false), 3436 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), 3437 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 3438 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), 3439 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, 3440 false), 3441 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), 3442 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), 3443 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), 3444 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), 3445 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), 3446 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), 3447 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 3448 false), 3449 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 3450 false), 3451 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, 3452 false), 3453 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, 3454 false), 3455 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), 3456 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 3457 false), 3458 MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false), 3459 
MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false), 3460 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), 3461 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), 3462 MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3463 MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), 3464 MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), 3465 /* PKT Sample trap */ 3466 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 3467 false, SP_IP2ME, DISCARD), 3468 /* ACL trap */ 3469 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), 3470 /* Multicast Router Traps */ 3471 MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), 3472 MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false), 3473 MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), 3474 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 3475 MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 3476 }; 3477 3478 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 3479 { 3480 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 3481 enum mlxsw_reg_qpcr_ir_units ir_units; 3482 int max_cpu_policers; 3483 bool is_bytes; 3484 u8 burst_size; 3485 u32 rate; 3486 int i, err; 3487 3488 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 3489 return -EIO; 3490 3491 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3492 3493 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 3494 for (i = 0; i < max_cpu_policers; i++) { 3495 is_bytes = false; 3496 switch (i) { 3497 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3498 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3499 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3500 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3501 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 3502 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 3503 rate = 128; 3504 burst_size = 7; 3505 break; 3506 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3507 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 3508 rate = 16 * 1024; 3509 burst_size = 10; 
3510 break; 3511 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 3512 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3513 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3514 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 3515 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3516 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3517 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 3518 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 3519 rate = 1024; 3520 burst_size = 7; 3521 break; 3522 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3523 is_bytes = true; 3524 rate = 4 * 1024; 3525 burst_size = 4; 3526 break; 3527 default: 3528 continue; 3529 } 3530 3531 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 3532 burst_size); 3533 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 3534 if (err) 3535 return err; 3536 } 3537 3538 return 0; 3539 } 3540 3541 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 3542 { 3543 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3544 enum mlxsw_reg_htgt_trap_group i; 3545 int max_cpu_policers; 3546 int max_trap_groups; 3547 u8 priority, tc; 3548 u16 policer_id; 3549 int err; 3550 3551 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 3552 return -EIO; 3553 3554 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 3555 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3556 3557 for (i = 0; i < max_trap_groups; i++) { 3558 policer_id = i; 3559 switch (i) { 3560 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3561 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3562 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3563 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3564 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 3565 priority = 5; 3566 tc = 5; 3567 break; 3568 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: 3569 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3570 priority = 4; 3571 tc = 4; 3572 break; 3573 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3574 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3575 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: 3576 priority = 3; 3577 tc = 3; 3578 
break; 3579 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3580 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: 3581 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: 3582 priority = 2; 3583 tc = 2; 3584 break; 3585 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: 3586 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3587 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3588 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 3589 priority = 1; 3590 tc = 1; 3591 break; 3592 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 3593 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 3594 tc = MLXSW_REG_HTGT_DEFAULT_TC; 3595 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 3596 break; 3597 default: 3598 continue; 3599 } 3600 3601 if (max_cpu_policers <= policer_id && 3602 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 3603 return -EIO; 3604 3605 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 3606 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3607 if (err) 3608 return err; 3609 } 3610 3611 return 0; 3612 } 3613 3614 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 3615 { 3616 int i; 3617 int err; 3618 3619 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 3620 if (err) 3621 return err; 3622 3623 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 3624 if (err) 3625 return err; 3626 3627 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3628 err = mlxsw_core_trap_register(mlxsw_sp->core, 3629 &mlxsw_sp_listener[i], 3630 mlxsw_sp); 3631 if (err) 3632 goto err_listener_register; 3633 3634 } 3635 return 0; 3636 3637 err_listener_register: 3638 for (i--; i >= 0; i--) { 3639 mlxsw_core_trap_unregister(mlxsw_sp->core, 3640 &mlxsw_sp_listener[i], 3641 mlxsw_sp); 3642 } 3643 return err; 3644 } 3645 3646 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 3647 { 3648 int i; 3649 3650 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3651 mlxsw_core_trap_unregister(mlxsw_sp->core, 3652 &mlxsw_sp_listener[i], 3653 mlxsw_sp); 3654 } 3655 } 3656 3657 static int mlxsw_sp_lag_init(struct mlxsw_sp 
*mlxsw_sp) 3658 { 3659 char slcr_pl[MLXSW_REG_SLCR_LEN]; 3660 int err; 3661 3662 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 3663 MLXSW_REG_SLCR_LAG_HASH_DMAC | 3664 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 3665 MLXSW_REG_SLCR_LAG_HASH_VLANID | 3666 MLXSW_REG_SLCR_LAG_HASH_SIP | 3667 MLXSW_REG_SLCR_LAG_HASH_DIP | 3668 MLXSW_REG_SLCR_LAG_HASH_SPORT | 3669 MLXSW_REG_SLCR_LAG_HASH_DPORT | 3670 MLXSW_REG_SLCR_LAG_HASH_IPPROTO); 3671 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 3672 if (err) 3673 return err; 3674 3675 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 3676 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 3677 return -EIO; 3678 3679 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 3680 sizeof(struct mlxsw_sp_upper), 3681 GFP_KERNEL); 3682 if (!mlxsw_sp->lags) 3683 return -ENOMEM; 3684 3685 return 0; 3686 } 3687 3688 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 3689 { 3690 kfree(mlxsw_sp->lags); 3691 } 3692 3693 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 3694 { 3695 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3696 3697 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 3698 MLXSW_REG_HTGT_INVALID_POLICER, 3699 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 3700 MLXSW_REG_HTGT_DEFAULT_TC); 3701 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3702 } 3703 3704 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 3705 unsigned long event, void *ptr); 3706 3707 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 3708 const struct mlxsw_bus_info *mlxsw_bus_info) 3709 { 3710 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3711 int err; 3712 3713 mlxsw_sp->core = mlxsw_core; 3714 mlxsw_sp->bus_info = mlxsw_bus_info; 3715 3716 err = mlxsw_sp_fw_rev_validate(mlxsw_sp); 3717 if (err) 3718 return err; 3719 3720 err = mlxsw_sp_base_mac_get(mlxsw_sp); 3721 if (err) { 3722 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 
3723 return err; 3724 } 3725 3726 err = mlxsw_sp_kvdl_init(mlxsw_sp); 3727 if (err) { 3728 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 3729 return err; 3730 } 3731 3732 err = mlxsw_sp_fids_init(mlxsw_sp); 3733 if (err) { 3734 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 3735 goto err_fids_init; 3736 } 3737 3738 err = mlxsw_sp_traps_init(mlxsw_sp); 3739 if (err) { 3740 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 3741 goto err_traps_init; 3742 } 3743 3744 err = mlxsw_sp_buffers_init(mlxsw_sp); 3745 if (err) { 3746 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 3747 goto err_buffers_init; 3748 } 3749 3750 err = mlxsw_sp_lag_init(mlxsw_sp); 3751 if (err) { 3752 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 3753 goto err_lag_init; 3754 } 3755 3756 /* Initialize SPAN before router and switchdev, so that those components 3757 * can call mlxsw_sp_span_respin(). 3758 */ 3759 err = mlxsw_sp_span_init(mlxsw_sp); 3760 if (err) { 3761 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 3762 goto err_span_init; 3763 } 3764 3765 err = mlxsw_sp_switchdev_init(mlxsw_sp); 3766 if (err) { 3767 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 3768 goto err_switchdev_init; 3769 } 3770 3771 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 3772 if (err) { 3773 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 3774 goto err_counter_pool_init; 3775 } 3776 3777 err = mlxsw_sp_afa_init(mlxsw_sp); 3778 if (err) { 3779 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 3780 goto err_afa_init; 3781 } 3782 3783 err = mlxsw_sp_router_init(mlxsw_sp); 3784 if (err) { 3785 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 3786 goto err_router_init; 3787 } 3788 3789 /* Initialize netdevice notifier after router and SPAN is initialized, 3790 * so that the event handler can use router structures and call SPAN 3791 * respin. 
3792 */ 3793 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 3794 err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb); 3795 if (err) { 3796 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 3797 goto err_netdev_notifier; 3798 } 3799 3800 err = mlxsw_sp_acl_init(mlxsw_sp); 3801 if (err) { 3802 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 3803 goto err_acl_init; 3804 } 3805 3806 err = mlxsw_sp_dpipe_init(mlxsw_sp); 3807 if (err) { 3808 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 3809 goto err_dpipe_init; 3810 } 3811 3812 err = mlxsw_sp_ports_create(mlxsw_sp); 3813 if (err) { 3814 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 3815 goto err_ports_create; 3816 } 3817 3818 return 0; 3819 3820 err_ports_create: 3821 mlxsw_sp_dpipe_fini(mlxsw_sp); 3822 err_dpipe_init: 3823 mlxsw_sp_acl_fini(mlxsw_sp); 3824 err_acl_init: 3825 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 3826 err_netdev_notifier: 3827 mlxsw_sp_router_fini(mlxsw_sp); 3828 err_router_init: 3829 mlxsw_sp_afa_fini(mlxsw_sp); 3830 err_afa_init: 3831 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3832 err_counter_pool_init: 3833 mlxsw_sp_switchdev_fini(mlxsw_sp); 3834 err_switchdev_init: 3835 mlxsw_sp_span_fini(mlxsw_sp); 3836 err_span_init: 3837 mlxsw_sp_lag_fini(mlxsw_sp); 3838 err_lag_init: 3839 mlxsw_sp_buffers_fini(mlxsw_sp); 3840 err_buffers_init: 3841 mlxsw_sp_traps_fini(mlxsw_sp); 3842 err_traps_init: 3843 mlxsw_sp_fids_fini(mlxsw_sp); 3844 err_fids_init: 3845 mlxsw_sp_kvdl_fini(mlxsw_sp); 3846 return err; 3847 } 3848 3849 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 3850 const struct mlxsw_bus_info *mlxsw_bus_info) 3851 { 3852 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3853 3854 mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev; 3855 mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME; 3856 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 3857 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 
3858 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 3859 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 3860 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 3861 3862 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); 3863 } 3864 3865 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 3866 const struct mlxsw_bus_info *mlxsw_bus_info) 3867 { 3868 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3869 3870 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3871 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3872 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3873 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3874 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3875 3876 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); 3877 } 3878 3879 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3880 { 3881 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3882 3883 mlxsw_sp_ports_remove(mlxsw_sp); 3884 mlxsw_sp_dpipe_fini(mlxsw_sp); 3885 mlxsw_sp_acl_fini(mlxsw_sp); 3886 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 3887 mlxsw_sp_router_fini(mlxsw_sp); 3888 mlxsw_sp_afa_fini(mlxsw_sp); 3889 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3890 mlxsw_sp_switchdev_fini(mlxsw_sp); 3891 mlxsw_sp_span_fini(mlxsw_sp); 3892 mlxsw_sp_lag_fini(mlxsw_sp); 3893 mlxsw_sp_buffers_fini(mlxsw_sp); 3894 mlxsw_sp_traps_fini(mlxsw_sp); 3895 mlxsw_sp_fids_fini(mlxsw_sp); 3896 mlxsw_sp_kvdl_fini(mlxsw_sp); 3897 } 3898 3899 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 3900 .used_max_mid = 1, 3901 .max_mid = MLXSW_SP_MID_MAX, 3902 .used_flood_tables = 1, 3903 .used_flood_mode = 1, 3904 .flood_mode = 3, 3905 .max_fid_offset_flood_tables = 3, 3906 .fid_offset_flood_table_size = VLAN_N_VID - 1, 3907 .max_fid_flood_tables = 3, 3908 .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX, 3909 .used_max_ib_mc = 1, 3910 .max_ib_mc = 0, 3911 .used_max_pkey = 1, 3912 .max_pkey = 0, 3913 .used_kvd_sizes = 1, 3914 .kvd_hash_single_parts = 59, 3915 .kvd_hash_double_parts = 
41, 3916 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3917 .swid_config = { 3918 { 3919 .used_type = 1, 3920 .type = MLXSW_PORT_SWID_TYPE_ETH, 3921 } 3922 }, 3923 }; 3924 3925 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 3926 .used_max_mid = 1, 3927 .max_mid = MLXSW_SP_MID_MAX, 3928 .used_flood_tables = 1, 3929 .used_flood_mode = 1, 3930 .flood_mode = 3, 3931 .max_fid_offset_flood_tables = 3, 3932 .fid_offset_flood_table_size = VLAN_N_VID - 1, 3933 .max_fid_flood_tables = 3, 3934 .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX, 3935 .used_max_ib_mc = 1, 3936 .max_ib_mc = 0, 3937 .used_max_pkey = 1, 3938 .max_pkey = 0, 3939 .swid_config = { 3940 { 3941 .used_type = 1, 3942 .type = MLXSW_PORT_SWID_TYPE_ETH, 3943 } 3944 }, 3945 }; 3946 3947 static void 3948 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 3949 struct devlink_resource_size_params *kvd_size_params, 3950 struct devlink_resource_size_params *linear_size_params, 3951 struct devlink_resource_size_params *hash_double_size_params, 3952 struct devlink_resource_size_params *hash_single_size_params) 3953 { 3954 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3955 KVD_SINGLE_MIN_SIZE); 3956 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3957 KVD_DOUBLE_MIN_SIZE); 3958 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3959 u32 linear_size_min = 0; 3960 3961 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 3962 MLXSW_SP_KVD_GRANULARITY, 3963 DEVLINK_RESOURCE_UNIT_ENTRY); 3964 devlink_resource_size_params_init(linear_size_params, linear_size_min, 3965 kvd_size - single_size_min - 3966 double_size_min, 3967 MLXSW_SP_KVD_GRANULARITY, 3968 DEVLINK_RESOURCE_UNIT_ENTRY); 3969 devlink_resource_size_params_init(hash_double_size_params, 3970 double_size_min, 3971 kvd_size - single_size_min - 3972 linear_size_min, 3973 MLXSW_SP_KVD_GRANULARITY, 3974 DEVLINK_RESOURCE_UNIT_ENTRY); 3975 devlink_resource_size_params_init(hash_single_size_params, 
3976 single_size_min, 3977 kvd_size - double_size_min - 3978 linear_size_min, 3979 MLXSW_SP_KVD_GRANULARITY, 3980 DEVLINK_RESOURCE_UNIT_ENTRY); 3981 } 3982 3983 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3984 { 3985 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3986 struct devlink_resource_size_params hash_single_size_params; 3987 struct devlink_resource_size_params hash_double_size_params; 3988 struct devlink_resource_size_params linear_size_params; 3989 struct devlink_resource_size_params kvd_size_params; 3990 u32 kvd_size, single_size, double_size, linear_size; 3991 const struct mlxsw_config_profile *profile; 3992 int err; 3993 3994 profile = &mlxsw_sp1_config_profile; 3995 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3996 return -EIO; 3997 3998 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 3999 &linear_size_params, 4000 &hash_double_size_params, 4001 &hash_single_size_params); 4002 4003 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 4004 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 4005 kvd_size, MLXSW_SP_RESOURCE_KVD, 4006 DEVLINK_RESOURCE_ID_PARENT_TOP, 4007 &kvd_size_params); 4008 if (err) 4009 return err; 4010 4011 linear_size = profile->kvd_linear_size; 4012 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 4013 linear_size, 4014 MLXSW_SP_RESOURCE_KVD_LINEAR, 4015 MLXSW_SP_RESOURCE_KVD, 4016 &linear_size_params); 4017 if (err) 4018 return err; 4019 4020 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 4021 if (err) 4022 return err; 4023 4024 double_size = kvd_size - linear_size; 4025 double_size *= profile->kvd_hash_double_parts; 4026 double_size /= profile->kvd_hash_double_parts + 4027 profile->kvd_hash_single_parts; 4028 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 4029 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 4030 double_size, 4031 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 4032 
MLXSW_SP_RESOURCE_KVD, 4033 &hash_double_size_params); 4034 if (err) 4035 return err; 4036 4037 single_size = kvd_size - double_size - linear_size; 4038 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 4039 single_size, 4040 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 4041 MLXSW_SP_RESOURCE_KVD, 4042 &hash_single_size_params); 4043 if (err) 4044 return err; 4045 4046 return 0; 4047 } 4048 4049 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 4050 { 4051 return mlxsw_sp1_resources_kvd_register(mlxsw_core); 4052 } 4053 4054 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 4055 { 4056 return 0; 4057 } 4058 4059 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 4060 const struct mlxsw_config_profile *profile, 4061 u64 *p_single_size, u64 *p_double_size, 4062 u64 *p_linear_size) 4063 { 4064 struct devlink *devlink = priv_to_devlink(mlxsw_core); 4065 u32 double_size; 4066 int err; 4067 4068 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 4069 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 4070 return -EIO; 4071 4072 /* The hash part is what left of the kvd without the 4073 * linear part. It is split to the single size and 4074 * double size by the parts ratio from the profile. 4075 * Both sizes must be a multiplications of the 4076 * granularity from the profile. In case the user 4077 * provided the sizes they are obtained via devlink. 
4078 */ 4079 err = devlink_resource_size_get(devlink, 4080 MLXSW_SP_RESOURCE_KVD_LINEAR, 4081 p_linear_size); 4082 if (err) 4083 *p_linear_size = profile->kvd_linear_size; 4084 4085 err = devlink_resource_size_get(devlink, 4086 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 4087 p_double_size); 4088 if (err) { 4089 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 4090 *p_linear_size; 4091 double_size *= profile->kvd_hash_double_parts; 4092 double_size /= profile->kvd_hash_double_parts + 4093 profile->kvd_hash_single_parts; 4094 *p_double_size = rounddown(double_size, 4095 MLXSW_SP_KVD_GRANULARITY); 4096 } 4097 4098 err = devlink_resource_size_get(devlink, 4099 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 4100 p_single_size); 4101 if (err) 4102 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 4103 *p_double_size - *p_linear_size; 4104 4105 /* Check results are legal. */ 4106 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 4107 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 4108 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 4109 return -EIO; 4110 4111 return 0; 4112 } 4113 4114 static struct mlxsw_driver mlxsw_sp1_driver = { 4115 .kind = mlxsw_sp1_driver_name, 4116 .priv_size = sizeof(struct mlxsw_sp), 4117 .init = mlxsw_sp1_init, 4118 .fini = mlxsw_sp_fini, 4119 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 4120 .port_split = mlxsw_sp_port_split, 4121 .port_unsplit = mlxsw_sp_port_unsplit, 4122 .sb_pool_get = mlxsw_sp_sb_pool_get, 4123 .sb_pool_set = mlxsw_sp_sb_pool_set, 4124 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4125 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4126 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4127 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4128 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4129 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4130 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4131 .sb_occ_tc_port_bind_get = 
mlxsw_sp_sb_occ_tc_port_bind_get, 4132 .txhdr_construct = mlxsw_sp_txhdr_construct, 4133 .resources_register = mlxsw_sp1_resources_register, 4134 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 4135 .txhdr_len = MLXSW_TXHDR_LEN, 4136 .profile = &mlxsw_sp1_config_profile, 4137 .res_query_enabled = true, 4138 }; 4139 4140 static struct mlxsw_driver mlxsw_sp2_driver = { 4141 .kind = mlxsw_sp2_driver_name, 4142 .priv_size = sizeof(struct mlxsw_sp), 4143 .init = mlxsw_sp2_init, 4144 .fini = mlxsw_sp_fini, 4145 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 4146 .port_split = mlxsw_sp_port_split, 4147 .port_unsplit = mlxsw_sp_port_unsplit, 4148 .sb_pool_get = mlxsw_sp_sb_pool_get, 4149 .sb_pool_set = mlxsw_sp_sb_pool_set, 4150 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4151 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4152 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4153 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4154 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4155 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4156 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4157 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4158 .txhdr_construct = mlxsw_sp_txhdr_construct, 4159 .resources_register = mlxsw_sp2_resources_register, 4160 .txhdr_len = MLXSW_TXHDR_LEN, 4161 .profile = &mlxsw_sp2_config_profile, 4162 .res_query_enabled = true, 4163 }; 4164 4165 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 4166 { 4167 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 4168 } 4169 4170 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) 4171 { 4172 struct mlxsw_sp_port **p_mlxsw_sp_port = data; 4173 int ret = 0; 4174 4175 if (mlxsw_sp_port_dev_check(lower_dev)) { 4176 *p_mlxsw_sp_port = netdev_priv(lower_dev); 4177 ret = 1; 4178 } 4179 4180 return ret; 4181 } 4182 4183 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 4184 { 4185 struct mlxsw_sp_port *mlxsw_sp_port; 4186 
4187 if (mlxsw_sp_port_dev_check(dev)) 4188 return netdev_priv(dev); 4189 4190 mlxsw_sp_port = NULL; 4191 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); 4192 4193 return mlxsw_sp_port; 4194 } 4195 4196 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 4197 { 4198 struct mlxsw_sp_port *mlxsw_sp_port; 4199 4200 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 4201 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; 4202 } 4203 4204 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 4205 { 4206 struct mlxsw_sp_port *mlxsw_sp_port; 4207 4208 if (mlxsw_sp_port_dev_check(dev)) 4209 return netdev_priv(dev); 4210 4211 mlxsw_sp_port = NULL; 4212 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 4213 &mlxsw_sp_port); 4214 4215 return mlxsw_sp_port; 4216 } 4217 4218 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 4219 { 4220 struct mlxsw_sp_port *mlxsw_sp_port; 4221 4222 rcu_read_lock(); 4223 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 4224 if (mlxsw_sp_port) 4225 dev_hold(mlxsw_sp_port->dev); 4226 rcu_read_unlock(); 4227 return mlxsw_sp_port; 4228 } 4229 4230 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 4231 { 4232 dev_put(mlxsw_sp_port->dev); 4233 } 4234 4235 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4236 { 4237 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4238 4239 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 4240 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4241 } 4242 4243 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4244 { 4245 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4246 4247 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 4248 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4249 } 4250 4251 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4252 u16 lag_id, u8 port_index) 4253 { 4254 struct mlxsw_sp *mlxsw_sp = 
mlxsw_sp_port->mlxsw_sp; 4255 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4256 4257 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 4258 lag_id, port_index); 4259 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4260 } 4261 4262 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4263 u16 lag_id) 4264 { 4265 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4266 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4267 4268 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 4269 lag_id); 4270 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4271 } 4272 4273 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 4274 u16 lag_id) 4275 { 4276 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4277 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4278 4279 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 4280 lag_id); 4281 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4282 } 4283 4284 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 4285 u16 lag_id) 4286 { 4287 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4288 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4289 4290 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 4291 lag_id); 4292 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4293 } 4294 4295 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4296 struct net_device *lag_dev, 4297 u16 *p_lag_id) 4298 { 4299 struct mlxsw_sp_upper *lag; 4300 int free_lag_id = -1; 4301 u64 max_lag; 4302 int i; 4303 4304 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); 4305 for (i = 0; i < max_lag; i++) { 4306 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 4307 if (lag->ref_count) { 4308 if (lag->dev == lag_dev) { 4309 *p_lag_id = i; 4310 return 0; 4311 } 4312 } else if (free_lag_id < 0) { 4313 free_lag_id = i; 4314 } 4315 } 4316 if (free_lag_id < 0) 4317 return -EBUSY; 4318 *p_lag_id 
= free_lag_id; 4319 return 0; 4320 } 4321 4322 static bool 4323 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 4324 struct net_device *lag_dev, 4325 struct netdev_lag_upper_info *lag_upper_info, 4326 struct netlink_ext_ack *extack) 4327 { 4328 u16 lag_id; 4329 4330 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 4331 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 4332 return false; 4333 } 4334 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 4335 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 4336 return false; 4337 } 4338 return true; 4339 } 4340 4341 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4342 u16 lag_id, u8 *p_port_index) 4343 { 4344 u64 max_lag_members; 4345 int i; 4346 4347 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 4348 MAX_LAG_MEMBERS); 4349 for (i = 0; i < max_lag_members; i++) { 4350 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 4351 *p_port_index = i; 4352 return 0; 4353 } 4354 } 4355 return -EBUSY; 4356 } 4357 4358 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 4359 struct net_device *lag_dev) 4360 { 4361 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4362 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 4363 struct mlxsw_sp_upper *lag; 4364 u16 lag_id; 4365 u8 port_index; 4366 int err; 4367 4368 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 4369 if (err) 4370 return err; 4371 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4372 if (!lag->ref_count) { 4373 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 4374 if (err) 4375 return err; 4376 lag->dev = lag_dev; 4377 } 4378 4379 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 4380 if (err) 4381 return err; 4382 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 4383 if (err) 4384 goto err_col_port_add; 4385 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id); 4386 if (err) 4387 goto err_col_port_enable; 4388 4389 
mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 4390 mlxsw_sp_port->local_port); 4391 mlxsw_sp_port->lag_id = lag_id; 4392 mlxsw_sp_port->lagged = 1; 4393 lag->ref_count++; 4394 4395 /* Port is no longer usable as a router interface */ 4396 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1); 4397 if (mlxsw_sp_port_vlan->fid) 4398 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 4399 4400 return 0; 4401 4402 err_col_port_enable: 4403 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4404 err_col_port_add: 4405 if (!lag->ref_count) 4406 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4407 return err; 4408 } 4409 4410 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 4411 struct net_device *lag_dev) 4412 { 4413 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4414 u16 lag_id = mlxsw_sp_port->lag_id; 4415 struct mlxsw_sp_upper *lag; 4416 4417 if (!mlxsw_sp_port->lagged) 4418 return; 4419 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4420 WARN_ON(lag->ref_count == 0); 4421 4422 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); 4423 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4424 4425 /* Any VLANs configured on the port are no longer valid */ 4426 mlxsw_sp_port_vlan_flush(mlxsw_sp_port); 4427 4428 if (lag->ref_count == 1) 4429 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4430 4431 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4432 mlxsw_sp_port->local_port); 4433 mlxsw_sp_port->lagged = 0; 4434 lag->ref_count--; 4435 4436 mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); 4437 /* Make sure untagged frames are allowed to ingress */ 4438 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); 4439 } 4440 4441 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4442 u16 lag_id) 4443 { 4444 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4445 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4446 4447 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 4448 mlxsw_sp_port->local_port); 4449 return 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4450 } 4451 4452 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4453 u16 lag_id) 4454 { 4455 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4456 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4457 4458 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 4459 mlxsw_sp_port->local_port); 4460 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4461 } 4462 4463 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port, 4464 bool lag_tx_enabled) 4465 { 4466 if (lag_tx_enabled) 4467 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, 4468 mlxsw_sp_port->lag_id); 4469 else 4470 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 4471 mlxsw_sp_port->lag_id); 4472 } 4473 4474 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 4475 struct netdev_lag_lower_state_info *info) 4476 { 4477 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); 4478 } 4479 4480 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 4481 bool enable) 4482 { 4483 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4484 enum mlxsw_reg_spms_state spms_state; 4485 char *spms_pl; 4486 u16 vid; 4487 int err; 4488 4489 spms_state = enable ? 
MLXSW_REG_SPMS_STATE_FORWARDING : 4490 MLXSW_REG_SPMS_STATE_DISCARDING; 4491 4492 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 4493 if (!spms_pl) 4494 return -ENOMEM; 4495 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 4496 4497 for (vid = 0; vid < VLAN_N_VID; vid++) 4498 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 4499 4500 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 4501 kfree(spms_pl); 4502 return err; 4503 } 4504 4505 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4506 { 4507 u16 vid = 1; 4508 int err; 4509 4510 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 4511 if (err) 4512 return err; 4513 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 4514 if (err) 4515 goto err_port_stp_set; 4516 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, 4517 true, false); 4518 if (err) 4519 goto err_port_vlan_set; 4520 4521 for (; vid <= VLAN_N_VID - 1; vid++) { 4522 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4523 vid, false); 4524 if (err) 4525 goto err_vid_learning_set; 4526 } 4527 4528 return 0; 4529 4530 err_vid_learning_set: 4531 for (vid--; vid >= 1; vid--) 4532 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 4533 err_port_vlan_set: 4534 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4535 err_port_stp_set: 4536 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4537 return err; 4538 } 4539 4540 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4541 { 4542 u16 vid; 4543 4544 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 4545 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4546 vid, true); 4547 4548 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, 4549 false, false); 4550 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4551 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4552 } 4553 4554 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 4555 struct net_device *dev, 4556 unsigned long event, void *ptr) 4557 { 4558 struct 
netdev_notifier_changeupper_info *info; 4559 struct mlxsw_sp_port *mlxsw_sp_port; 4560 struct netlink_ext_ack *extack; 4561 struct net_device *upper_dev; 4562 struct mlxsw_sp *mlxsw_sp; 4563 int err = 0; 4564 4565 mlxsw_sp_port = netdev_priv(dev); 4566 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4567 info = ptr; 4568 extack = netdev_notifier_info_to_extack(&info->info); 4569 4570 switch (event) { 4571 case NETDEV_PRECHANGEUPPER: 4572 upper_dev = info->upper_dev; 4573 if (!is_vlan_dev(upper_dev) && 4574 !netif_is_lag_master(upper_dev) && 4575 !netif_is_bridge_master(upper_dev) && 4576 !netif_is_ovs_master(upper_dev) && 4577 !netif_is_macvlan(upper_dev)) { 4578 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4579 return -EINVAL; 4580 } 4581 if (!info->linking) 4582 break; 4583 if (netdev_has_any_upper_dev(upper_dev) && 4584 (!netif_is_bridge_master(upper_dev) || 4585 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4586 upper_dev))) { 4587 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 4588 return -EINVAL; 4589 } 4590 if (netif_is_lag_master(upper_dev) && 4591 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 4592 info->upper_info, extack)) 4593 return -EINVAL; 4594 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 4595 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 4596 return -EINVAL; 4597 } 4598 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 4599 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 4600 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 4601 return -EINVAL; 4602 } 4603 if (netif_is_macvlan(upper_dev) && 4604 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) { 4605 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4606 return -EOPNOTSUPP; 4607 } 4608 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 4609 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this 
device has a VLAN"); 4610 return -EINVAL; 4611 } 4612 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 4613 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 4614 return -EINVAL; 4615 } 4616 if (is_vlan_dev(upper_dev) && 4617 vlan_dev_vlan_id(upper_dev) == 1) { 4618 NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic"); 4619 return -EINVAL; 4620 } 4621 break; 4622 case NETDEV_CHANGEUPPER: 4623 upper_dev = info->upper_dev; 4624 if (netif_is_bridge_master(upper_dev)) { 4625 if (info->linking) 4626 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4627 lower_dev, 4628 upper_dev, 4629 extack); 4630 else 4631 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4632 lower_dev, 4633 upper_dev); 4634 } else if (netif_is_lag_master(upper_dev)) { 4635 if (info->linking) 4636 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 4637 upper_dev); 4638 else 4639 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 4640 upper_dev); 4641 } else if (netif_is_ovs_master(upper_dev)) { 4642 if (info->linking) 4643 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 4644 else 4645 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 4646 } else if (netif_is_macvlan(upper_dev)) { 4647 if (!info->linking) 4648 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4649 } 4650 break; 4651 } 4652 4653 return err; 4654 } 4655 4656 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 4657 unsigned long event, void *ptr) 4658 { 4659 struct netdev_notifier_changelowerstate_info *info; 4660 struct mlxsw_sp_port *mlxsw_sp_port; 4661 int err; 4662 4663 mlxsw_sp_port = netdev_priv(dev); 4664 info = ptr; 4665 4666 switch (event) { 4667 case NETDEV_CHANGELOWERSTATE: 4668 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 4669 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 4670 info->lower_state_info); 4671 if (err) 4672 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 4673 } 4674 break; 4675 } 4676 4677 return 0; 4678 } 4679 4680 
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 4681 struct net_device *port_dev, 4682 unsigned long event, void *ptr) 4683 { 4684 switch (event) { 4685 case NETDEV_PRECHANGEUPPER: 4686 case NETDEV_CHANGEUPPER: 4687 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 4688 event, ptr); 4689 case NETDEV_CHANGELOWERSTATE: 4690 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 4691 ptr); 4692 } 4693 4694 return 0; 4695 } 4696 4697 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 4698 unsigned long event, void *ptr) 4699 { 4700 struct net_device *dev; 4701 struct list_head *iter; 4702 int ret; 4703 4704 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4705 if (mlxsw_sp_port_dev_check(dev)) { 4706 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 4707 ptr); 4708 if (ret) 4709 return ret; 4710 } 4711 } 4712 4713 return 0; 4714 } 4715 4716 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 4717 struct net_device *dev, 4718 unsigned long event, void *ptr, 4719 u16 vid) 4720 { 4721 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 4722 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4723 struct netdev_notifier_changeupper_info *info = ptr; 4724 struct netlink_ext_ack *extack; 4725 struct net_device *upper_dev; 4726 int err = 0; 4727 4728 extack = netdev_notifier_info_to_extack(&info->info); 4729 4730 switch (event) { 4731 case NETDEV_PRECHANGEUPPER: 4732 upper_dev = info->upper_dev; 4733 if (!netif_is_bridge_master(upper_dev) && 4734 !netif_is_macvlan(upper_dev)) { 4735 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4736 return -EINVAL; 4737 } 4738 if (!info->linking) 4739 break; 4740 if (netdev_has_any_upper_dev(upper_dev) && 4741 (!netif_is_bridge_master(upper_dev) || 4742 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4743 upper_dev))) { 4744 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 4745 
return -EINVAL; 4746 } 4747 if (netif_is_macvlan(upper_dev) && 4748 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { 4749 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4750 return -EOPNOTSUPP; 4751 } 4752 break; 4753 case NETDEV_CHANGEUPPER: 4754 upper_dev = info->upper_dev; 4755 if (netif_is_bridge_master(upper_dev)) { 4756 if (info->linking) 4757 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4758 vlan_dev, 4759 upper_dev, 4760 extack); 4761 else 4762 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4763 vlan_dev, 4764 upper_dev); 4765 } else if (netif_is_macvlan(upper_dev)) { 4766 if (!info->linking) 4767 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4768 } else { 4769 err = -EINVAL; 4770 WARN_ON(1); 4771 } 4772 break; 4773 } 4774 4775 return err; 4776 } 4777 4778 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 4779 struct net_device *lag_dev, 4780 unsigned long event, 4781 void *ptr, u16 vid) 4782 { 4783 struct net_device *dev; 4784 struct list_head *iter; 4785 int ret; 4786 4787 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4788 if (mlxsw_sp_port_dev_check(dev)) { 4789 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 4790 event, ptr, 4791 vid); 4792 if (ret) 4793 return ret; 4794 } 4795 } 4796 4797 return 0; 4798 } 4799 4800 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 4801 unsigned long event, void *ptr) 4802 { 4803 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 4804 u16 vid = vlan_dev_vlan_id(vlan_dev); 4805 4806 if (mlxsw_sp_port_dev_check(real_dev)) 4807 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 4808 event, ptr, vid); 4809 else if (netif_is_lag_master(real_dev)) 4810 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 4811 real_dev, event, 4812 ptr, vid); 4813 4814 return 0; 4815 } 4816 4817 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 4818 unsigned long event, void *ptr) 4819 { 4820 struct mlxsw_sp 
*mlxsw_sp = mlxsw_sp_lower_get(br_dev); 4821 struct netdev_notifier_changeupper_info *info = ptr; 4822 struct netlink_ext_ack *extack; 4823 struct net_device *upper_dev; 4824 4825 if (!mlxsw_sp) 4826 return 0; 4827 4828 extack = netdev_notifier_info_to_extack(&info->info); 4829 4830 switch (event) { 4831 case NETDEV_PRECHANGEUPPER: 4832 upper_dev = info->upper_dev; 4833 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { 4834 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4835 return -EOPNOTSUPP; 4836 } 4837 if (!info->linking) 4838 break; 4839 if (netif_is_macvlan(upper_dev) && 4840 !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) { 4841 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4842 return -EOPNOTSUPP; 4843 } 4844 break; 4845 case NETDEV_CHANGEUPPER: 4846 upper_dev = info->upper_dev; 4847 if (info->linking) 4848 break; 4849 if (netif_is_macvlan(upper_dev)) 4850 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4851 break; 4852 } 4853 4854 return 0; 4855 } 4856 4857 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 4858 unsigned long event, void *ptr) 4859 { 4860 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 4861 struct netdev_notifier_changeupper_info *info = ptr; 4862 struct netlink_ext_ack *extack; 4863 4864 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 4865 return 0; 4866 4867 extack = netdev_notifier_info_to_extack(&info->info); 4868 4869 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ 4870 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4871 4872 return -EOPNOTSUPP; 4873 } 4874 4875 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) 4876 { 4877 struct netdev_notifier_changeupper_info *info = ptr; 4878 4879 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) 4880 return false; 4881 return netif_is_l3_master(info->upper_dev); 4882 } 4883 4884 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 
4885 unsigned long event, void *ptr) 4886 { 4887 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4888 struct mlxsw_sp_span_entry *span_entry; 4889 struct mlxsw_sp *mlxsw_sp; 4890 int err = 0; 4891 4892 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 4893 if (event == NETDEV_UNREGISTER) { 4894 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 4895 if (span_entry) 4896 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 4897 } 4898 mlxsw_sp_span_respin(mlxsw_sp); 4899 4900 if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) 4901 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, 4902 event, ptr); 4903 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) 4904 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, 4905 event, ptr); 4906 else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU) 4907 err = mlxsw_sp_netdevice_router_port_event(dev); 4908 else if (mlxsw_sp_is_vrf_event(event, ptr)) 4909 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); 4910 else if (mlxsw_sp_port_dev_check(dev)) 4911 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); 4912 else if (netif_is_lag_master(dev)) 4913 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 4914 else if (is_vlan_dev(dev)) 4915 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 4916 else if (netif_is_bridge_master(dev)) 4917 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); 4918 else if (netif_is_macvlan(dev)) 4919 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 4920 4921 return notifier_from_errno(err); 4922 } 4923 4924 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { 4925 .notifier_call = mlxsw_sp_inetaddr_valid_event, 4926 }; 4927 4928 static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = { 4929 .notifier_call = mlxsw_sp_inetaddr_event, 4930 }; 4931 4932 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { 4933 .notifier_call = mlxsw_sp_inet6addr_valid_event, 4934 }; 4935 4936 static 
struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = { 4937 .notifier_call = mlxsw_sp_inet6addr_event, 4938 }; 4939 4940 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 4941 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 4942 {0, }, 4943 }; 4944 4945 static struct pci_driver mlxsw_sp1_pci_driver = { 4946 .name = mlxsw_sp1_driver_name, 4947 .id_table = mlxsw_sp1_pci_id_table, 4948 }; 4949 4950 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 4951 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 4952 {0, }, 4953 }; 4954 4955 static struct pci_driver mlxsw_sp2_pci_driver = { 4956 .name = mlxsw_sp2_driver_name, 4957 .id_table = mlxsw_sp2_pci_id_table, 4958 }; 4959 4960 static int __init mlxsw_sp_module_init(void) 4961 { 4962 int err; 4963 4964 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 4965 register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 4966 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 4967 register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); 4968 4969 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 4970 if (err) 4971 goto err_sp1_core_driver_register; 4972 4973 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 4974 if (err) 4975 goto err_sp2_core_driver_register; 4976 4977 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 4978 if (err) 4979 goto err_sp1_pci_driver_register; 4980 4981 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 4982 if (err) 4983 goto err_sp2_pci_driver_register; 4984 4985 return 0; 4986 4987 err_sp2_pci_driver_register: 4988 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 4989 err_sp1_pci_driver_register: 4990 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 4991 err_sp2_core_driver_register: 4992 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 4993 err_sp1_core_driver_register: 4994 unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); 4995 
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 4996 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 4997 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 4998 return err; 4999 } 5000 5001 static void __exit mlxsw_sp_module_exit(void) 5002 { 5003 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5004 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5005 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5006 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5007 unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); 5008 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5009 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 5010 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5011 } 5012 5013 module_init(mlxsw_sp_module_init); 5014 module_exit(mlxsw_sp_module_exit); 5015 5016 MODULE_LICENSE("Dual BSD/GPL"); 5017 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 5018 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 5019 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 5020 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 5021 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 5022