/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "../mlxfw/mlxfw.h"

#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1420
#define MLXSW_FWREV_SUBMINOR 122

static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
	.major = MLXSW_FWREV_MAJOR,
	.minor = MLXSW_FWREV_MINOR,
	.subminor = MLXSW_FWREV_SUBMINOR
};

#define MLXSW_SP_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
	"." __stringify(MLXSW_FWREV_MINOR) \
	"." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
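
/* Each MLXSW_ITEM32() above expands into typed accessors for one field of
 * the Tx header. For example, the 'version' item (offset 0x00, shift 28,
 * size 4) yields mlxsw_tx_hdr_version_set()/mlxsw_tx_hdr_version_get(),
 * operating on bits 28..31 of the first 32-bit word of the header; the
 * setters are used below in mlxsw_sp_txhdr_construct().
 */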

struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query = mlxsw_sp_component_query,
	.fsm_lock = mlxsw_sp_fsm_lock,
	.fsm_component_update = mlxsw_sp_fsm_component_update,
	.fsm_block_download = mlxsw_sp_fsm_block_download,
	.fsm_component_verify = mlxsw_sp_fsm_component_verify,
	.fsm_activate = mlxsw_sp_fsm_activate,
	.fsm_query_state = mlxsw_sp_fsm_query_state,
	.fsm_cancel = mlxsw_sp_fsm_cancel,
	.fsm_release = mlxsw_sp_fsm_release
};
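
/* The common mlxfw code drives the ops above. A rough sketch of the
 * flashing sequence it performs (illustrative only; see the mlxfw core
 * for the authoritative flow):
 *
 *	fsm_lock(dev, &fwhandle);
 *	for each component in the .mfa2 file:
 *		component_query(dev, idx, &max_size, &align_bits, &max_write);
 *		fsm_component_update(dev, fwhandle, idx, size);
 *		fsm_block_download(dev, fwhandle, data, size, offset);  (looped)
 *		fsm_component_verify(dev, fwhandle, idx);
 *	fsm_activate(dev, fwhandle);
 *	fsm_release(dev, fwhandle);
 *
 * On failure, fsm_cancel() followed by fsm_release() aborts the update.
 */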

static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};

	return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}

static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
			       const struct mlxsw_fw_rev *b)
{
	if (a->major != b->major)
		return a->major > b->major;
	if (a->minor != b->minor)
		return a->minor > b->minor;
	return a->subminor >= b->subminor;
}
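
/* Revisions compare lexicographically, major field first. For example,
 * against the supported revision above (13.1420.122): a device running
 * 13.1420.200 passes the check, 13.1400.999 does not (1400 < 1420, so the
 * subminor is never consulted), and neither does 12.2000.0 (major
 * dominates).
 */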

static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct firmware *firmware;
	int err;

	if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
		 MLXSW_SP_FW_FILENAME);

	err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			MLXSW_SP_FW_FILENAME);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
	return err;
}

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
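
/* Typical flow counter usage by a consumer such as the ACL code
 * (illustrative sketch only):
 *
 *	unsigned int counter_index;
 *	u64 packets, bytes;
 *
 *	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
 *	(the counter is cleared as part of allocation)
 *	...point a hardware action at counter_index...
 *	err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
 *					&packets, &bytes);
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
 */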

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry
*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
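
/* Worked example for the sizing above (assuming Spectrum's 96 byte shared
 * buffer cell): an MTU of 1518 yields 1518 * 5 / 2 = 3795 bytes, which
 * mlxsw_sp_bytes_cells() rounds up to 40 cells, plus one extra cell, for
 * a 41 cell egress mirror buffer.
 */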

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
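
/* Port MACs are derived from the switch base MAC by adding the local port
 * number to the last byte. Illustrative example (made-up base MAC): with
 * base_mac ec:0d:9a:00:00:00, local port 7 gets ec:0d:9a:00:00:07.
 */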

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
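
/* Illustrative example of the mapping above: a quad-lane port on module 2
 * starting at lane 0 programs four PMLP records, all pointing at module 2
 * with lanes 0, 1, 2 and 3 (the same lane is used for both Rx and Tx).
 * A single-lane split port would program width 1 with just its own lane.
 */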

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}
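
/* Worked example for the PFC case, again assuming a 96 byte cell: the
 * ieee_pfc delay allowance is given in bit-times, so a delay of 32768 is
 * DIV_ROUND_UP(32768, 8) = 4096 bytes = 43 cells, and with an MTU of 1518
 * (16 cells) the reserved delay headroom is 2 * 43 + 16 = 102 cells on
 * top of the PG threshold.
 */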

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
					int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}

static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return mlxsw_sp_port_vlan;

	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}
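
/* Typical caller pattern (illustrative): get creates the {port, VID}
 * entry on first use, put tears it down once its last user is gone:
 *
 *	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
 *	if (IS_ERR(mlxsw_sp_port_vlan))
 *		return PTR_ERR(mlxsw_sp_port_vlan);
 *	...
 *	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
 */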

void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}
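
/* Examples of the resulting names (illustrative): a non-split port on
 * module 7 is "p8"; after a two-way split of that module, the port on
 * lanes 0-1 is "p8s0" and the one on lanes 2-3 is "p8s1"
 * (lane / width = 2 / 2 = 1).
 */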

static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;

	to_port = mlxsw_sp->ports[mirror->to_local_port];
	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
}

static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  __be16 protocol,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tc_single_action(cls->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = cls->cookie;

	tcf_exts_to_list(cls->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}
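
/* The matchall offload above is what backs commands such as (illustrative
 * port names):
 *
 *	tc filter add dev sw1p1 parent ffff: protocol all matchall skip_sw \
 *		action mirred egress mirror dev sw1p2
 *
 * which mirrors all traffic ingressing sw1p1 to sw1p2 through a SPAN
 * agent; a matchall sample action is offloaded to per-port sampling
 * (MPSC) the same way.
 */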

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *cls)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 cls->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
			     u32 chain_index, __be16 proto,
			     struct tc_to_netdev *tc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

	if (chain_index)
		return -EOPNOTSUPP;

	switch (tc->type) {
	case TC_SETUP_MATCHALL:
		switch (tc->cls_mall->command) {
		case TC_CLSMATCHALL_REPLACE:
			return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
							      proto,
							      tc->cls_mall,
							      ingress);
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
						       tc->cls_mall);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
						       proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
						tc->cls_flower);
			return 0;
		case TC_CLSFLOWER_STATS:
			return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
						     tc->cls_flower);
		default:
			return -EOPNOTSUPP;
		}
	}

	return -EOPNOTSUPP;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
mlxsw_sp_port->local_port); 1789 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); 1790 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); 1791 1792 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), 1793 pfcc_pl); 1794 } 1795 1796 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, 1797 struct ethtool_pauseparam *pause) 1798 { 1799 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1800 bool pause_en = pause->tx_pause || pause->rx_pause; 1801 int err; 1802 1803 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { 1804 netdev_err(dev, "PFC already enabled on port\n"); 1805 return -EINVAL; 1806 } 1807 1808 if (pause->autoneg) { 1809 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); 1810 return -EINVAL; 1811 } 1812 1813 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1814 if (err) { 1815 netdev_err(dev, "Failed to configure port's headroom\n"); 1816 return err; 1817 } 1818 1819 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); 1820 if (err) { 1821 netdev_err(dev, "Failed to set PAUSE parameters\n"); 1822 goto err_port_pause_configure; 1823 } 1824 1825 mlxsw_sp_port->link.rx_pause = pause->rx_pause; 1826 mlxsw_sp_port->link.tx_pause = pause->tx_pause; 1827 1828 return 0; 1829 1830 err_port_pause_configure: 1831 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1832 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1833 return err; 1834 } 1835 1836 struct mlxsw_sp_port_hw_stats { 1837 char str[ETH_GSTRING_LEN]; 1838 u64 (*getter)(const char *payload); 1839 bool cells_bytes; 1840 }; 1841 1842 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 1843 { 1844 .str = "a_frames_transmitted_ok", 1845 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 1846 }, 1847 { 1848 .str = "a_frames_received_ok", 1849 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 1850 }, 1851 { 1852 .str = "a_frame_check_sequence_errors", 1853 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 1854 }, 1855 { 1856 .str = "a_alignment_errors", 1857 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 1858 }, 1859 { 1860 .str = "a_octets_transmitted_ok", 1861 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 1862 }, 1863 { 1864 .str = "a_octets_received_ok", 1865 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 1866 }, 1867 { 1868 .str = "a_multicast_frames_xmitted_ok", 1869 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 1870 }, 1871 { 1872 .str = "a_broadcast_frames_xmitted_ok", 1873 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 1874 }, 1875 { 1876 .str = "a_multicast_frames_received_ok", 1877 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 1878 }, 1879 { 1880 .str = "a_broadcast_frames_received_ok", 1881 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 1882 }, 1883 { 1884 .str = "a_in_range_length_errors", 1885 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 1886 }, 1887 { 1888 .str = "a_out_of_range_length_field", 1889 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 1890 }, 1891 { 1892 .str = "a_frame_too_long_errors", 1893 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 1894 }, 1895 { 1896 .str = "a_symbol_error_during_carrier", 1897 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 1898 }, 1899 { 1900 .str = "a_mac_control_frames_transmitted", 1901 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 1902 }, 1903 { 1904 .str = "a_mac_control_frames_received", 1905 .getter = 
mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 1906 }, 1907 { 1908 .str = "a_unsupported_opcodes_received", 1909 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 1910 }, 1911 { 1912 .str = "a_pause_mac_ctrl_frames_received", 1913 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 1914 }, 1915 { 1916 .str = "a_pause_mac_ctrl_frames_xmitted", 1917 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 1918 }, 1919 }; 1920 1921 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 1922 1923 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { 1924 { 1925 .str = "rx_octets_prio", 1926 .getter = mlxsw_reg_ppcnt_rx_octets_get, 1927 }, 1928 { 1929 .str = "rx_frames_prio", 1930 .getter = mlxsw_reg_ppcnt_rx_frames_get, 1931 }, 1932 { 1933 .str = "tx_octets_prio", 1934 .getter = mlxsw_reg_ppcnt_tx_octets_get, 1935 }, 1936 { 1937 .str = "tx_frames_prio", 1938 .getter = mlxsw_reg_ppcnt_tx_frames_get, 1939 }, 1940 { 1941 .str = "rx_pause_prio", 1942 .getter = mlxsw_reg_ppcnt_rx_pause_get, 1943 }, 1944 { 1945 .str = "rx_pause_duration_prio", 1946 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, 1947 }, 1948 { 1949 .str = "tx_pause_prio", 1950 .getter = mlxsw_reg_ppcnt_tx_pause_get, 1951 }, 1952 { 1953 .str = "tx_pause_duration_prio", 1954 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, 1955 }, 1956 }; 1957 1958 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) 1959 1960 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { 1961 { 1962 .str = "tc_transmit_queue_tc", 1963 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, 1964 .cells_bytes = true, 1965 }, 1966 { 1967 .str = "tc_no_buffer_discard_uc_tc", 1968 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, 1969 }, 1970 }; 1971 1972 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) 1973 1974 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ 1975 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \ 1976 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \ 1977 IEEE_8021QAZ_MAX_TCS) 1978 1979 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) 1980 { 1981 int i; 1982 1983 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { 1984 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 1985 mlxsw_sp_port_hw_prio_stats[i].str, prio); 1986 *p += ETH_GSTRING_LEN; 1987 } 1988 } 1989 1990 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) 1991 { 1992 int i; 1993 1994 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { 1995 snprintf(*p, ETH_GSTRING_LEN, "%s_%d", 1996 mlxsw_sp_port_hw_tc_stats[i].str, tc); 1997 *p += ETH_GSTRING_LEN; 1998 } 1999 } 2000 2001 static void mlxsw_sp_port_get_strings(struct net_device *dev, 2002 u32 stringset, u8 *data) 2003 { 2004 u8 *p = data; 2005 int i; 2006 2007 switch (stringset) { 2008 case ETH_SS_STATS: 2009 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 2010 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 2011 ETH_GSTRING_LEN); 2012 p += ETH_GSTRING_LEN; 2013 } 2014 2015 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2016 mlxsw_sp_port_get_prio_strings(&p, i); 2017 2018 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 2019 mlxsw_sp_port_get_tc_strings(&p, i); 2020 2021 break; 2022 } 2023 } 2024 2025 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 2026 enum ethtool_phys_id_state state) 2027 { 2028 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2029 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2030 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 2031 bool active; 2032 2033 switch (state) { 
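/* A minimal usage sketch (the interface name is illustrative): running
 * "ethtool -p sw1p1" invokes this handler with ETHTOOL_ID_ACTIVE when
 * identification starts and with ETHTOOL_ID_INACTIVE when it stops. The
 * MLCR register write below offloads the actual LED blinking to the
 * hardware, so the ETHTOOL_ID_ON/OFF software-blink states are not
 * needed here and fall through to -EOPNOTSUPP.
 */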
2034 case ETHTOOL_ID_ACTIVE: 2035 active = true; 2036 break; 2037 case ETHTOOL_ID_INACTIVE: 2038 active = false; 2039 break; 2040 default: 2041 return -EOPNOTSUPP; 2042 } 2043 2044 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 2045 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 2046 } 2047 2048 static int 2049 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, 2050 int *p_len, enum mlxsw_reg_ppcnt_grp grp) 2051 { 2052 switch (grp) { 2053 case MLXSW_REG_PPCNT_IEEE_8023_CNT: 2054 *p_hw_stats = mlxsw_sp_port_hw_stats; 2055 *p_len = MLXSW_SP_PORT_HW_STATS_LEN; 2056 break; 2057 case MLXSW_REG_PPCNT_PRIO_CNT: 2058 *p_hw_stats = mlxsw_sp_port_hw_prio_stats; 2059 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2060 break; 2061 case MLXSW_REG_PPCNT_TC_CNT: 2062 *p_hw_stats = mlxsw_sp_port_hw_tc_stats; 2063 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; 2064 break; 2065 default: 2066 WARN_ON(1); 2067 return -EOPNOTSUPP; 2068 } 2069 return 0; 2070 } 2071 2072 static void __mlxsw_sp_port_get_stats(struct net_device *dev, 2073 enum mlxsw_reg_ppcnt_grp grp, int prio, 2074 u64 *data, int data_index) 2075 { 2076 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2077 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2078 struct mlxsw_sp_port_hw_stats *hw_stats; 2079 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 2080 int i, len; 2081 int err; 2082 2083 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); 2084 if (err) 2085 return; 2086 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); 2087 for (i = 0; i < len; i++) { 2088 data[data_index + i] = hw_stats[i].getter(ppcnt_pl); 2089 if (!hw_stats[i].cells_bytes) 2090 continue; 2091 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, 2092 data[data_index + i]); 2093 } 2094 } 2095 2096 static void mlxsw_sp_port_get_stats(struct net_device *dev, 2097 struct ethtool_stats *stats, u64 *data) 2098 { 2099 int i, data_index = 0; 2100 2101 /* IEEE 802.3 Counters */ 2102 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, 2103 data, data_index); 2104 data_index = MLXSW_SP_PORT_HW_STATS_LEN; 2105 2106 /* Per-Priority Counters */ 2107 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2108 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, 2109 data, data_index); 2110 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; 2111 } 2112 2113 /* Per-TC Counters */ 2114 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 2115 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, 2116 data, data_index); 2117 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; 2118 } 2119 } 2120 2121 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 2122 { 2123 switch (sset) { 2124 case ETH_SS_STATS: 2125 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; 2126 default: 2127 return -EOPNOTSUPP; 2128 } 2129 } 2130 2131 struct mlxsw_sp_port_link_mode { 2132 enum ethtool_link_mode_bit_indices mask_ethtool; 2133 u32 mask; 2134 u32 speed; 2135 }; 2136 2137 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = { 2138 { 2139 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 2140 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2141 .speed = SPEED_100, 2142 }, 2143 { 2144 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 2145 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 2146 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2147 .speed = SPEED_1000, 2148 }, 2149 { 2150 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 2151 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2152 .speed = SPEED_10000, 2153 }, 2154 { 2155 .mask = 
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2156 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
2157 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2158 .speed = SPEED_10000,
2159 },
2160 {
2161 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2162 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2163 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2164 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
2165 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2166 .speed = SPEED_10000,
2167 },
2168 {
2169 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
2170 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2171 .speed = SPEED_20000,
2172 },
2173 {
2174 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
2175 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2176 .speed = SPEED_40000,
2177 },
2178 {
2179 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
2180 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2181 .speed = SPEED_40000,
2182 },
2183 {
2184 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
2185 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2186 .speed = SPEED_40000,
2187 },
2188 {
2189 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
2190 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2191 .speed = SPEED_40000,
2192 },
2193 {
2194 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2195 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2196 .speed = SPEED_25000,
2197 },
2198 {
2199 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2200 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2201 .speed = SPEED_25000,
2202 },
2203 {
2204 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2205 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2206 .speed = SPEED_25000,
2207 },
2213 {
2214 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2215 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2216 .speed = SPEED_50000,
2217 },
2218 {
2219 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2220 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2221 .speed = SPEED_50000,
2222 },
2223 {
2224 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2225 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2226 .speed = SPEED_50000,
2227 },
2228 {
2229 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2230 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2231 .speed = SPEED_56000,
2232 },
2233 {
2234 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2235 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2236 .speed = SPEED_56000,
2237 },
2238 {
2239 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2240 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2241 .speed = SPEED_56000,
2242 },
2243 {
2244 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2245 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2246 .speed = SPEED_56000,
2247 },
2248 {
2249 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2250 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2251 .speed = SPEED_100000,
2252 },
2253 {
2254 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2255 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2256 .speed = SPEED_100000,
2257 },
2258 {
2259 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2260 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2261 .speed = SPEED_100000,
2262 },
2263 {
2264 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2265 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2266 .speed =
SPEED_100000, 2267 }, 2268 }; 2269 2270 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode) 2271 2272 static void 2273 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto, 2274 struct ethtool_link_ksettings *cmd) 2275 { 2276 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2277 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2278 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2279 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2280 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2281 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2282 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 2283 2284 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2285 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2286 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2287 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 2288 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 2289 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); 2290 } 2291 2292 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode) 2293 { 2294 int i; 2295 2296 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2297 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) 2298 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2299 mode); 2300 } 2301 } 2302 2303 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, 2304 struct ethtool_link_ksettings *cmd) 2305 { 2306 u32 speed = SPEED_UNKNOWN; 2307 u8 duplex = DUPLEX_UNKNOWN; 2308 int i; 2309 2310 if (!carrier_ok) 2311 goto out; 2312 2313 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2314 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { 2315 speed = mlxsw_sp_port_link_mode[i].speed; 2316 duplex = DUPLEX_FULL; 2317 break; 2318 } 2319 } 2320 out: 2321 cmd->base.speed = speed; 2322 cmd->base.duplex = duplex; 2323 } 2324 2325 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) 2326 { 2327 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 2328 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 2329 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 2330 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 2331 return PORT_FIBRE; 2332 2333 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 2334 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 2335 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) 2336 return PORT_DA; 2337 2338 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 2339 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 2340 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 2341 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) 2342 return PORT_NONE; 2343 2344 return PORT_OTHER; 2345 } 2346 2347 static u32 2348 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd) 2349 { 2350 u32 ptys_proto = 0; 2351 int i; 2352 2353 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2354 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, 2355 cmd->link_modes.advertising)) 2356 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2357 } 2358 return ptys_proto; 2359 } 2360 2361 static u32 mlxsw_sp_to_ptys_speed(u32 speed) 2362 { 2363 u32 ptys_proto = 0; 2364 int i; 2365 2366 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2367 if (speed == mlxsw_sp_port_link_mode[i].speed) 2368 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2369 } 2370 return ptys_proto; 2371 } 2372 2373 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) 2374 { 2375 u32 ptys_proto = 0; 2376 int i; 2377 2378 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 2379 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed) 2380 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 2381 } 2382 return ptys_proto; 2383 } 2384 2385 static void 
mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2386 struct ethtool_link_ksettings *cmd)
2387 {
2388 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2389 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2390 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2391
2392 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2393 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2394 }
2395
2396 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2397 struct ethtool_link_ksettings *cmd)
2398 {
2399 if (!autoneg)
2400 return;
2401
2402 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2403 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2404 }
2405
2406 static void
2407 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2408 struct ethtool_link_ksettings *cmd)
2409 {
2410 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2411 return;
2412
2413 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2414 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2415 }
2416
2417 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2418 struct ethtool_link_ksettings *cmd)
2419 {
2420 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2421 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2422 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2423 char ptys_pl[MLXSW_REG_PTYS_LEN];
2424 u8 autoneg_status;
2425 bool autoneg;
2426 int err;
2427
2428 autoneg = mlxsw_sp_port->link.autoneg;
2429 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2430 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2431 if (err)
2432 return err;
2433 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2434 &eth_proto_oper);
2435
2436 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2437
2438 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2439
2440 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2441 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2442 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2443
2444 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2445 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2446 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2447 cmd);
2448
2449 return 0;
2450 }
2451
2452 static int
2453 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2454 const struct ethtool_link_ksettings *cmd)
2455 {
2456 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2457 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2458 char ptys_pl[MLXSW_REG_PTYS_LEN];
2459 u32 eth_proto_cap, eth_proto_new;
2460 bool autoneg;
2461 int err;
2462
2463 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2464 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2465 if (err)
2466 return err;
2467 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2468
2469 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2470 eth_proto_new = autoneg ?
2471 mlxsw_sp_to_ptys_advert_link(cmd) :
2472 mlxsw_sp_to_ptys_speed(cmd->base.speed);
2473
2474 eth_proto_new = eth_proto_new & eth_proto_cap;
2475 if (!eth_proto_new) {
2476 netdev_err(dev, "No supported speed requested\n");
2477 return -EINVAL;
2478 }
2479
2480 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2481 eth_proto_new);
2482 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2483 if (err)
2484 return err;
2485
/* Track the requested autoneg mode even if the interface is down, so
 * subsequent queries report it consistently.
 */
2486 mlxsw_sp_port->link.autoneg = autoneg;
2487
2488 if (!netif_running(dev))
2489 return 0;
2490
2491 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2492 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2493
2494 return 0;
2495 }
2496
2497 static int mlxsw_sp_flash_device(struct net_device *dev,
2498 struct ethtool_flash *flash)
2499 {
2500 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2501 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2502 const struct firmware *firmware;
2503 int err;
2504
2505 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
2506 return -EOPNOTSUPP;
2507
2508 dev_hold(dev);
2509 rtnl_unlock();
2510
2511 err = request_firmware_direct(&firmware, flash->data, &dev->dev);
2512 if (err)
2513 goto out;
2514 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
2515 release_firmware(firmware);
2516 out:
2517 rtnl_lock();
2518 dev_put(dev);
2519 return err;
2520 }
2521
2522 #define MLXSW_SP_QSFP_I2C_ADDR 0x50
2523
2524 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
2525 u16 offset, u16 size, void *data,
2526 unsigned int *p_read_size)
2527 {
2528 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2529 char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
2530 char mcia_pl[MLXSW_REG_MCIA_LEN];
2531 int status;
2532 int err;
2533
2534 size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
2535 mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
2536 0, 0, offset, size, MLXSW_SP_QSFP_I2C_ADDR);
2537
2538 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
2539 if (err)
2540 return err;
2541
2542 status = mlxsw_reg_mcia_status_get(mcia_pl);
2543 if (status)
2544 return -EIO;
2545
2546 mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
2547 memcpy(data, eeprom_tmp, size);
2548 *p_read_size = size;
2549
2550 return 0;
2551 }
2552
2553 enum mlxsw_sp_eeprom_module_info_rev_id {
2554 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00,
2555 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01,
2556 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03,
2557 };
2558
2559 enum mlxsw_sp_eeprom_module_info_id {
2560 MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03,
2561 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C,
2562 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
2563 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
2564 };
2565
2566 enum mlxsw_sp_eeprom_module_info {
2567 MLXSW_SP_EEPROM_MODULE_INFO_ID,
2568 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
2569 MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2570 };
2571
2572 static int mlxsw_sp_get_module_info(struct net_device *netdev,
2573 struct ethtool_modinfo *modinfo)
2574 {
2575 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2576 u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
2577 u8 module_rev_id, module_id;
2578 unsigned int read_size;
2579 int err;
2580
2581 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
2582 MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2583 module_info, &read_size);
2584 if (err)
2585 return err;
2586
2587 if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
2588 return -EIO;
2589
2590 module_rev_id =
module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID]; 2591 module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID]; 2592 2593 switch (module_id) { 2594 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP: 2595 modinfo->type = ETH_MODULE_SFF_8436; 2596 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2597 break; 2598 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS: 2599 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28: 2600 if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 || 2601 module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) { 2602 modinfo->type = ETH_MODULE_SFF_8636; 2603 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2604 } else { 2605 modinfo->type = ETH_MODULE_SFF_8436; 2606 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2607 } 2608 break; 2609 case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP: 2610 modinfo->type = ETH_MODULE_SFF_8472; 2611 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2612 break; 2613 default: 2614 return -EINVAL; 2615 } 2616 2617 return 0; 2618 } 2619 2620 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, 2621 struct ethtool_eeprom *ee, 2622 u8 *data) 2623 { 2624 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); 2625 int offset = ee->offset; 2626 unsigned int read_size; 2627 int i = 0; 2628 int err; 2629 2630 if (!ee->len) 2631 return -EINVAL; 2632 2633 memset(data, 0, ee->len); 2634 2635 while (i < ee->len) { 2636 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset, 2637 ee->len - i, data + i, 2638 &read_size); 2639 if (err) { 2640 netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n"); 2641 return err; 2642 } 2643 2644 i += read_size; 2645 offset += read_size; 2646 } 2647 2648 return 0; 2649 } 2650 2651 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 2652 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 2653 .get_link = ethtool_op_get_link, 2654 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 2655 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 2656 .get_strings = mlxsw_sp_port_get_strings, 2657 .set_phys_id = mlxsw_sp_port_set_phys_id, 2658 .get_ethtool_stats = mlxsw_sp_port_get_stats, 2659 .get_sset_count = mlxsw_sp_port_get_sset_count, 2660 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, 2661 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, 2662 .flash_device = mlxsw_sp_flash_device, 2663 .get_module_info = mlxsw_sp_get_module_info, 2664 .get_module_eeprom = mlxsw_sp_get_module_eeprom, 2665 }; 2666 2667 static int 2668 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 2669 { 2670 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2671 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width; 2672 char ptys_pl[MLXSW_REG_PTYS_LEN]; 2673 u32 eth_proto_admin; 2674 2675 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); 2676 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 2677 eth_proto_admin); 2678 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 2679 } 2680 2681 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 2682 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 2683 bool dwrr, u8 dwrr_weight) 2684 { 2685 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2686 char qeec_pl[MLXSW_REG_QEEC_LEN]; 2687 2688 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 2689 next_index); 2690 mlxsw_reg_qeec_de_set(qeec_pl, true); 2691 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 2692 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 2693 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 2694 } 2695 2696 int 
mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2697 enum mlxsw_reg_qeec_hr hr, u8 index,
2698 u8 next_index, u32 maxrate)
2699 {
2700 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2701 char qeec_pl[MLXSW_REG_QEEC_LEN];
2702
2703 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2704 next_index);
2705 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2706 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2707 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2708 }
2709
2710 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2711 u8 switch_prio, u8 tclass)
2712 {
2713 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2714 char qtct_pl[MLXSW_REG_QTCT_LEN];
2715
2716 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2717 tclass);
2718 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2719 }
2720
2721 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2722 {
2723 int err, i;
2724
2725 /* Set up the elements hierarchy, so that each TC is linked to
2726 * one subgroup, and all subgroups are members of the same group.
2727 */
2728 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2729 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2730 0);
2731 if (err)
2732 return err;
2733 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2734 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2735 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2736 0, false, 0);
2737 if (err)
2738 return err;
2739 }
2740 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2741 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2742 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2743 false, 0);
2744 if (err)
2745 return err;
2746 }
2747
2748 /* Make sure the max shaper is disabled in all hierarchies that
2749 * support it.
2750 */
2751 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2752 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2753 MLXSW_REG_QEEC_MAS_DIS);
2754 if (err)
2755 return err;
2756 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2757 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2758 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2759 i, 0,
2760 MLXSW_REG_QEEC_MAS_DIS);
2761 if (err)
2762 return err;
2763 }
2764 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2765 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2766 MLXSW_REG_QEEC_HIERARCY_TC,
2767 i, i,
2768 MLXSW_REG_QEEC_MAS_DIS);
2769 if (err)
2770 return err;
2771 }
2772
2773 /* Map all priorities to traffic class 0. */
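/* An illustrative sketch of the scheduling tree configured above:
 *
 *                  port
 *                    |
 *                 group 0
 *               /    ...   \
 *      subgroup 0   ...   subgroup 7
 *           |                  |
 *         TC 0      ...      TC 7
 *
 * The loop below then steers all eight switch priorities to TC 0; DCB ETS
 * configuration can later remap the priority-to-TC assignment and the
 * per-element weights through the same helpers.
 */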
2774 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2775 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2776 if (err)
2777 return err;
2778 }
2779
2780 return 0;
2781 }
2782
2783 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2784 bool split, u8 module, u8 width, u8 lane)
2785 {
2786 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2787 struct mlxsw_sp_port *mlxsw_sp_port;
2788 struct net_device *dev;
2789 int err;
2790
2791 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2792 if (err) {
2793 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2794 local_port);
2795 return err;
2796 }
2797
2798 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2799 if (!dev) {
2800 err = -ENOMEM;
2801 goto err_alloc_etherdev;
2802 }
2803 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
2804 mlxsw_sp_port = netdev_priv(dev);
2805 mlxsw_sp_port->dev = dev;
2806 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2807 mlxsw_sp_port->local_port = local_port;
2808 mlxsw_sp_port->pvid = 1;
2809 mlxsw_sp_port->split = split;
2810 mlxsw_sp_port->mapping.module = module;
2811 mlxsw_sp_port->mapping.width = width;
2812 mlxsw_sp_port->mapping.lane = lane;
2813 mlxsw_sp_port->link.autoneg = 1;
2814 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
2815 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
2816
2817 mlxsw_sp_port->pcpu_stats =
2818 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2819 if (!mlxsw_sp_port->pcpu_stats) {
2820 err = -ENOMEM;
2821 goto err_alloc_stats;
2822 }
2823
2824 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2825 GFP_KERNEL);
2826 if (!mlxsw_sp_port->sample) {
2827 err = -ENOMEM;
2828 goto err_alloc_sample;
2829 }
2830
2831 mlxsw_sp_port->hw_stats.cache =
2832 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2833
2834 if (!mlxsw_sp_port->hw_stats.cache) {
2835 err = -ENOMEM;
2836 goto err_alloc_hw_stats;
2837 }
2838 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2839 &update_stats_cache);
2840
2841 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2842 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2843
2844 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
2845 if (err) {
2846 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
2847 mlxsw_sp_port->local_port);
2848 goto err_port_module_map;
2849 }
2850
2851 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2852 if (err) {
2853 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2854 mlxsw_sp_port->local_port);
2855 goto err_port_swid_set;
2856 }
2857
2858 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2859 if (err) {
2860 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2861 mlxsw_sp_port->local_port);
2862 goto err_dev_addr_init;
2863 }
2864
2865 netif_carrier_off(dev);
2866
2867 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
2868 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2869 dev->hw_features |= NETIF_F_HW_TC;
2870
2871 dev->min_mtu = 0;
2872 dev->max_mtu = ETH_MAX_MTU;
2873
2874 /* Each packet needs to have a Tx header (metadata) on top of all other
2875 * headers.
2876 */
2877 dev->needed_headroom = MLXSW_TXHDR_LEN;
2878
2879 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2880 if (err) {
2881 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2882 mlxsw_sp_port->local_port);
2883 goto err_port_system_port_mapping_set;
2884 }
2885
2886 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2887 if (err) {
2888 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2889 mlxsw_sp_port->local_port);
2890 goto err_port_speed_by_width_set;
2891 }
2892
2893 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2894 if (err) {
2895 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2896 mlxsw_sp_port->local_port);
2897 goto err_port_mtu_set;
2898 }
2899
2900 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2901 if (err)
2902 goto err_port_admin_status_set;
2903
2904 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2905 if (err) {
2906 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2907 mlxsw_sp_port->local_port);
2908 goto err_port_buffers_init;
2909 }
2910
2911 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2912 if (err) {
2913 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2914 mlxsw_sp_port->local_port);
2915 goto err_port_ets_init;
2916 }
2917
2918 /* ETS and buffers must be initialized before DCB. */
2919 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2920 if (err) {
2921 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2922 mlxsw_sp_port->local_port);
2923 goto err_port_dcb_init;
2924 }
2925
2926 err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
2927 if (err) {
2928 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
2929 mlxsw_sp_port->local_port);
2930 goto err_port_fids_init;
2931 }
2932
2933 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
2934 if (IS_ERR(mlxsw_sp_port_vlan)) {
2935 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
2936 mlxsw_sp_port->local_port);
err = PTR_ERR(mlxsw_sp_port_vlan);
2937 goto err_port_vlan_get;
2938 }
2939
2940 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
2941 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
2942 err = register_netdev(dev);
2943 if (err) {
2944 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2945 mlxsw_sp_port->local_port);
2946 goto err_register_netdev;
2947 }
2948
2949 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2950 mlxsw_sp_port, dev, mlxsw_sp_port->split,
2951 module);
2952 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
2953 return 0;
2954
2955 err_register_netdev:
2956 mlxsw_sp->ports[local_port] = NULL;
2957 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2958 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
2959 err_port_vlan_get:
2960 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
2961 err_port_fids_init:
2962 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2963 err_port_dcb_init:
2964 err_port_ets_init:
2965 err_port_buffers_init:
2966 err_port_admin_status_set:
2967 err_port_mtu_set:
2968 err_port_speed_by_width_set:
2969 err_port_system_port_mapping_set:
2970 err_dev_addr_init:
2971 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2972 err_port_swid_set:
2973 mlxsw_sp_port_module_unmap(mlxsw_sp_port);
2974 err_port_module_map:
2975 kfree(mlxsw_sp_port->hw_stats.cache);
2976 err_alloc_hw_stats:
2977 kfree(mlxsw_sp_port->sample);
2978 err_alloc_sample:
2979 free_percpu(mlxsw_sp_port->pcpu_stats);
2980 err_alloc_stats:
2981 free_netdev(dev);
2982
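/* Each label in the unwind ladder above releases only what was initialized
 * before the corresponding failure point, in exact reverse order of the
 * setup sequence; err_alloc_etherdev below is the final step and undoes
 * only mlxsw_core_port_init().
 */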
err_alloc_etherdev: 2983 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 2984 return err; 2985 } 2986 2987 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 2988 { 2989 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2990 2991 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw); 2992 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 2993 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 2994 mlxsw_sp->ports[local_port] = NULL; 2995 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 2996 mlxsw_sp_port_vlan_flush(mlxsw_sp_port); 2997 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 2998 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 2999 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 3000 mlxsw_sp_port_module_unmap(mlxsw_sp_port); 3001 kfree(mlxsw_sp_port->hw_stats.cache); 3002 kfree(mlxsw_sp_port->sample); 3003 free_percpu(mlxsw_sp_port->pcpu_stats); 3004 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); 3005 free_netdev(mlxsw_sp_port->dev); 3006 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 3007 } 3008 3009 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) 3010 { 3011 return mlxsw_sp->ports[local_port] != NULL; 3012 } 3013 3014 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 3015 { 3016 int i; 3017 3018 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 3019 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3020 mlxsw_sp_port_remove(mlxsw_sp, i); 3021 kfree(mlxsw_sp->port_to_module); 3022 kfree(mlxsw_sp->ports); 3023 } 3024 3025 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 3026 { 3027 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 3028 u8 module, width, lane; 3029 size_t alloc_size; 3030 int i; 3031 int err; 3032 3033 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; 3034 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 3035 if (!mlxsw_sp->ports) 3036 return -ENOMEM; 3037 3038 mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL); 3039 if (!mlxsw_sp->port_to_module) { 3040 err = -ENOMEM; 3041 goto err_port_to_module_alloc; 3042 } 3043 3044 for (i = 1; i < max_ports; i++) { 3045 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 3046 &width, &lane); 3047 if (err) 3048 goto err_port_module_info_get; 3049 if (!width) 3050 continue; 3051 mlxsw_sp->port_to_module[i] = module; 3052 err = mlxsw_sp_port_create(mlxsw_sp, i, false, 3053 module, width, lane); 3054 if (err) 3055 goto err_port_create; 3056 } 3057 return 0; 3058 3059 err_port_create: 3060 err_port_module_info_get: 3061 for (i--; i >= 1; i--) 3062 if (mlxsw_sp_port_created(mlxsw_sp, i)) 3063 mlxsw_sp_port_remove(mlxsw_sp, i); 3064 kfree(mlxsw_sp->port_to_module); 3065 err_port_to_module_alloc: 3066 kfree(mlxsw_sp->ports); 3067 return err; 3068 } 3069 3070 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 3071 { 3072 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 3073 3074 return local_port - offset; 3075 } 3076 3077 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 3078 u8 module, unsigned int count) 3079 { 3080 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 3081 int err, i; 3082 3083 for (i = 0; i < count; i++) { 3084 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, 3085 module, width, i * width); 3086 if (err) 3087 goto err_port_create; 3088 } 3089 3090 return 0; 3091 3092 err_port_create: 3093 for (i--; i >= 0; i--) 3094 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3095 
mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3096 return err; 3097 } 3098 3099 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 3100 u8 base_port, unsigned int count) 3101 { 3102 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 3103 int i; 3104 3105 /* Split by four means we need to re-create two ports, otherwise 3106 * only one. 3107 */ 3108 count = count / 2; 3109 3110 for (i = 0; i < count; i++) { 3111 local_port = base_port + i * 2; 3112 module = mlxsw_sp->port_to_module[local_port]; 3113 3114 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 3115 width, 0); 3116 } 3117 } 3118 3119 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 3120 unsigned int count) 3121 { 3122 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3123 struct mlxsw_sp_port *mlxsw_sp_port; 3124 u8 module, cur_width, base_port; 3125 int i; 3126 int err; 3127 3128 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3129 if (!mlxsw_sp_port) { 3130 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3131 local_port); 3132 return -EINVAL; 3133 } 3134 3135 module = mlxsw_sp_port->mapping.module; 3136 cur_width = mlxsw_sp_port->mapping.width; 3137 3138 if (count != 2 && count != 4) { 3139 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 3140 return -EINVAL; 3141 } 3142 3143 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 3144 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 3145 return -EINVAL; 3146 } 3147 3148 /* Make sure we have enough slave (even) ports for the split. */ 3149 if (count == 2) { 3150 base_port = local_port; 3151 if (mlxsw_sp->ports[base_port + 1]) { 3152 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3153 return -EINVAL; 3154 } 3155 } else { 3156 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3157 if (mlxsw_sp->ports[base_port + 1] || 3158 mlxsw_sp->ports[base_port + 3]) { 3159 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 3160 return -EINVAL; 3161 } 3162 } 3163 3164 for (i = 0; i < count; i++) 3165 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3166 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3167 3168 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); 3169 if (err) { 3170 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 3171 goto err_port_split_create; 3172 } 3173 3174 return 0; 3175 3176 err_port_split_create: 3177 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3178 return err; 3179 } 3180 3181 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) 3182 { 3183 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3184 struct mlxsw_sp_port *mlxsw_sp_port; 3185 u8 cur_width, base_port; 3186 unsigned int count; 3187 int i; 3188 3189 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3190 if (!mlxsw_sp_port) { 3191 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 3192 local_port); 3193 return -EINVAL; 3194 } 3195 3196 if (!mlxsw_sp_port->split) { 3197 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); 3198 return -EINVAL; 3199 } 3200 3201 cur_width = mlxsw_sp_port->mapping.width; 3202 count = cur_width == 1 ? 4 : 2; 3203 3204 base_port = mlxsw_sp_cluster_base_port_get(local_port); 3205 3206 /* Determine which ports to remove. 
*/ 3207 if (count == 2 && local_port >= base_port + 2) 3208 base_port = base_port + 2; 3209 3210 for (i = 0; i < count; i++) 3211 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) 3212 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 3213 3214 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 3215 3216 return 0; 3217 } 3218 3219 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 3220 char *pude_pl, void *priv) 3221 { 3222 struct mlxsw_sp *mlxsw_sp = priv; 3223 struct mlxsw_sp_port *mlxsw_sp_port; 3224 enum mlxsw_reg_pude_oper_status status; 3225 u8 local_port; 3226 3227 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 3228 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3229 if (!mlxsw_sp_port) 3230 return; 3231 3232 status = mlxsw_reg_pude_oper_status_get(pude_pl); 3233 if (status == MLXSW_PORT_OPER_STATUS_UP) { 3234 netdev_info(mlxsw_sp_port->dev, "link up\n"); 3235 netif_carrier_on(mlxsw_sp_port->dev); 3236 } else { 3237 netdev_info(mlxsw_sp_port->dev, "link down\n"); 3238 netif_carrier_off(mlxsw_sp_port->dev); 3239 } 3240 } 3241 3242 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 3243 u8 local_port, void *priv) 3244 { 3245 struct mlxsw_sp *mlxsw_sp = priv; 3246 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3247 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 3248 3249 if (unlikely(!mlxsw_sp_port)) { 3250 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 3251 local_port); 3252 return; 3253 } 3254 3255 skb->dev = mlxsw_sp_port->dev; 3256 3257 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 3258 u64_stats_update_begin(&pcpu_stats->syncp); 3259 pcpu_stats->rx_packets++; 3260 pcpu_stats->rx_bytes += skb->len; 3261 u64_stats_update_end(&pcpu_stats->syncp); 3262 3263 skb->protocol = eth_type_trans(skb, skb->dev); 3264 netif_receive_skb(skb); 3265 } 3266 3267 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 3268 void *priv) 3269 { 3270 skb->offload_fwd_mark = 1; 3271 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 3272 } 3273 3274 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, 3275 void *priv) 3276 { 3277 struct mlxsw_sp *mlxsw_sp = priv; 3278 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 3279 struct psample_group *psample_group; 3280 u32 size; 3281 3282 if (unlikely(!mlxsw_sp_port)) { 3283 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", 3284 local_port); 3285 goto out; 3286 } 3287 if (unlikely(!mlxsw_sp_port->sample)) { 3288 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", 3289 local_port); 3290 goto out; 3291 } 3292 3293 size = mlxsw_sp_port->sample->truncate ? 
3294 mlxsw_sp_port->sample->trunc_size : skb->len; 3295 3296 rcu_read_lock(); 3297 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); 3298 if (!psample_group) 3299 goto out_unlock; 3300 psample_sample_packet(psample_group, skb, size, 3301 mlxsw_sp_port->dev->ifindex, 0, 3302 mlxsw_sp_port->sample->rate); 3303 out_unlock: 3304 rcu_read_unlock(); 3305 out: 3306 consume_skb(skb); 3307 } 3308 3309 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3310 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 3311 _is_ctrl, SP_##_trap_group, DISCARD) 3312 3313 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 3314 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 3315 _is_ctrl, SP_##_trap_group, DISCARD) 3316 3317 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 3318 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 3319 3320 static const struct mlxsw_listener mlxsw_sp_listener[] = { 3321 /* Events */ 3322 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 3323 /* L2 traps */ 3324 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), 3325 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), 3326 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true), 3327 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), 3328 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), 3329 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), 3330 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), 3331 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), 3332 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), 3333 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), 3334 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), 3335 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), 3336 /* L3 traps */ 3337 MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3338 MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3339 MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), 3340 MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false), 3341 MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), 3342 MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), 3343 MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false), 3344 MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false), 3345 /* PKT Sample trap */ 3346 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 3347 false, SP_IP2ME, DISCARD), 3348 /* ACL trap */ 3349 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), 3350 }; 3351 3352 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 3353 { 3354 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 3355 enum mlxsw_reg_qpcr_ir_units ir_units; 3356 int max_cpu_policers; 3357 bool is_bytes; 3358 u8 burst_size; 3359 u32 rate; 3360 int i, err; 3361 3362 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 3363 return -EIO; 3364 3365 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3366 3367 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 3368 for (i = 0; i < max_cpu_policers; i++) { 3369 is_bytes = false; 3370 switch (i) { 3371 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3372 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3373 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3374 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3375 rate = 128; 3376 burst_size = 7; 3377 break; 3378 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3379 rate = 16 * 1024; 3380 burst_size = 10; 3381 break; 3382 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4: 3383 case 
MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3384 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3385 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS: 3386 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3387 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3388 rate = 1024; 3389 burst_size = 7; 3390 break; 3391 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3392 is_bytes = true; 3393 rate = 4 * 1024; 3394 burst_size = 4; 3395 break; 3396 default: 3397 continue; 3398 } 3399 3400 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 3401 burst_size); 3402 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 3403 if (err) 3404 return err; 3405 } 3406 3407 return 0; 3408 } 3409 3410 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 3411 { 3412 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3413 enum mlxsw_reg_htgt_trap_group i; 3414 int max_cpu_policers; 3415 int max_trap_groups; 3416 u8 priority, tc; 3417 u16 policer_id; 3418 int err; 3419 3420 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 3421 return -EIO; 3422 3423 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 3424 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 3425 3426 for (i = 0; i < max_trap_groups; i++) { 3427 policer_id = i; 3428 switch (i) { 3429 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: 3430 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: 3431 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: 3432 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 3433 priority = 5; 3434 tc = 5; 3435 break; 3436 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4: 3437 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: 3438 priority = 4; 3439 tc = 4; 3440 break; 3441 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: 3442 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: 3443 priority = 3; 3444 tc = 3; 3445 break; 3446 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: 3447 priority = 2; 3448 tc = 2; 3449 break; 3450 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS: 3451 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 3452 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: 3453 priority = 1; 3454 tc = 1; 3455 break; 3456 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 3457 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 3458 tc = MLXSW_REG_HTGT_DEFAULT_TC; 3459 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 3460 break; 3461 default: 3462 continue; 3463 } 3464 3465 if (max_cpu_policers <= policer_id && 3466 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 3467 return -EIO; 3468 3469 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 3470 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3471 if (err) 3472 return err; 3473 } 3474 3475 return 0; 3476 } 3477 3478 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 3479 { 3480 int i; 3481 int err; 3482 3483 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 3484 if (err) 3485 return err; 3486 3487 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 3488 if (err) 3489 return err; 3490 3491 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3492 err = mlxsw_core_trap_register(mlxsw_sp->core, 3493 &mlxsw_sp_listener[i], 3494 mlxsw_sp); 3495 if (err) 3496 goto err_listener_register; 3497 3498 } 3499 return 0; 3500 3501 err_listener_register: 3502 for (i--; i >= 0; i--) { 3503 mlxsw_core_trap_unregister(mlxsw_sp->core, 3504 &mlxsw_sp_listener[i], 3505 mlxsw_sp); 3506 } 3507 return err; 3508 } 3509 3510 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 3511 { 3512 int i; 3513 3514 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { 3515 mlxsw_core_trap_unregister(mlxsw_sp->core, 3516 &mlxsw_sp_listener[i], 3517 mlxsw_sp); 3518 } 3519 } 3520 3521 
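/* The SLCR hash programmed in mlxsw_sp_lag_init() below folds L2 (SMAC,
 * DMAC, ethertype, VLAN ID), L3 (SIP, DIP, IP protocol) and L4 (source and
 * destination port) fields into LAG member selection, roughly the hardware
 * counterpart of the bonding driver's layer3+4 transmit hash with the L2
 * fields added, so packets of one flow stay on a single member while
 * distinct flows spread across the LAG.
 */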
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 3522 { 3523 char slcr_pl[MLXSW_REG_SLCR_LEN]; 3524 int err; 3525 3526 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 3527 MLXSW_REG_SLCR_LAG_HASH_DMAC | 3528 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 3529 MLXSW_REG_SLCR_LAG_HASH_VLANID | 3530 MLXSW_REG_SLCR_LAG_HASH_SIP | 3531 MLXSW_REG_SLCR_LAG_HASH_DIP | 3532 MLXSW_REG_SLCR_LAG_HASH_SPORT | 3533 MLXSW_REG_SLCR_LAG_HASH_DPORT | 3534 MLXSW_REG_SLCR_LAG_HASH_IPPROTO); 3535 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 3536 if (err) 3537 return err; 3538 3539 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 3540 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 3541 return -EIO; 3542 3543 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 3544 sizeof(struct mlxsw_sp_upper), 3545 GFP_KERNEL); 3546 if (!mlxsw_sp->lags) 3547 return -ENOMEM; 3548 3549 return 0; 3550 } 3551 3552 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 3553 { 3554 kfree(mlxsw_sp->lags); 3555 } 3556 3557 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 3558 { 3559 char htgt_pl[MLXSW_REG_HTGT_LEN]; 3560 3561 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 3562 MLXSW_REG_HTGT_INVALID_POLICER, 3563 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 3564 MLXSW_REG_HTGT_DEFAULT_TC); 3565 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 3566 } 3567 3568 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 3569 const struct mlxsw_bus_info *mlxsw_bus_info) 3570 { 3571 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3572 int err; 3573 3574 mlxsw_sp->core = mlxsw_core; 3575 mlxsw_sp->bus_info = mlxsw_bus_info; 3576 3577 err = mlxsw_sp_fw_rev_validate(mlxsw_sp); 3578 if (err) { 3579 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); 3580 return err; 3581 } 3582 3583 err = mlxsw_sp_base_mac_get(mlxsw_sp); 3584 if (err) { 3585 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 3586 return err; 3587 } 3588 3589 err = mlxsw_sp_fids_init(mlxsw_sp); 3590 if (err) { 3591 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 3592 return err; 3593 } 3594 3595 err = mlxsw_sp_traps_init(mlxsw_sp); 3596 if (err) { 3597 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 3598 goto err_traps_init; 3599 } 3600 3601 err = mlxsw_sp_buffers_init(mlxsw_sp); 3602 if (err) { 3603 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 3604 goto err_buffers_init; 3605 } 3606 3607 err = mlxsw_sp_lag_init(mlxsw_sp); 3608 if (err) { 3609 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 3610 goto err_lag_init; 3611 } 3612 3613 err = mlxsw_sp_switchdev_init(mlxsw_sp); 3614 if (err) { 3615 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 3616 goto err_switchdev_init; 3617 } 3618 3619 err = mlxsw_sp_router_init(mlxsw_sp); 3620 if (err) { 3621 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 3622 goto err_router_init; 3623 } 3624 3625 err = mlxsw_sp_span_init(mlxsw_sp); 3626 if (err) { 3627 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 3628 goto err_span_init; 3629 } 3630 3631 err = mlxsw_sp_acl_init(mlxsw_sp); 3632 if (err) { 3633 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 3634 goto err_acl_init; 3635 } 3636 3637 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 3638 if (err) { 3639 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 3640 goto err_counter_pool_init; 3641 } 3642 3643 err 
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
		return err;
	}

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
	return err;
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}

static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels = 1,
	.max_vepa_channels = 0,
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_max_pgt = 1,
	.max_pgt = 0,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_offset_flood_tables = 3,
	.fid_offset_flood_table_size = VLAN_N_VID - 1,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_kvd_split_data = 1,
	.kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
	.kvd_hash_single_parts = 2,
	.kvd_hash_double_parts = 1,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.resource_query_enable = 1,
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind = mlxsw_sp_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp_config_profile,
};

bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

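/* Spectrum port netdevs are identified purely by their netdev_ops
 * pointer, which lets the helpers below pick driver ports out of
 * arbitrary stacks of upper devices (LAG, bridge, VLAN and so on).
 */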
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

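/* Resolve a LAG device to a hardware LAG ID: reuse the ID already bound
 * to this device if one exists, otherwise claim the first free slot.
 * -EBUSY means all MAX_LAG slots are taken by other LAG devices.
 */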
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

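/* Leaving a LAG undoes mlxsw_sp_port_lag_join() step by step: stop
 * collecting on the port, remove it from the collector, flush its
 * VLANs, and destroy the hardware LAG once its last port is gone.
 */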
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

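/* The SPMS register carries an STP state for every one of the
 * VLAN_N_VID possible VIDs, so the payload is heap-allocated here,
 * presumably because it is too large to sit comfortably on the stack.
 */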
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
				     true, false);
	if (err)
		goto err_port_vlan_set;
	return 0;

err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		}
		break;
	}

	return err;
}

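/* NETDEV_CHANGELOWERSTATE reflects the LAG driver's view of the member
 * port (e.g. whether it should be transmitting). Failures are only
 * logged, since a lower-state change cannot be vetoed at this point.
 */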
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct net_device *upper_dev;
	int err = 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);

	return 0;
}

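/* Top-level netdevice notifier: events are dispatched by device type.
 * Address/MTU changes go to the router code first, then VRF events,
 * then events on physical ports, LAG devices and VLAN uppers; LAG and
 * VLAN events are replayed against each member port underneath.
 */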
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);