// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"

#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
	"mellanox/lc_ini_bundle_" \
	__stringify(MLXSW_SP_FWREV_MINOR) "_" \
	__stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";

static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

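/* Tx header. The driver prepends MLXSW_TXHDR_LEN bytes of metadata to every
 * packet handed to the device; the fields below are packed into that space
 * with the MLXSW_ITEM32() accessors. mlxsw_sp_txhdr_construct() further down
 * shows how they are filled in for a control packet.
 */
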
/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

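/* A minimal usage sketch for the flow counter helpers above: allocate an
 * index (the counter is cleared as part of allocation), sample it, then
 * release it.
 *
 *	unsigned int counter_index;
 *	u64 packets, bytes;
 *
 *	if (!mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index)) {
 *		mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
 *					  &packets, &bytes);
 *		mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
 *	}
 */
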
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING:
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      const unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
			mlxsw_sp_port->local_port);
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
					  mlxsw_sp_port->dev->dev_addr);
}

static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;

	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
	return 0;
}

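/* mlxsw_sp_port_mtu_set() below adds the Ethernet header and the driver's
 * Tx header to the requested MTU before checking it against the maximum
 * queried by mlxsw_sp_port_max_mtu_get() and programming it via PMTU, i.e.
 * the hardware limit is expressed in terms of the full frame size.
 */
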
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	if (mtu > mlxsw_sp_port->max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
				  u16 local_port, u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
	switch (ethtype) {
	case ETH_P_8021Q:
		*p_sver_type = 0;
		break;
	case ETH_P_8021AD:
		*p_sver_type = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spevet_pl[MLXSW_REG_SPEVET_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid, u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
			     sver_type);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
				u16 local_port, char *pmlp_pl,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	bool separate_rxtx;
	u8 first_lane;
	u8 slot_index;
	u8 module;
	u8 width;
	int i;

	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
	first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->slot_index = slot_index;
	port_mapping->width = width;
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
					       pmlp_pl, port_mapping);
}

static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			 const struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i, err;

	mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
				  port_mapping->module);

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
					      port_mapping->slot_index);
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		goto err_pmlp_write;
	return 0;

err_pmlp_write:
	mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
				    port_mapping->module);
	return err;
}

static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				       u8 slot_index, u8 module)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
}

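/* Bringing a port up (ndo_open) first marks its module as in use via
 * mlxsw_env_module_port_up() and only then sets the port administratively
 * up; mlxsw_sp_port_stop() undoes both steps in the reverse order.
 */
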
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_module_port_up(mlxsw_sp->core,
				       mlxsw_sp_port->mapping.slot_index,
				       mlxsw_sp_port->mapping.module);
	if (err)
		return err;
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_admin_status_set;
	netif_start_queue(dev);
	return 0;

err_port_admin_status_set:
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return err;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	netif_stop_queue(dev);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return 0;
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}

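/* When changing the MTU, the port's headroom buffers are resized first and
 * rolled back to their original configuration if programming the new MTU
 * fails, so the buffers never end up undersized for the active MTU.
 */
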
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		if (err)
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

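/* VLAN membership is programmed through the SPVM register, which is limited
 * to MLXSW_REG_SPVM_REC_MAX_COUNT records per write; mlxsw_sp_port_vlan_set()
 * therefore walks the requested VID range in chunks of that size.
 */
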
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
		return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

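/* Loopback is toggled through the PPLR register. A running port is taken
 * administratively down for the duration of the change and brought back up
 * afterwards.
 */
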
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

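/* Scheduling element hierarchy created by mlxsw_sp_port_ets_init() below
 * (a sketch; TCs 8..15 are the multicast companions of TCs 0..7):
 *
 *	port --- group 0 --- subgroup i (i = 0..7)
 *	                        |--- TC i      (dwrr = false)
 *	                        \--- TC i + 8  (dwrr = true, weight 100)
 */
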
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the element hierarchy, so that each TC is linked to
	 * one subgroup, and all subgroups are members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;
	u64 overheat_counter;
	int err;

	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
						    module, &overheat_counter);
	if (err)
		return err;

	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
	return 0;
}

int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvc_pl[MLXSW_REG_SPVC_LEN];

	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
			    is_8021ad_tagged, is_8021q_tagged);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}

static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
					u16 local_port, u8 *port_number,
					u8 *split_port_subnumber,
					u8 *slot_index)
{
	char pllp_pl[MLXSW_REG_PLLP_LEN];
	int err;

	mlxsw_reg_pllp_pack(pllp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
	if (err)
		return err;
	mlxsw_reg_pllp_unpack(pllp_pl, port_number,
			      split_port_subnumber, slot_index);
	return 0;
}

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false so that the local port treats
	 * only packets with an 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
				   port_mapping->slot_index,
				   port_mapping->module);
	return err;
}

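/* Teardown below is the mirror image of mlxsw_sp_port_create() and of its
 * error path above.
 */
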
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}

static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

static bool mlxsw_sp_local_port_valid(u16 local_port)
{
	return local_port != MLXSW_PORT_CPU_PORT;
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	if (!mlxsw_sp_local_port_valid(local_port))
		return false;
	return mlxsw_sp->ports[local_port] != NULL;
}

static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
					   u16 local_port, bool enable)
{
	char pmecr_pl[MLXSW_REG_PMECR_LEN];

	mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
			     enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
				      MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
}
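
/* PMLPE events are delivered in atomic context, so the listener below only
 * queues a copy of the PMLP payload. The queued events are processed later
 * in process context by mlxsw_sp_port_mapping_events_work(), which creates
 * the corresponding ports under the devlink instance lock.
 */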
struct mlxsw_sp_port_mapping_event {
	struct list_head list;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
};

static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp *mlxsw_sp;
	struct devlink *devlink;
	LIST_HEAD(event_queue);
	u16 local_port;
	int err;

	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
	devlink = priv_to_devlink(mlxsw_sp->core);

	spin_lock_bh(&events->queue_lock);
	list_splice_init(&events->queue, &event_queue);
	spin_unlock_bh(&events->queue_lock);

	list_for_each_entry_safe(event, next_event, &event_queue, list) {
		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
						      event->pmlp_pl, &port_mapping);
		if (err)
			goto out;

		if (WARN_ON_ONCE(!port_mapping.width))
			goto out;

		devl_lock(devlink);

		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
			mlxsw_sp_port_create(mlxsw_sp, local_port,
					     false, &port_mapping);
		else
			WARN_ON_ONCE(1);

		devl_unlock(devlink);

		mlxsw_sp->port_mapping[local_port] = port_mapping;

out:
		kfree(event);
	}
}

static void
mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
				    char *pmlp_pl, void *priv)
{
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping_event *event;
	struct mlxsw_sp *mlxsw_sp = priv;
	u16 local_port;

	local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;

	events = &mlxsw_sp->port_mapping_events;
	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
	spin_lock(&events->queue_lock);
	list_add_tail(&event->list, &events->queue);
	spin_unlock(&events->queue_lock);
	mlxsw_core_schedule_work(&events->work);
}

static void
__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;

	events = &mlxsw_sp->port_mapping_events;

	/* Caller needs to make sure that no new event is going to appear. */
	cancel_work_sync(&events->work);
	list_for_each_entry_safe(event, next_event, &events->queue, list) {
		list_del(&event->list);
		kfree(event);
	}
}
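
/* When removing all ports, first disable mapping-event generation and flush
 * the event queue, so that no new port can show up concurrently; only then
 * remove the existing ports under the devlink instance lock.
 */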
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	int i;

	for (i = 1; i < max_ports; i++)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);

	devl_lock(devlink);
	for (i = 1; i < max_ports; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	devl_unlock(devlink);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
}

static void
mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
			       bool (*selector)(void *priv, u16 local_port),
			       void *priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
	int i;

	for (i = 1; i < max_ports; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	events = &mlxsw_sp->port_mapping_events;
	INIT_LIST_HEAD(&events->queue);
	spin_lock_init(&events->queue_lock);
	INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
		if (err)
			goto err_event_enable;
	}

	devl_lock(devlink);
	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		if (!port_mapping->width)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
		if (err)
			goto err_port_create;
	}
	devl_unlock(devlink);
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	i = max_ports;
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
	devl_unlock(devlink);
err_event_enable:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}
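
/* Cache the module mapping (PMLP) of all ports at driver init time. The
 * cached mapping is later used when creating the ports and when recreating
 * original ports during unsplit.
 */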
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
		if (err)
			goto err_port_module_info_get;
	}
	return 0;

err_port_module_info_get:
	kfree(mlxsw_sp->port_mapping);
	return err;
}

static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->port_mapping);
}
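
/* Create 'count' split ports out of one parent port: each split port gets
 * 1/count of the parent's width, consecutive split ports take consecutive
 * lane ranges, and the local port numbers to use are taken from the PMTDB
 * register payload supplied by the caller.
 */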
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (!mlxsw_sp_local_port_valid(s_local_port))
			continue;

		err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
					   true, &split_port_mapping);
		if (err)
			goto err_port_create;
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}
	return err;
}

static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 unsigned int count,
					 const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the gap and recreate them. */
	for (i = 0; i < count; i++) {
		u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		port_mapping = &mlxsw_sp->port_mapping[local_port];
		if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
			continue;
		mlxsw_sp_port_create(mlxsw_sp, local_port,
				     false, port_mapping);
	}
}

static struct mlxsw_sp_port *
mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
		return mlxsw_sp->ports[local_port];
	return NULL;
}

static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pmtdb_status status;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
		return -EINVAL;
	}

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
		return -EINVAL;
	}

	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
					 count, pmtdb_pl);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return err;
}
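
/* Unsplit reverses mlxsw_sp_port_split(): the split ports listed in PMTDB
 * are removed and the original unsplit ports are recreated from the cached
 * port mapping.
 */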
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	count = mlxsw_sp_port->mapping.module_width /
		mlxsw_sp_port->mapping.width;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return 0;
}

static void
mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int i;

	for (i = 0; i < TC_MAX_QUEUE; i++)
		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
}

static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u16 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);

	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}

static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u16 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}

static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
}

static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
}
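
/* Basic RX handler for trapped packets: associate the skb with the ingress
 * port netdevice, bump the per-CPU RX counters and hand the packet to the
 * networking stack. The _mark and _l3_mark variants additionally set
 * skb->offload_fwd_mark (and skb->offload_l3_fwd_mark), telling the stack
 * that the packet was already forwarded in hardware.
 */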
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u16 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
					      u16 local_port, void *priv)
{
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u16 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}

#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)	\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};

static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

static const struct mlxsw_listener mlxsw_sp2_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
};
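
/* Set up a policer for every trap group that needs one, so that packets
 * trapped to the CPU are rate limited. Trap groups not listed in the
 * switch statement keep their defaults. Policer IDs claimed here are
 * recorded in the trap->policers_usage bitmap.
 */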
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			continue;
		}

		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
					ARRAY_SIZE(mlxsw_sp_listener),
					mlxsw_sp);
	if (err)
		goto err_traps_register;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
					mlxsw_sp->listeners_count, mlxsw_sp);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener),
				    mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
				    mlxsw_sp->listeners_count, mlxsw_sp);
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
	kfree(mlxsw_sp->trap);
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
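
/* Configure LAG hashing: the hash seed is derived from the switch base MAC
 * and the hash covers L2 (SMAC, DMAC, ethertype, VLAN) as well as L3/L4
 * (IP addresses, L4 ports, IP protocol) fields. Also allocate the array
 * used to track the upper device of each LAG.
 */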
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init = mlxsw_sp1_ptp_clock_init,
	.clock_fini = mlxsw_sp1_ptp_clock_fini,
	.init = mlxsw_sp1_ptp_init,
	.fini = mlxsw_sp1_ptp_fini,
	.receive = mlxsw_sp1_ptp_receive,
	.transmitted = mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp1_ptp_shaper_work,
	.get_ts_info = mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats = mlxsw_sp1_get_stats,
};

static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init = mlxsw_sp2_ptp_clock_init,
	.clock_fini = mlxsw_sp2_ptp_clock_fini,
	.init = mlxsw_sp2_ptp_init,
	.fini = mlxsw_sp2_ptp_fini,
	.receive = mlxsw_sp2_ptp_receive,
	.transmitted = mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp2_ptp_shaper_work,
	.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats = mlxsw_sp2_get_stats,
};
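
/* Packet sampling triggers are kept in a hashtable keyed by trigger type
 * and local port. Nodes are refcounted, so the same trigger can be set
 * multiple times as long as the sampling parameters match, and lookups on
 * the RX path are done locklessly under the RCU read lock.
 */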
struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;
	struct mlxsw_sp_sample_params params;
	struct rhash_head ht_node;
	struct rcu_head rcu;
	refcount_t refcount;
};

static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};

static void
mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
				 const struct mlxsw_sp_sample_trigger *trigger)
{
	memset(key, 0, sizeof(*key));
	key->type = trigger->type;
	key->local_port = trigger->local_port;
}

/* RCU read lock must be held */
struct mlxsw_sp_sample_params *
mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	mlxsw_sp_sample_trigger_key_init(&key, trigger);
	trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
					 mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return NULL;

	return &trigger_node->params;
}

static int
mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
				  const struct mlxsw_sp_sample_trigger *trigger,
				  const struct mlxsw_sp_sample_params *params)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	int err;

	trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
	if (!trigger_node)
		return -ENOMEM;

	trigger_node->trigger = *trigger;
	trigger_node->params = *params;
	refcount_set(&trigger_node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
				     &trigger_node->ht_node,
				     mlxsw_sp_sample_trigger_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(trigger_node);
	return err;
}

static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	kfree_rcu(trigger_node, rcu);
}

int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}

void
mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return;

	if (!refcount_dec_and_test(&trigger_node->refcount))
		return;

	mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
}
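
/* Driver-wide parsing configuration. The defaults below cover the common
 * case: the ASIC parses the first 96 bytes of each packet and VXLAN
 * decoding uses the IANA-assigned UDP destination port 4789; both can be
 * changed at runtime under parsing.lock.
 */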
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
#define MLXSW_SP_INCREASED_PARSING_DEPTH 128
#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789

static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
	mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
	mutex_init(&mlxsw_sp->parsing.lock);
}

static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->parsing.lock);
}

struct mlxsw_sp_ipv6_addr_node {
	struct in6_addr key;
	struct rhash_head ht_node;
	u32 kvdl_index;
	refcount_t refcount;
};

static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
	.key_len = sizeof(struct in6_addr),
	.automatic_shrinking = true,
};

static int
mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
			u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	char rips_pl[MLXSW_REG_RIPS_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  p_kvdl_index);
	if (err)
		return err;

	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
	if (err)
		goto err_rips_write;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		err = -ENOMEM;
		goto err_node_alloc;
	}

	node->key = *addr6;
	node->kvdl_index = *p_kvdl_index;
	refcount_set(&node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
				     &node->ht_node,
				     mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(node);
err_node_alloc:
err_rips_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   *p_kvdl_index);
	return err;
}

static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipv6_addr_node *node)
{
	u32 kvdl_index = node->kvdl_index;

	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
			       mlxsw_sp_ipv6_addr_ht_params);
	kfree(node);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   kvdl_index);
}
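
/* The get/put pair below reference-counts KVDL entries of IPv6 addresses:
 * the first user of an address allocates a KVDL entry and programs the
 * address via the RIPS register, later users only bump the refcount, and
 * the entry is freed once the last user calls the put function. The
 * hashtable is protected by ipv6_addr_ht_lock.
 */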
int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
				      const struct in6_addr *addr6,
				      u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	int err = 0;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (node) {
		refcount_inc(&node->refcount);
		*p_kvdl_index = node->kvdl_index;
		goto out_unlock;
	}

	err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
	return err;
}

void
mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
{
	struct mlxsw_sp_ipv6_addr_node *node;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (WARN_ON(!node))
		goto out_unlock;

	if (!refcount_dec_and_test(&node->refcount))
		goto out_unlock;

	mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
}

static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
			      &mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		return err;

	mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
	return 0;
}

static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
	rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
}

static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_sp_parsing_init(mlxsw_sp);
	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_pgt_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
		goto err_pgt_init;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
		goto err_ipv6_addr_ht_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after SPAN is initialized, so that the
	 * event handler can call SPAN respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
			      &mlxsw_sp_sample_trigger_ht_params);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
		goto err_sample_trigger_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	mlxsw_sp->ubridge = false;
	return 0;

err_ports_create:
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
err_sample_trigger_init:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
err_ipv6_addr_ht_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_pgt_fini(mlxsw_sp);
err_pgt_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
	return err;
}
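
/* The per-ASIC init callbacks below differ only in the operation tables
 * they select (KVDL, ACL, shared buffer, SPAN, PTP and friends); the
 * common initialization sequence itself is mlxsw_sp_init() above.
 */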
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
	mlxsw_sp->pgt_smpe_index_valid = true;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
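
/* Spectrum-3 and Spectrum-4 reuse most of the Spectrum-2 operations:
 * Spectrum-3 overrides the shared buffer and SPAN ops, and Spectrum-4
 * additionally uses its own flex-key and ACL Bloom filter ops.
 */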
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
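
/* Teardown mirrors mlxsw_sp_init(): components are finalized in reverse
 * order of their initialization.
 */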
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_pgt_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_kvd_sizes = 1,
	.kvd_hash_single_parts = 59,
	.kvd_hash_double_parts = 41,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
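
/* The KVD (key-value database) is partitioned into a linear region and two
 * hash regions (single and double entry size). The helper below computes
 * the devlink size parameters of each partition: each maximum is the total
 * KVD size minus the minimum sizes of the other partitions, and sizes are
 * allocated in MLXSW_SP_KVD_GRANULARITY units.
 */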
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					 kvd_size, MLXSW_SP_RESOURCE_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &kvd_size_params);
}

static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
					 max_span, MLXSW_SP_RESOURCE_SPAN,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &span_size_params);
}

static int
mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u8 max_rif_mac_profiles;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
		max_rif_mac_profiles = 1;
	else
		max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
							  MAX_RIF_MAC_PROFILES);
	devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
					  max_rif_mac_profiles, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink,
					 "rif_mac_profiles",
					 max_rif_mac_profiles,
					 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &size_params);
}

static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u64 max_rifs;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
		return -EIO;

	max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
	devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, "rifs", max_rifs,
					 MLXSW_SP_RESOURCE_RIFS,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &size_params);
}
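
/* Register the devlink resources of each ASIC generation. On failure,
 * devlink_resources_unregister() removes everything registered so far,
 * which is why the error labels below share a single unwind point.
 */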
MAX_RIFS);
3611 	devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
3612 					  1, DEVLINK_RESOURCE_UNIT_ENTRY);
3613 
3614 	return devlink_resource_register(devlink, "rifs", max_rifs,
3615 					 MLXSW_SP_RESOURCE_RIFS,
3616 					 DEVLINK_RESOURCE_ID_PARENT_TOP,
3617 					 &size_params);
3618 }
3619 
3620 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
3621 {
3622 	int err;
3623 
3624 	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
3625 	if (err)
3626 		return err;
3627 
3628 	err = mlxsw_sp_resources_span_register(mlxsw_core);
3629 	if (err)
3630 		goto err_resources_span_register;
3631 
3632 	err = mlxsw_sp_counter_resources_register(mlxsw_core);
3633 	if (err)
3634 		goto err_resources_counter_register;
3635 
3636 	err = mlxsw_sp_policer_resources_register(mlxsw_core);
3637 	if (err)
3638 		goto err_policer_resources_register;
3639 
3640 	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
3641 	if (err)
3642 		goto err_resources_rif_mac_profile_register;
3643 
3644 	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
3645 	if (err)
3646 		goto err_resources_rifs_register;
3647 
3648 	return 0;
3649 
3650 err_resources_rifs_register:
3651 err_resources_rif_mac_profile_register:
3652 err_policer_resources_register:
3653 err_resources_counter_register:
3654 err_resources_span_register:
3655 	devlink_resources_unregister(priv_to_devlink(mlxsw_core));
3656 	return err;
3657 }
3658 
3659 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
3660 {
3661 	int err;
3662 
3663 	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
3664 	if (err)
3665 		return err;
3666 
3667 	err = mlxsw_sp_resources_span_register(mlxsw_core);
3668 	if (err)
3669 		goto err_resources_span_register;
3670 
3671 	err = mlxsw_sp_counter_resources_register(mlxsw_core);
3672 	if (err)
3673 		goto err_resources_counter_register;
3674 
3675 	err = mlxsw_sp_policer_resources_register(mlxsw_core);
3676 	if (err)
3677 		goto err_policer_resources_register;
3678 
3679 	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
3680 	if (err)
3681 		goto err_resources_rif_mac_profile_register;
3682 
3683 	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
3684 	if (err)
3685 		goto err_resources_rifs_register;
3686 
3687 	return 0;
3688 
3689 err_resources_rifs_register:
3690 err_resources_rif_mac_profile_register:
3691 err_policer_resources_register:
3692 err_resources_counter_register:
3693 err_resources_span_register:
3694 	devlink_resources_unregister(priv_to_devlink(mlxsw_core));
3695 	return err;
3696 }
3697 
3698 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
3699 				  const struct mlxsw_config_profile *profile,
3700 				  u64 *p_single_size, u64 *p_double_size,
3701 				  u64 *p_linear_size)
3702 {
3703 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
3704 	u32 double_size;
3705 	int err;
3706 
3707 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
3708 	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
3709 		return -EIO;
3710 
3711 	/* The hash part is what is left of the KVD after the linear
3712 	 * part is taken. It is split into the single and double sizes
3713 	 * according to the parts ratio from the profile. Both sizes
3714 	 * must be multiples of the granularity from the profile. If
3715 	 * the user provided the sizes via devlink, those are used
3716 	 * instead.
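	 *
	 * An illustrative walk-through (all numbers and the PCI address
	 * below are made up for the example, not taken from any real
	 * profile): with KVD_SIZE = 245760 entries, a linear part of
	 * 98304 and a double:single parts ratio of 1:2, the hash area is
	 * 245760 - 98304 = 147456 entries; the double hash is then
	 * rounddown(147456 * 1 / 3, granularity) = 49152 and the single
	 * hash gets the remaining 98304. The same split can be overridden
	 * from user space before a reload, e.g.:
	 *
	 *   devlink resource set pci/0000:03:00.0 path /kvd/linear size 98304
	 *   devlink resource set pci/0000:03:00.0 path /kvd/hash_double size 49152
	 *   devlink dev reload pci/0000:03:00.0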
3717 */ 3718 err = devlink_resource_size_get(devlink, 3719 MLXSW_SP_RESOURCE_KVD_LINEAR, 3720 p_linear_size); 3721 if (err) 3722 *p_linear_size = profile->kvd_linear_size; 3723 3724 err = devlink_resource_size_get(devlink, 3725 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3726 p_double_size); 3727 if (err) { 3728 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3729 *p_linear_size; 3730 double_size *= profile->kvd_hash_double_parts; 3731 double_size /= profile->kvd_hash_double_parts + 3732 profile->kvd_hash_single_parts; 3733 *p_double_size = rounddown(double_size, 3734 MLXSW_SP_KVD_GRANULARITY); 3735 } 3736 3737 err = devlink_resource_size_get(devlink, 3738 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3739 p_single_size); 3740 if (err) 3741 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3742 *p_double_size - *p_linear_size; 3743 3744 /* Check results are legal. */ 3745 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3746 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 3747 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 3748 return -EIO; 3749 3750 return 0; 3751 } 3752 3753 static int 3754 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 3755 struct devlink_param_gset_ctx *ctx) 3756 { 3757 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3758 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3759 3760 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 3761 return 0; 3762 } 3763 3764 static int 3765 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 3766 struct devlink_param_gset_ctx *ctx) 3767 { 3768 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3769 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3770 3771 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 3772 } 3773 3774 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 3775 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3776 "acl_region_rehash_interval", 3777 DEVLINK_PARAM_TYPE_U32, 3778 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 3779 mlxsw_sp_params_acl_region_rehash_intrvl_get, 3780 mlxsw_sp_params_acl_region_rehash_intrvl_set, 3781 NULL), 3782 }; 3783 3784 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 3785 { 3786 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3787 union devlink_param_value value; 3788 int err; 3789 3790 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 3791 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3792 if (err) 3793 return err; 3794 3795 value.vu32 = 0; 3796 devlink_param_driverinit_value_set(devlink, 3797 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3798 value); 3799 return 0; 3800 } 3801 3802 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 3803 { 3804 devlink_params_unregister(priv_to_devlink(mlxsw_core), 3805 mlxsw_sp2_devlink_params, 3806 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3807 } 3808 3809 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 3810 struct sk_buff *skb, u16 local_port) 3811 { 3812 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3813 3814 skb_pull(skb, MLXSW_TXHDR_LEN); 3815 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 3816 } 3817 3818 static struct mlxsw_driver mlxsw_sp1_driver = { 3819 .kind = mlxsw_sp1_driver_name, 3820 .priv_size = sizeof(struct mlxsw_sp), 3821 .fw_req_rev = &mlxsw_sp1_fw_rev, 3822 .fw_filename = MLXSW_SP1_FW_FILENAME, 3823 .init = 
mlxsw_sp1_init, 3824 .fini = mlxsw_sp_fini, 3825 .port_split = mlxsw_sp_port_split, 3826 .port_unsplit = mlxsw_sp_port_unsplit, 3827 .sb_pool_get = mlxsw_sp_sb_pool_get, 3828 .sb_pool_set = mlxsw_sp_sb_pool_set, 3829 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3830 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3831 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3832 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3833 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3834 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3835 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3836 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3837 .trap_init = mlxsw_sp_trap_init, 3838 .trap_fini = mlxsw_sp_trap_fini, 3839 .trap_action_set = mlxsw_sp_trap_action_set, 3840 .trap_group_init = mlxsw_sp_trap_group_init, 3841 .trap_group_set = mlxsw_sp_trap_group_set, 3842 .trap_policer_init = mlxsw_sp_trap_policer_init, 3843 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3844 .trap_policer_set = mlxsw_sp_trap_policer_set, 3845 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3846 .txhdr_construct = mlxsw_sp_txhdr_construct, 3847 .resources_register = mlxsw_sp1_resources_register, 3848 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 3849 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3850 .txhdr_len = MLXSW_TXHDR_LEN, 3851 .profile = &mlxsw_sp1_config_profile, 3852 }; 3853 3854 static struct mlxsw_driver mlxsw_sp2_driver = { 3855 .kind = mlxsw_sp2_driver_name, 3856 .priv_size = sizeof(struct mlxsw_sp), 3857 .fw_req_rev = &mlxsw_sp2_fw_rev, 3858 .fw_filename = MLXSW_SP2_FW_FILENAME, 3859 .init = mlxsw_sp2_init, 3860 .fini = mlxsw_sp_fini, 3861 .port_split = mlxsw_sp_port_split, 3862 .port_unsplit = mlxsw_sp_port_unsplit, 3863 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 3864 .sb_pool_get = mlxsw_sp_sb_pool_get, 3865 .sb_pool_set = mlxsw_sp_sb_pool_set, 3866 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3867 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3868 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3869 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3870 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3871 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3872 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3873 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3874 .trap_init = mlxsw_sp_trap_init, 3875 .trap_fini = mlxsw_sp_trap_fini, 3876 .trap_action_set = mlxsw_sp_trap_action_set, 3877 .trap_group_init = mlxsw_sp_trap_group_init, 3878 .trap_group_set = mlxsw_sp_trap_group_set, 3879 .trap_policer_init = mlxsw_sp_trap_policer_init, 3880 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3881 .trap_policer_set = mlxsw_sp_trap_policer_set, 3882 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3883 .txhdr_construct = mlxsw_sp_txhdr_construct, 3884 .resources_register = mlxsw_sp2_resources_register, 3885 .params_register = mlxsw_sp2_params_register, 3886 .params_unregister = mlxsw_sp2_params_unregister, 3887 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3888 .txhdr_len = MLXSW_TXHDR_LEN, 3889 .profile = &mlxsw_sp2_config_profile, 3890 }; 3891 3892 static struct mlxsw_driver mlxsw_sp3_driver = { 3893 .kind = mlxsw_sp3_driver_name, 3894 .priv_size = sizeof(struct mlxsw_sp), 3895 .fw_req_rev = &mlxsw_sp3_fw_rev, 3896 .fw_filename = MLXSW_SP3_FW_FILENAME, 3897 .init = mlxsw_sp3_init, 3898 .fini = mlxsw_sp_fini, 3899 .port_split = mlxsw_sp_port_split, 3900 .port_unsplit = mlxsw_sp_port_unsplit, 3901 .ports_remove_selected = 
mlxsw_sp_ports_remove_selected, 3902 .sb_pool_get = mlxsw_sp_sb_pool_get, 3903 .sb_pool_set = mlxsw_sp_sb_pool_set, 3904 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3905 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3906 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3907 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3908 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3909 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3910 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3911 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3912 .trap_init = mlxsw_sp_trap_init, 3913 .trap_fini = mlxsw_sp_trap_fini, 3914 .trap_action_set = mlxsw_sp_trap_action_set, 3915 .trap_group_init = mlxsw_sp_trap_group_init, 3916 .trap_group_set = mlxsw_sp_trap_group_set, 3917 .trap_policer_init = mlxsw_sp_trap_policer_init, 3918 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3919 .trap_policer_set = mlxsw_sp_trap_policer_set, 3920 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3921 .txhdr_construct = mlxsw_sp_txhdr_construct, 3922 .resources_register = mlxsw_sp2_resources_register, 3923 .params_register = mlxsw_sp2_params_register, 3924 .params_unregister = mlxsw_sp2_params_unregister, 3925 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3926 .txhdr_len = MLXSW_TXHDR_LEN, 3927 .profile = &mlxsw_sp2_config_profile, 3928 }; 3929 3930 static struct mlxsw_driver mlxsw_sp4_driver = { 3931 .kind = mlxsw_sp4_driver_name, 3932 .priv_size = sizeof(struct mlxsw_sp), 3933 .init = mlxsw_sp4_init, 3934 .fini = mlxsw_sp_fini, 3935 .port_split = mlxsw_sp_port_split, 3936 .port_unsplit = mlxsw_sp_port_unsplit, 3937 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 3938 .sb_pool_get = mlxsw_sp_sb_pool_get, 3939 .sb_pool_set = mlxsw_sp_sb_pool_set, 3940 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3941 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3942 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3943 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3944 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3945 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3946 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3947 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3948 .trap_init = mlxsw_sp_trap_init, 3949 .trap_fini = mlxsw_sp_trap_fini, 3950 .trap_action_set = mlxsw_sp_trap_action_set, 3951 .trap_group_init = mlxsw_sp_trap_group_init, 3952 .trap_group_set = mlxsw_sp_trap_group_set, 3953 .trap_policer_init = mlxsw_sp_trap_policer_init, 3954 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3955 .trap_policer_set = mlxsw_sp_trap_policer_set, 3956 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3957 .txhdr_construct = mlxsw_sp_txhdr_construct, 3958 .resources_register = mlxsw_sp2_resources_register, 3959 .params_register = mlxsw_sp2_params_register, 3960 .params_unregister = mlxsw_sp2_params_unregister, 3961 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3962 .txhdr_len = MLXSW_TXHDR_LEN, 3963 .profile = &mlxsw_sp2_config_profile, 3964 }; 3965 3966 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 3967 { 3968 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 3969 } 3970 3971 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, 3972 struct netdev_nested_priv *priv) 3973 { 3974 int ret = 0; 3975 3976 if (mlxsw_sp_port_dev_check(lower_dev)) { 3977 priv->data = (void *)netdev_priv(lower_dev); 3978 ret = 1; 3979 } 3980 3981 return ret; 3982 } 3983 3984 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 
3985 { 3986 struct netdev_nested_priv priv = { 3987 .data = NULL, 3988 }; 3989 3990 if (mlxsw_sp_port_dev_check(dev)) 3991 return netdev_priv(dev); 3992 3993 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv); 3994 3995 return (struct mlxsw_sp_port *)priv.data; 3996 } 3997 3998 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 3999 { 4000 struct mlxsw_sp_port *mlxsw_sp_port; 4001 4002 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 4003 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; 4004 } 4005 4006 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 4007 { 4008 struct netdev_nested_priv priv = { 4009 .data = NULL, 4010 }; 4011 4012 if (mlxsw_sp_port_dev_check(dev)) 4013 return netdev_priv(dev); 4014 4015 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 4016 &priv); 4017 4018 return (struct mlxsw_sp_port *)priv.data; 4019 } 4020 4021 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 4022 { 4023 struct mlxsw_sp_port *mlxsw_sp_port; 4024 4025 rcu_read_lock(); 4026 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 4027 if (mlxsw_sp_port) 4028 dev_hold(mlxsw_sp_port->dev); 4029 rcu_read_unlock(); 4030 return mlxsw_sp_port; 4031 } 4032 4033 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 4034 { 4035 dev_put(mlxsw_sp_port->dev); 4036 } 4037 4038 int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp) 4039 { 4040 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4041 int err = 0; 4042 4043 mutex_lock(&mlxsw_sp->parsing.lock); 4044 4045 if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref)) 4046 goto out_unlock; 4047 4048 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH, 4049 mlxsw_sp->parsing.vxlan_udp_dport); 4050 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4051 if (err) 4052 goto out_unlock; 4053 4054 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH; 4055 refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1); 4056 4057 out_unlock: 4058 mutex_unlock(&mlxsw_sp->parsing.lock); 4059 return err; 4060 } 4061 4062 void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp) 4063 { 4064 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4065 4066 mutex_lock(&mlxsw_sp->parsing.lock); 4067 4068 if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref)) 4069 goto out_unlock; 4070 4071 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH, 4072 mlxsw_sp->parsing.vxlan_udp_dport); 4073 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4074 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 4075 4076 out_unlock: 4077 mutex_unlock(&mlxsw_sp->parsing.lock); 4078 } 4079 4080 int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp, 4081 __be16 udp_dport) 4082 { 4083 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4084 int err; 4085 4086 mutex_lock(&mlxsw_sp->parsing.lock); 4087 4088 mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth, 4089 be16_to_cpu(udp_dport)); 4090 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4091 if (err) 4092 goto out_unlock; 4093 4094 mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport); 4095 4096 out_unlock: 4097 mutex_unlock(&mlxsw_sp->parsing.lock); 4098 return err; 4099 } 4100 4101 static void 4102 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 4103 struct net_device *lag_dev) 4104 { 4105 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 4106 struct net_device *upper_dev; 4107 struct list_head *iter; 4108 4109 if 
(netif_is_bridge_port(lag_dev)) 4110 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 4111 4112 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4113 if (!netif_is_bridge_port(upper_dev)) 4114 continue; 4115 br_dev = netdev_master_upper_dev_get(upper_dev); 4116 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 4117 } 4118 } 4119 4120 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4121 { 4122 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4123 4124 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 4125 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4126 } 4127 4128 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4129 { 4130 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4131 4132 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 4133 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4134 } 4135 4136 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4137 u16 lag_id, u8 port_index) 4138 { 4139 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4140 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4141 4142 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 4143 lag_id, port_index); 4144 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4145 } 4146 4147 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4148 u16 lag_id) 4149 { 4150 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4151 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4152 4153 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 4154 lag_id); 4155 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4156 } 4157 4158 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 4159 u16 lag_id) 4160 { 4161 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4162 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4163 4164 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 4165 lag_id); 4166 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4167 } 4168 4169 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 4170 u16 lag_id) 4171 { 4172 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4173 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4174 4175 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 4176 lag_id); 4177 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4178 } 4179 4180 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4181 struct net_device *lag_dev, 4182 u16 *p_lag_id) 4183 { 4184 struct mlxsw_sp_upper *lag; 4185 int free_lag_id = -1; 4186 u64 max_lag; 4187 int i; 4188 4189 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); 4190 for (i = 0; i < max_lag; i++) { 4191 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 4192 if (lag->ref_count) { 4193 if (lag->dev == lag_dev) { 4194 *p_lag_id = i; 4195 return 0; 4196 } 4197 } else if (free_lag_id < 0) { 4198 free_lag_id = i; 4199 } 4200 } 4201 if (free_lag_id < 0) 4202 return -EBUSY; 4203 *p_lag_id = free_lag_id; 4204 return 0; 4205 } 4206 4207 static bool 4208 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 4209 struct net_device *lag_dev, 4210 struct netdev_lag_upper_info *lag_upper_info, 4211 struct netlink_ext_ack *extack) 4212 { 4213 u16 lag_id; 4214 4215 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 4216 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 4217 return false; 4218 } 4219 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 4220 
NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
4221 		return false;
4222 	}
4223 	return true;
4224 }
4225 
4226 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4227 				       u16 lag_id, u8 *p_port_index)
4228 {
4229 	u64 max_lag_members;
4230 	int i;
4231 
4232 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4233 					     MAX_LAG_MEMBERS);
4234 	for (i = 0; i < max_lag_members; i++) {
4235 		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
4236 			*p_port_index = i;
4237 			return 0;
4238 		}
4239 	}
4240 	return -EBUSY;
4241 }
4242 
4243 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4244 				  struct net_device *lag_dev,
4245 				  struct netlink_ext_ack *extack)
4246 {
4247 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4248 	struct mlxsw_sp_upper *lag;
4249 	u16 lag_id;
4250 	u8 port_index;
4251 	int err;
4252 
4253 	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
4254 	if (err)
4255 		return err;
4256 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4257 	if (!lag->ref_count) {
4258 		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
4259 		if (err)
4260 			return err;
4261 		lag->dev = lag_dev;
4262 	}
4263 
4264 	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
4265 	if (err)
4266 		return err;
4267 	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
4268 	if (err)
4269 		goto err_col_port_add;
4270 
4271 	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4272 				   mlxsw_sp_port->local_port);
4273 	mlxsw_sp_port->lag_id = lag_id;
4274 	mlxsw_sp_port->lagged = 1;
4275 	lag->ref_count++;
4276 
4277 	/* Port is no longer usable as a router interface */
4278 	if (mlxsw_sp_port->default_vlan->fid)
4279 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
4280 
4281 	/* Join a router interface configured on the LAG, if one exists */
4282 	err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
4283 					     lag_dev, extack);
4284 	if (err)
4285 		goto err_router_join;
4286 
4287 	return 0;
4288 
4289 err_router_join:
4290 	lag->ref_count--;
4291 	mlxsw_sp_port->lagged = 0;
4292 	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4293 				     mlxsw_sp_port->local_port);
4294 	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4295 err_col_port_add:
4296 	if (!lag->ref_count)
4297 		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4298 	return err;
4299 }
4300 
4301 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4302 				    struct net_device *lag_dev)
4303 {
4304 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4305 	u16 lag_id = mlxsw_sp_port->lag_id;
4306 	struct mlxsw_sp_upper *lag;
4307 
4308 	if (!mlxsw_sp_port->lagged)
4309 		return;
4310 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4311 	WARN_ON(lag->ref_count == 0);
4312 
4313 	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4314 
4315 	/* Any VLANs configured on the port are no longer valid */
4316 	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
4317 	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
4318 	/* Make the LAG and its directly linked uppers leave the bridges
4319 	 * they are members of.
4320 	 */
4321 	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
4322 
4323 	if (lag->ref_count == 1)
4324 		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4325 
4326 	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4327 				     mlxsw_sp_port->local_port);
4328 	mlxsw_sp_port->lagged = 0;
4329 	lag->ref_count--;
4330 
4331 	/* Make sure untagged frames are allowed to ingress */
4332 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
4333 			       ETH_P_8021Q);
4334 }
4335 
4336 static int mlxsw_sp_lag_dist_port_add(struct
mlxsw_sp_port *mlxsw_sp_port, 4337 u16 lag_id) 4338 { 4339 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4340 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4341 4342 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 4343 mlxsw_sp_port->local_port); 4344 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4345 } 4346 4347 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4348 u16 lag_id) 4349 { 4350 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4351 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4352 4353 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 4354 mlxsw_sp_port->local_port); 4355 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4356 } 4357 4358 static int 4359 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 4360 { 4361 int err; 4362 4363 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 4364 mlxsw_sp_port->lag_id); 4365 if (err) 4366 return err; 4367 4368 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4369 if (err) 4370 goto err_dist_port_add; 4371 4372 return 0; 4373 4374 err_dist_port_add: 4375 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4376 return err; 4377 } 4378 4379 static int 4380 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 4381 { 4382 int err; 4383 4384 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 4385 mlxsw_sp_port->lag_id); 4386 if (err) 4387 return err; 4388 4389 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 4390 mlxsw_sp_port->lag_id); 4391 if (err) 4392 goto err_col_port_disable; 4393 4394 return 0; 4395 4396 err_col_port_disable: 4397 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4398 return err; 4399 } 4400 4401 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 4402 struct netdev_lag_lower_state_info *info) 4403 { 4404 if (info->tx_enabled) 4405 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 4406 else 4407 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4408 } 4409 4410 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 4411 bool enable) 4412 { 4413 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4414 enum mlxsw_reg_spms_state spms_state; 4415 char *spms_pl; 4416 u16 vid; 4417 int err; 4418 4419 spms_state = enable ? 
MLXSW_REG_SPMS_STATE_FORWARDING : 4420 MLXSW_REG_SPMS_STATE_DISCARDING; 4421 4422 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 4423 if (!spms_pl) 4424 return -ENOMEM; 4425 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 4426 4427 for (vid = 0; vid < VLAN_N_VID; vid++) 4428 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 4429 4430 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 4431 kfree(spms_pl); 4432 return err; 4433 } 4434 4435 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4436 { 4437 u16 vid = 1; 4438 int err; 4439 4440 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 4441 if (err) 4442 return err; 4443 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 4444 if (err) 4445 goto err_port_stp_set; 4446 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 4447 true, false); 4448 if (err) 4449 goto err_port_vlan_set; 4450 4451 for (; vid <= VLAN_N_VID - 1; vid++) { 4452 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4453 vid, false); 4454 if (err) 4455 goto err_vid_learning_set; 4456 } 4457 4458 return 0; 4459 4460 err_vid_learning_set: 4461 for (vid--; vid >= 1; vid--) 4462 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 4463 err_port_vlan_set: 4464 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4465 err_port_stp_set: 4466 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4467 return err; 4468 } 4469 4470 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4471 { 4472 u16 vid; 4473 4474 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 4475 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4476 vid, true); 4477 4478 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 4479 false, false); 4480 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4481 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4482 } 4483 4484 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 4485 { 4486 unsigned int num_vxlans = 0; 4487 struct net_device *dev; 4488 struct list_head *iter; 4489 4490 netdev_for_each_lower_dev(br_dev, dev, iter) { 4491 if (netif_is_vxlan(dev)) 4492 num_vxlans++; 4493 } 4494 4495 return num_vxlans > 1; 4496 } 4497 4498 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 4499 { 4500 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 4501 struct net_device *dev; 4502 struct list_head *iter; 4503 4504 netdev_for_each_lower_dev(br_dev, dev, iter) { 4505 u16 pvid; 4506 int err; 4507 4508 if (!netif_is_vxlan(dev)) 4509 continue; 4510 4511 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 4512 if (err || !pvid) 4513 continue; 4514 4515 if (test_and_set_bit(pvid, vlans)) 4516 return false; 4517 } 4518 4519 return true; 4520 } 4521 4522 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 4523 struct netlink_ext_ack *extack) 4524 { 4525 if (br_multicast_enabled(br_dev)) { 4526 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 4527 return false; 4528 } 4529 4530 if (!br_vlan_enabled(br_dev) && 4531 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 4532 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 4533 return false; 4534 } 4535 4536 if (br_vlan_enabled(br_dev) && 4537 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 4538 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 4539 return false; 4540 } 4541 4542 return true; 4543 } 4544 4545 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 
4546 struct net_device *dev, 4547 unsigned long event, void *ptr) 4548 { 4549 struct netdev_notifier_changeupper_info *info; 4550 struct mlxsw_sp_port *mlxsw_sp_port; 4551 struct netlink_ext_ack *extack; 4552 struct net_device *upper_dev; 4553 struct mlxsw_sp *mlxsw_sp; 4554 int err = 0; 4555 u16 proto; 4556 4557 mlxsw_sp_port = netdev_priv(dev); 4558 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4559 info = ptr; 4560 extack = netdev_notifier_info_to_extack(&info->info); 4561 4562 switch (event) { 4563 case NETDEV_PRECHANGEUPPER: 4564 upper_dev = info->upper_dev; 4565 if (!is_vlan_dev(upper_dev) && 4566 !netif_is_lag_master(upper_dev) && 4567 !netif_is_bridge_master(upper_dev) && 4568 !netif_is_ovs_master(upper_dev) && 4569 !netif_is_macvlan(upper_dev) && 4570 !netif_is_l3_master(upper_dev)) { 4571 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4572 return -EINVAL; 4573 } 4574 if (!info->linking) 4575 break; 4576 if (netif_is_bridge_master(upper_dev) && 4577 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 4578 mlxsw_sp_bridge_has_vxlan(upper_dev) && 4579 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4580 return -EOPNOTSUPP; 4581 if (netdev_has_any_upper_dev(upper_dev) && 4582 (!netif_is_bridge_master(upper_dev) || 4583 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4584 upper_dev))) { 4585 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 4586 return -EINVAL; 4587 } 4588 if (netif_is_lag_master(upper_dev) && 4589 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 4590 info->upper_info, extack)) 4591 return -EINVAL; 4592 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 4593 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 4594 return -EINVAL; 4595 } 4596 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 4597 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 4598 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 4599 return -EINVAL; 4600 } 4601 if (netif_is_macvlan(upper_dev) && 4602 !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) { 4603 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4604 return -EOPNOTSUPP; 4605 } 4606 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 4607 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); 4608 return -EINVAL; 4609 } 4610 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 4611 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 4612 return -EINVAL; 4613 } 4614 if (netif_is_bridge_master(upper_dev)) { 4615 br_vlan_get_proto(upper_dev, &proto); 4616 if (br_vlan_enabled(upper_dev) && 4617 proto != ETH_P_8021Q && proto != ETH_P_8021AD) { 4618 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported"); 4619 return -EOPNOTSUPP; 4620 } 4621 if (vlan_uses_dev(lower_dev) && 4622 br_vlan_enabled(upper_dev) && 4623 proto == ETH_P_8021AD) { 4624 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported"); 4625 return -EOPNOTSUPP; 4626 } 4627 } 4628 if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) { 4629 struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev); 4630 4631 if (br_vlan_enabled(br_dev)) { 4632 br_vlan_get_proto(br_dev, &proto); 4633 if (proto == ETH_P_8021AD) { 4634 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge"); 4635 return -EOPNOTSUPP; 
4636 } 4637 } 4638 } 4639 if (is_vlan_dev(upper_dev) && 4640 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 4641 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 4642 return -EOPNOTSUPP; 4643 } 4644 break; 4645 case NETDEV_CHANGEUPPER: 4646 upper_dev = info->upper_dev; 4647 if (netif_is_bridge_master(upper_dev)) { 4648 if (info->linking) 4649 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4650 lower_dev, 4651 upper_dev, 4652 extack); 4653 else 4654 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4655 lower_dev, 4656 upper_dev); 4657 } else if (netif_is_lag_master(upper_dev)) { 4658 if (info->linking) { 4659 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 4660 upper_dev, extack); 4661 } else { 4662 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4663 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 4664 upper_dev); 4665 } 4666 } else if (netif_is_ovs_master(upper_dev)) { 4667 if (info->linking) 4668 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 4669 else 4670 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 4671 } else if (netif_is_macvlan(upper_dev)) { 4672 if (!info->linking) 4673 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4674 } else if (is_vlan_dev(upper_dev)) { 4675 struct net_device *br_dev; 4676 4677 if (!netif_is_bridge_port(upper_dev)) 4678 break; 4679 if (info->linking) 4680 break; 4681 br_dev = netdev_master_upper_dev_get(upper_dev); 4682 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 4683 br_dev); 4684 } 4685 break; 4686 } 4687 4688 return err; 4689 } 4690 4691 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 4692 unsigned long event, void *ptr) 4693 { 4694 struct netdev_notifier_changelowerstate_info *info; 4695 struct mlxsw_sp_port *mlxsw_sp_port; 4696 int err; 4697 4698 mlxsw_sp_port = netdev_priv(dev); 4699 info = ptr; 4700 4701 switch (event) { 4702 case NETDEV_CHANGELOWERSTATE: 4703 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 4704 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 4705 info->lower_state_info); 4706 if (err) 4707 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 4708 } 4709 break; 4710 } 4711 4712 return 0; 4713 } 4714 4715 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 4716 struct net_device *port_dev, 4717 unsigned long event, void *ptr) 4718 { 4719 switch (event) { 4720 case NETDEV_PRECHANGEUPPER: 4721 case NETDEV_CHANGEUPPER: 4722 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 4723 event, ptr); 4724 case NETDEV_CHANGELOWERSTATE: 4725 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 4726 ptr); 4727 } 4728 4729 return 0; 4730 } 4731 4732 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 4733 unsigned long event, void *ptr) 4734 { 4735 struct net_device *dev; 4736 struct list_head *iter; 4737 int ret; 4738 4739 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4740 if (mlxsw_sp_port_dev_check(dev)) { 4741 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 4742 ptr); 4743 if (ret) 4744 return ret; 4745 } 4746 } 4747 4748 return 0; 4749 } 4750 4751 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 4752 struct net_device *dev, 4753 unsigned long event, void *ptr, 4754 u16 vid) 4755 { 4756 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 4757 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4758 struct netdev_notifier_changeupper_info *info = ptr; 4759 struct netlink_ext_ack *extack; 4760 struct net_device *upper_dev; 4761 int err = 0; 4762 4763 extack = 
netdev_notifier_info_to_extack(&info->info); 4764 4765 switch (event) { 4766 case NETDEV_PRECHANGEUPPER: 4767 upper_dev = info->upper_dev; 4768 if (!netif_is_bridge_master(upper_dev) && 4769 !netif_is_macvlan(upper_dev) && 4770 !netif_is_l3_master(upper_dev)) { 4771 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4772 return -EINVAL; 4773 } 4774 if (!info->linking) 4775 break; 4776 if (netif_is_bridge_master(upper_dev) && 4777 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 4778 mlxsw_sp_bridge_has_vxlan(upper_dev) && 4779 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4780 return -EOPNOTSUPP; 4781 if (netdev_has_any_upper_dev(upper_dev) && 4782 (!netif_is_bridge_master(upper_dev) || 4783 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4784 upper_dev))) { 4785 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 4786 return -EINVAL; 4787 } 4788 if (netif_is_macvlan(upper_dev) && 4789 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 4790 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4791 return -EOPNOTSUPP; 4792 } 4793 break; 4794 case NETDEV_CHANGEUPPER: 4795 upper_dev = info->upper_dev; 4796 if (netif_is_bridge_master(upper_dev)) { 4797 if (info->linking) 4798 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4799 vlan_dev, 4800 upper_dev, 4801 extack); 4802 else 4803 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4804 vlan_dev, 4805 upper_dev); 4806 } else if (netif_is_macvlan(upper_dev)) { 4807 if (!info->linking) 4808 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4809 } 4810 break; 4811 } 4812 4813 return err; 4814 } 4815 4816 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 4817 struct net_device *lag_dev, 4818 unsigned long event, 4819 void *ptr, u16 vid) 4820 { 4821 struct net_device *dev; 4822 struct list_head *iter; 4823 int ret; 4824 4825 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4826 if (mlxsw_sp_port_dev_check(dev)) { 4827 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 4828 event, ptr, 4829 vid); 4830 if (ret) 4831 return ret; 4832 } 4833 } 4834 4835 return 0; 4836 } 4837 4838 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, 4839 struct net_device *br_dev, 4840 unsigned long event, void *ptr, 4841 u16 vid) 4842 { 4843 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 4844 struct netdev_notifier_changeupper_info *info = ptr; 4845 struct netlink_ext_ack *extack; 4846 struct net_device *upper_dev; 4847 4848 if (!mlxsw_sp) 4849 return 0; 4850 4851 extack = netdev_notifier_info_to_extack(&info->info); 4852 4853 switch (event) { 4854 case NETDEV_PRECHANGEUPPER: 4855 upper_dev = info->upper_dev; 4856 if (!netif_is_macvlan(upper_dev) && 4857 !netif_is_l3_master(upper_dev)) { 4858 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4859 return -EOPNOTSUPP; 4860 } 4861 if (!info->linking) 4862 break; 4863 if (netif_is_macvlan(upper_dev) && 4864 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 4865 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4866 return -EOPNOTSUPP; 4867 } 4868 break; 4869 case NETDEV_CHANGEUPPER: 4870 upper_dev = info->upper_dev; 4871 if (info->linking) 4872 break; 4873 if (netif_is_macvlan(upper_dev)) 4874 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4875 break; 4876 } 4877 4878 return 0; 4879 } 4880 4881 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 4882 unsigned long event, void *ptr) 4883 { 4884 struct net_device 
*real_dev = vlan_dev_real_dev(vlan_dev); 4885 u16 vid = vlan_dev_vlan_id(vlan_dev); 4886 4887 if (mlxsw_sp_port_dev_check(real_dev)) 4888 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 4889 event, ptr, vid); 4890 else if (netif_is_lag_master(real_dev)) 4891 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 4892 real_dev, event, 4893 ptr, vid); 4894 else if (netif_is_bridge_master(real_dev)) 4895 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, 4896 event, ptr, vid); 4897 4898 return 0; 4899 } 4900 4901 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 4902 unsigned long event, void *ptr) 4903 { 4904 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); 4905 struct netdev_notifier_changeupper_info *info = ptr; 4906 struct netlink_ext_ack *extack; 4907 struct net_device *upper_dev; 4908 u16 proto; 4909 4910 if (!mlxsw_sp) 4911 return 0; 4912 4913 extack = netdev_notifier_info_to_extack(&info->info); 4914 4915 switch (event) { 4916 case NETDEV_PRECHANGEUPPER: 4917 upper_dev = info->upper_dev; 4918 if (!is_vlan_dev(upper_dev) && 4919 !netif_is_macvlan(upper_dev) && 4920 !netif_is_l3_master(upper_dev)) { 4921 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4922 return -EOPNOTSUPP; 4923 } 4924 if (!info->linking) 4925 break; 4926 if (br_vlan_enabled(br_dev)) { 4927 br_vlan_get_proto(br_dev, &proto); 4928 if (proto == ETH_P_8021AD) { 4929 NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge"); 4930 return -EOPNOTSUPP; 4931 } 4932 } 4933 if (is_vlan_dev(upper_dev) && 4934 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 4935 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 4936 return -EOPNOTSUPP; 4937 } 4938 if (netif_is_macvlan(upper_dev) && 4939 !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) { 4940 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4941 return -EOPNOTSUPP; 4942 } 4943 break; 4944 case NETDEV_CHANGEUPPER: 4945 upper_dev = info->upper_dev; 4946 if (info->linking) 4947 break; 4948 if (is_vlan_dev(upper_dev)) 4949 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 4950 if (netif_is_macvlan(upper_dev)) 4951 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4952 break; 4953 } 4954 4955 return 0; 4956 } 4957 4958 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 4959 unsigned long event, void *ptr) 4960 { 4961 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 4962 struct netdev_notifier_changeupper_info *info = ptr; 4963 struct netlink_ext_ack *extack; 4964 struct net_device *upper_dev; 4965 4966 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 4967 return 0; 4968 4969 extack = netdev_notifier_info_to_extack(&info->info); 4970 upper_dev = info->upper_dev; 4971 4972 if (!netif_is_l3_master(upper_dev)) { 4973 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4974 return -EOPNOTSUPP; 4975 } 4976 4977 return 0; 4978 } 4979 4980 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 4981 struct net_device *dev, 4982 unsigned long event, void *ptr) 4983 { 4984 struct netdev_notifier_changeupper_info *cu_info; 4985 struct netdev_notifier_info *info = ptr; 4986 struct netlink_ext_ack *extack; 4987 struct net_device *upper_dev; 4988 4989 extack = netdev_notifier_info_to_extack(info); 4990 4991 switch (event) { 4992 case NETDEV_CHANGEUPPER: 4993 cu_info = container_of(info, 4994 struct netdev_notifier_changeupper_info, 4995 info); 4996 upper_dev = cu_info->upper_dev; 
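		/* Illustrative only: a user-space sequence that can lead to
		 * this handler (device names, VNI and addresses are example
		 * values, not requirements):
		 *
		 *   ip link add br0 type bridge vlan_filtering 0
		 *   ip link set swp1 master br0
		 *   ip link add vx0 type vxlan id 100 \
		 *           local 192.0.2.1 dstport 4789
		 *   ip link set vx0 master br0
		 *
		 * The checks below bail out unless the new upper is a bridge
		 * with an mlxsw port beneath it.
		 */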
4997 if (!netif_is_bridge_master(upper_dev)) 4998 return 0; 4999 if (!mlxsw_sp_lower_get(upper_dev)) 5000 return 0; 5001 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5002 return -EOPNOTSUPP; 5003 if (cu_info->linking) { 5004 if (!netif_running(dev)) 5005 return 0; 5006 /* When the bridge is VLAN-aware, the VNI of the VxLAN 5007 * device needs to be mapped to a VLAN, but at this 5008 * point no VLANs are configured on the VxLAN device 5009 */ 5010 if (br_vlan_enabled(upper_dev)) 5011 return 0; 5012 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 5013 dev, 0, extack); 5014 } else { 5015 /* VLANs were already flushed, which triggered the 5016 * necessary cleanup 5017 */ 5018 if (br_vlan_enabled(upper_dev)) 5019 return 0; 5020 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5021 } 5022 break; 5023 case NETDEV_PRE_UP: 5024 upper_dev = netdev_master_upper_dev_get(dev); 5025 if (!upper_dev) 5026 return 0; 5027 if (!netif_is_bridge_master(upper_dev)) 5028 return 0; 5029 if (!mlxsw_sp_lower_get(upper_dev)) 5030 return 0; 5031 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 5032 extack); 5033 case NETDEV_DOWN: 5034 upper_dev = netdev_master_upper_dev_get(dev); 5035 if (!upper_dev) 5036 return 0; 5037 if (!netif_is_bridge_master(upper_dev)) 5038 return 0; 5039 if (!mlxsw_sp_lower_get(upper_dev)) 5040 return 0; 5041 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5042 break; 5043 } 5044 5045 return 0; 5046 } 5047 5048 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 5049 unsigned long event, void *ptr) 5050 { 5051 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 5052 struct mlxsw_sp_span_entry *span_entry; 5053 struct mlxsw_sp *mlxsw_sp; 5054 int err = 0; 5055 5056 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 5057 if (event == NETDEV_UNREGISTER) { 5058 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 5059 if (span_entry) 5060 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 5061 } 5062 mlxsw_sp_span_respin(mlxsw_sp); 5063 5064 if (netif_is_vxlan(dev)) 5065 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 5066 else if (mlxsw_sp_port_dev_check(dev)) 5067 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); 5068 else if (netif_is_lag_master(dev)) 5069 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 5070 else if (is_vlan_dev(dev)) 5071 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 5072 else if (netif_is_bridge_master(dev)) 5073 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); 5074 else if (netif_is_macvlan(dev)) 5075 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 5076 5077 return notifier_from_errno(err); 5078 } 5079 5080 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { 5081 .notifier_call = mlxsw_sp_inetaddr_valid_event, 5082 }; 5083 5084 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { 5085 .notifier_call = mlxsw_sp_inet6addr_valid_event, 5086 }; 5087 5088 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 5089 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 5090 {0, }, 5091 }; 5092 5093 static struct pci_driver mlxsw_sp1_pci_driver = { 5094 .name = mlxsw_sp1_driver_name, 5095 .id_table = mlxsw_sp1_pci_id_table, 5096 }; 5097 5098 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 5099 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 5100 {0, }, 5101 }; 5102 5103 static struct pci_driver mlxsw_sp2_pci_driver = { 5104 .name = mlxsw_sp2_driver_name, 5105 .id_table = 
mlxsw_sp2_pci_id_table, 5106 }; 5107 5108 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = { 5109 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0}, 5110 {0, }, 5111 }; 5112 5113 static struct pci_driver mlxsw_sp3_pci_driver = { 5114 .name = mlxsw_sp3_driver_name, 5115 .id_table = mlxsw_sp3_pci_id_table, 5116 }; 5117 5118 static const struct pci_device_id mlxsw_sp4_pci_id_table[] = { 5119 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0}, 5120 {0, }, 5121 }; 5122 5123 static struct pci_driver mlxsw_sp4_pci_driver = { 5124 .name = mlxsw_sp4_driver_name, 5125 .id_table = mlxsw_sp4_pci_id_table, 5126 }; 5127 5128 static int __init mlxsw_sp_module_init(void) 5129 { 5130 int err; 5131 5132 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5133 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5134 5135 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 5136 if (err) 5137 goto err_sp1_core_driver_register; 5138 5139 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 5140 if (err) 5141 goto err_sp2_core_driver_register; 5142 5143 err = mlxsw_core_driver_register(&mlxsw_sp3_driver); 5144 if (err) 5145 goto err_sp3_core_driver_register; 5146 5147 err = mlxsw_core_driver_register(&mlxsw_sp4_driver); 5148 if (err) 5149 goto err_sp4_core_driver_register; 5150 5151 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 5152 if (err) 5153 goto err_sp1_pci_driver_register; 5154 5155 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 5156 if (err) 5157 goto err_sp2_pci_driver_register; 5158 5159 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver); 5160 if (err) 5161 goto err_sp3_pci_driver_register; 5162 5163 err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver); 5164 if (err) 5165 goto err_sp4_pci_driver_register; 5166 5167 return 0; 5168 5169 err_sp4_pci_driver_register: 5170 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 5171 err_sp3_pci_driver_register: 5172 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5173 err_sp2_pci_driver_register: 5174 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5175 err_sp1_pci_driver_register: 5176 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 5177 err_sp4_core_driver_register: 5178 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 5179 err_sp3_core_driver_register: 5180 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5181 err_sp2_core_driver_register: 5182 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5183 err_sp1_core_driver_register: 5184 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5185 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5186 return err; 5187 } 5188 5189 static void __exit mlxsw_sp_module_exit(void) 5190 { 5191 mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver); 5192 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 5193 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5194 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5195 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 5196 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 5197 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5198 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5199 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5200 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5201 } 5202 5203 module_init(mlxsw_sp_module_init); 5204 module_exit(mlxsw_sp_module_exit); 5205 5206 MODULE_LICENSE("Dual BSD/GPL"); 5207 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 5208 MODULE_DESCRIPTION("Mellanox 
Spectrum driver"); 5209 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 5210 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 5211 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); 5212 MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table); 5213 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 5214 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); 5215 MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME); 5216 MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME); 5217